Dataset schema (one record per source file):

  field                  type    range / cardinality
  ---------------------  ------  -------------------
  file_path              string  lengths 3 to 280
  file_language          string  66 distinct values
  content                string  lengths 1 to 1.04M
  repo_name              string  lengths 5 to 92
  repo_stars             int64   0 to 154k
  repo_description       string  lengths 0 to 402
  repo_primary_language  string  108 distinct values
  developer_username     string  lengths 1 to 25
  developer_name         string  lengths 0 to 30
  developer_company      string  lengths 0 to 82
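The records below follow this schema. As a quick illustration (not part of the dump itself), a table with these columns can be loaded and filtered with the Hugging Face `datasets` library; the dataset path `user/code-dump` is a placeholder, not the real identifier:

```python
# Hypothetical sketch: loading a dump with the columns listed above.
# "user/code-dump" is a placeholder path, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("user/code-dump", split="train")

# Keep only Python files from repositories with more than 1,000 stars.
popular_python = ds.filter(
    lambda row: row["file_language"] == "Python" and row["repo_stars"] > 1000
)

for row in popular_python.select(range(3)):
    print(row["repo_name"], row["file_path"], len(row["content"]))
```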
tooling/tailwind/eslint.config.js
JavaScript
// FIXME: This kinda stinks...
/// <reference types="../../tooling/eslint/types.d.ts" />

import baseConfig from "@acme/eslint-config/base";

export default [...baseConfig];
ymc9/my-t3-turbo
0
TypeScript
ymc9
Yiming Cao
zenstackhq
tooling/tailwind/native.ts
TypeScript
import type { Config } from "tailwindcss";

import base from "./base";

export default {
  content: base.content,
  presets: [base],
  theme: {},
} satisfies Config;
ymc9/my-t3-turbo
0
TypeScript
ymc9
Yiming Cao
zenstackhq
tooling/tailwind/web.ts
TypeScript
import type { Config } from "tailwindcss";
import animate from "tailwindcss-animate";

import base from "./base";

export default {
  content: base.content,
  presets: [base],
  theme: {
    container: {
      center: true,
      padding: "2rem",
      screens: {
        "2xl": "1400px",
      },
    },
    extend: {
      borderRadius: {
        lg: "var(--radius)",
        md: "calc(var(--radius) - 2px)",
        sm: "calc(var(--radius) - 4px)",
      },
      keyframes: {
        "accordion-down": {
          from: { height: "0" },
          to: { height: "var(--radix-accordion-content-height)" },
        },
        "accordion-up": {
          from: { height: "var(--radix-accordion-content-height)" },
          to: { height: "0" },
        },
      },
      animation: {
        "accordion-down": "accordion-down 0.2s ease-out",
        "accordion-up": "accordion-up 0.2s ease-out",
      },
    },
  },
  plugins: [animate],
} satisfies Config;
ymc9/my-t3-turbo
0
TypeScript
ymc9
Yiming Cao
zenstackhq
turbo/generators/config.ts
TypeScript
import { execSync } from "node:child_process";
import type { PlopTypes } from "@turbo/gen";

interface PackageJson {
  name: string;
  scripts: Record<string, string>;
  dependencies: Record<string, string>;
  devDependencies: Record<string, string>;
}

export default function generator(plop: PlopTypes.NodePlopAPI): void {
  plop.setGenerator("init", {
    description: "Generate a new package for the Acme Monorepo",
    prompts: [
      {
        type: "input",
        name: "name",
        message:
          "What is the name of the package? (You can skip the `@acme/` prefix)",
      },
      {
        type: "input",
        name: "deps",
        message:
          "Enter a space separated list of dependencies you would like to install",
      },
    ],
    actions: [
      (answers) => {
        if ("name" in answers && typeof answers.name === "string") {
          if (answers.name.startsWith("@acme/")) {
            answers.name = answers.name.replace("@acme/", "");
          }
        }
        return "Config sanitized";
      },
      {
        type: "add",
        path: "packages/{{ name }}/eslint.config.js",
        templateFile: "templates/eslint.config.js.hbs",
      },
      {
        type: "add",
        path: "packages/{{ name }}/package.json",
        templateFile: "templates/package.json.hbs",
      },
      {
        type: "add",
        path: "packages/{{ name }}/tsconfig.json",
        templateFile: "templates/tsconfig.json.hbs",
      },
      {
        type: "add",
        path: "packages/{{ name }}/src/index.ts",
        template: "export const name = '{{ name }}';",
      },
      {
        type: "modify",
        path: "packages/{{ name }}/package.json",
        async transform(content, answers) {
          if ("deps" in answers && typeof answers.deps === "string") {
            const pkg = JSON.parse(content) as PackageJson;
            for (const dep of answers.deps.split(" ").filter(Boolean)) {
              const version = await fetch(
                `https://registry.npmjs.org/-/package/${dep}/dist-tags`,
              )
                .then((res) => res.json())
                .then((json) => json.latest);
              if (!pkg.dependencies) pkg.dependencies = {};
              pkg.dependencies[dep] = `^${version}`;
            }
            return JSON.stringify(pkg, null, 2);
          }
          return content;
        },
      },
      async (answers) => {
        /**
         * Install deps and format everything
         */
        if ("name" in answers && typeof answers.name === "string") {
          // execSync("pnpm dlx sherif@latest --fix", {
          //   stdio: "inherit",
          // });
          execSync("pnpm i", { stdio: "inherit" });
          execSync(
            `pnpm prettier --write packages/${answers.name}/** --list-different`,
          );
          return "Package scaffolded";
        }
        return "Package not scaffolded";
      },
    ],
  });
}
ymc9/my-t3-turbo
0
TypeScript
ymc9
Yiming Cao
zenstackhq
scripts/ceval/eval.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval

import os
import argparse
import pandas as pd
import torch
import json
from llama_evaluator import Llama_Evaluator
import time

choices = ["A", "B", "C", "D"]


def main(args, evaluator, take):
    assert os.path.exists("subject_mapping.json"), "subject_mapping.json not found!"
    with open("subject_mapping.json") as f:
        subject_mapping = json.load(f)
    filenames = os.listdir("data/val")
    subject_list = [val_file.replace("_val.csv", "") for val_file in filenames]
    accuracy, summary = {}, {}

    run_date = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))
    output_dir = args.output_dir
    save_result_dir = os.path.join(output_dir, f"take{take}")
    if not os.path.exists(save_result_dir):
        os.makedirs(save_result_dir, exist_ok=True)

    all_answers = {}
    for index, subject_name in enumerate(subject_list):
        print(f"{index/len(subject_list)} Inference starts at {run_date} on {args.model_path} with subject of {subject_name}!")
        val_file_path = os.path.join('data/val', f'{subject_name}_val.csv')
        dev_file_path = os.path.join('data/dev', f'{subject_name}_dev.csv')
        test_file_path = os.path.join('data/test', f'{subject_name}_test.csv')

        val_df = pd.read_csv(val_file_path) if args.do_test is False else pd.read_csv(test_file_path)
        dev_df = pd.read_csv(dev_file_path) if args.few_shot else None

        correct_ratio, answers = evaluator.eval_subject(
            subject_name, val_df, dev_df,
            save_result_dir=save_result_dir if args.do_save_csv else None,
            few_shot=args.few_shot,
            cot=args.cot,
            with_prompt=args.with_prompt,
            constrained_decoding=args.constrained_decoding,
            do_test=args.do_test)
        print(f"Subject: {subject_name}")
        print(f"Acc: {correct_ratio}")
        accuracy[subject_name] = correct_ratio
        summary[subject_name] = {"score": correct_ratio,
                                 "num": len(val_df),
                                 "correct": correct_ratio * len(val_df) / 100}
        all_answers[subject_name] = answers

    json.dump(all_answers, open(save_result_dir + '/submission.json', 'w'), ensure_ascii=False, indent=4)
    print("Accuracy:")
    for k, v in accuracy.items():
        print(k, ": ", v)

    total_num = 0
    total_correct = 0
    summary['grouped'] = {
        "STEM": {"correct": 0.0, "num": 0},
        "Social Science": {"correct": 0.0, "num": 0},
        "Humanities": {"correct": 0.0, "num": 0},
        "Other": {"correct": 0.0, "num": 0}
    }
    for subj, info in subject_mapping.items():
        group = info[2]
        summary['grouped'][group]["num"] += summary[subj]['num']
        summary['grouped'][group]["correct"] += summary[subj]['correct']
    for group, info in summary['grouped'].items():
        info['score'] = info["correct"] / info["num"]
        total_num += info["num"]
        total_correct += info["correct"]
    summary['All'] = {"score": total_correct / total_num, "num": total_num, "correct": total_correct}

    json.dump(summary, open(save_result_dir + '/summary.json', 'w'), ensure_ascii=False, indent=2)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str)
    parser.add_argument("--cot", choices=["False", "True"], default="False")
    parser.add_argument("--few_shot", choices=["False", "True"], default="True")
    parser.add_argument("--ntrain", "-k", type=int, default=5)
    parser.add_argument("--with_prompt", choices=["False", "True"], default="False")
    parser.add_argument("--constrained_decoding", choices=["False", "True"], default="True")
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--n_times", default=1, type=int)
    parser.add_argument("--do_save_csv", choices=["False", "True"], default="False")
    parser.add_argument("--output_dir", type=str)
    parser.add_argument("--do_test", choices=["False", "True"], default="False")
    args = parser.parse_args()

    args.cot = args.cot == "True"
    args.few_shot = args.few_shot == "True"
    args.with_prompt = args.with_prompt == "True"
    args.constrained_decoding = args.constrained_decoding == "True"
    args.do_test = args.do_test == "True"
    args.do_save_csv = args.do_save_csv == "True"
    if args.constrained_decoding is True:
        args.n_times = max(args.n_times, 1)
    print(args)

    device = torch.device(0)
    print(device)
    evaluator = Llama_Evaluator(
        choices=choices,
        k=args.ntrain,
        model_path=args.model_path,
        device=device,
        temperature=args.temperature
    )
    for i in range(args.n_times):
        main(args, evaluator=evaluator, take=i)
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/ceval/evaluator.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval
import string


class Evaluator:
    def __init__(self, choices, model_name, k=-1):
        self.choices = choices
        self.model_name = model_name
        self.k = k
        self.puncs = list(string.punctuation)

    def format_example(self, line, include_answer=True):
        example = line['question']
        for choice in self.choices:
            example += f'\n{choice}. {line[f"{choice}"]}'
        example += '\n答案:'
        if include_answer:
            example += f'{line["answer"]}\n\n'
        return example

    def generate_few_shot_prompt(self, subject, dev_df):
        prompt = f"以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n"
        k = self.k
        if self.k == -1:
            k = dev_df.shape[0]
        for i in range(k):
            prompt += self.format_example(dev_df.iloc[i, :])
        return prompt

    def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, save_result_dir=None):
        pass

    def normalize_answer(self, s):
        def white_space_fix(text):
            return ' '.join(text.split())

        def remove_punc(text):
            exclude = set(self.puncs)
            return ''.join(ch for ch in text if ch not in exclude)

        def lower(text):
            return text.lower()

        return white_space_fix(remove_punc(lower(s)))

    def exact_match(self, pred, target):
        return self.normalize_answer(pred) == self.normalize_answer(target)
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/ceval/llama_evaluator.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import os import re from tqdm import tqdm import random import numpy as np import torch from transformers import LlamaForCausalLM, LlamaTokenizer from evaluator import Evaluator class Llama_Evaluator(Evaluator): def __init__(self, choices, k, model_path, device, temperature=0.2): super(Llama_Evaluator, self).__init__(choices, model_path, k) load_type = torch.float16 self.model_path = model_path self.device = device self.tokenizer = LlamaTokenizer.from_pretrained(model_path) self.model = LlamaForCausalLM.from_pretrained( model_path, load_in_8bit=False, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto') self.generation_config = dict( temperature=temperature, top_k=40, top_p=0.9, do_sample=True, num_beams=1, repetition_penalty=1.1, max_new_tokens=20 ) self.sA_id = self.tokenizer.encode("A", add_special_tokens=False)[0] self.sB_id = self.tokenizer.encode("B", add_special_tokens=False)[0] self.sC_id = self.tokenizer.encode("C", add_special_tokens=False)[0] self.sD_id = self.tokenizer.encode("D", add_special_tokens=False)[0] self.A_id = self.tokenizer.encode(":A")[-1] self.B_id = self.tokenizer.encode(":B")[-1] self.C_id = self.tokenizer.encode(":C")[-1] self.D_id = self.tokenizer.encode(":D")[-1] def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, cot=False, save_result_dir=None, with_prompt=False, constrained_decoding=False, do_test=False): all_answers = {} if constrained_decoding is True: self.generation_config['output_scores'] = True self.generation_config['return_dict_in_generate'] = True self.generation_config['max_new_tokens'] = 1 self.generation_config['top_p'] = 1.0 self.generation_config['top_k'] = 0 correct_num = 0 if save_result_dir: result = [] score = [] if few_shot: history = self.generate_few_shot_prompt(subject_name, dev_df, cot=cot) else: history = '' answers = ['NA'] * len(test_df) if do_test is True else list(test_df['answer']) for row_index, row in tqdm(test_df.iterrows(), total=len(test_df)): question = self.format_example(row, include_answer=False, cot=cot,with_prompt=with_prompt) instruction = history + question if with_prompt: prompt_template = ( "Below is an instruction that describes a task. 
" "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{instruction}\n\n### Response: ") instruction = prompt_template.format_map({'instruction': instruction,'subject':subject_name}) inputs = self.tokenizer(instruction, return_tensors="pt") generation_output = self.model.generate( input_ids = inputs["input_ids"].to(self.device), attention_mask = inputs['attention_mask'].to(self.device), eos_token_id=self.tokenizer.eos_token_id, pad_token_id=self.tokenizer.pad_token_id, **self.generation_config ) batch_size, length = inputs.input_ids.shape if constrained_decoding is True: logits = generation_output.scores[0][0] logits = logits.float().cpu().detach() choices1_logits = logits[[self.sA_id,self.sB_id,self.sC_id,self.sD_id]] choices2_logits = logits[[self.A_id,self.B_id,self.C_id,self.D_id]] choicesAll_logits = (choices1_logits + choices2_logits).numpy() assert not (np.any(np.isinf(choicesAll_logits)) or np.any(np.isnan(choicesAll_logits))) ans = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(choicesAll_logits)] response = self.tokenizer.decode([logits.argmax(-1).item()]) else: response = self.tokenizer.decode(generation_output[0, length:], skip_special_tokens=True) ans, direct_extract = self.extract_answer(row, response) if ans == answers[row_index]: correct_num += 1 correct = 1 else: correct = 0 print(f"\n=======begin {str(row_index)}=======") print("question: ", question) print("response: ", response) print("ans: ", ans) print("ground truth: ", answers[row_index], "\n") if save_result_dir: result.append(response) score.append(correct) print(f"=======end {str(row_index)}=======") all_answers[str(row_index)] = ans correct_ratio = 100*correct_num/len(answers) if save_result_dir: test_df['model_output'] = result test_df['correctness'] = score test_df.to_csv(os.path.join(save_result_dir, f'{subject_name}_test.csv')) return correct_ratio, all_answers def format_example(self, line, include_answer=True, cot=False, with_prompt=False): example = line['question'] for choice in self.choices: example += f'\n{choice}. {line[f"{choice}"]}' if include_answer: if cot: example += "\n答案:让我们一步一步思考,\n" + \ line["explanation"] + f"\n所以答案是{line['answer']}。\n\n" else: example += '\n答案:' + line["answer"] + '\n\n' else: if with_prompt is False: if cot: example += "\n答案:让我们一步一步思考,\n1." else: example += '\n答案:' else: if cot: example += "\n答案是什么?让我们一步一步思考,\n1." else: example += '\n答案是什么? 
' return example def generate_few_shot_prompt(self, subject, dev_df, cot=False): prompt = f"以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n" k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): prompt += self.format_example( dev_df.iloc[i, :], include_answer=True, cot=cot ) return prompt def extract_answer(self, line, gen_ans): m = re.findall(r'所以答案是(.+?)。', gen_ans, re.M) if len(m) > 0 and m[-1] in self.choices: return m[-1], True answer_patterns = [ r'([ABCD])是正确的', r'选项([ABCD])正确', r'答案为([ABCD])', r'答案是([ABCD])', r'答案([ABCD])', r'选择([ABCD])', r'答案:([ABCD])', r'选择答案([ABCD])' ] # RE extraction for answer_pattern in answer_patterns: m = re.search(answer_pattern, gen_ans, re.M) if m: answer = m.group(1) return answer, False # only containing one choice-character m = re.findall(r'[ABCD]', gen_ans, re.M) if len(m) >= 1: answer = m[0] return answer, False # only containing one choice-context choices_dict = {} pattern = "" for c in self.choices: choices_dict[str(line[f'{c}'])] = c pattern += re.escape(str(line[f'{c}']))+"|" pattern = pattern[:-1] m = re.findall(pattern, gen_ans, re.M) print("w/ escape:",repr(pattern),gen_ans,(len(m)>=1)) if len(m) >= 1: answer = choices_dict[m[0]] return answer, False return random.choice('ABCD'), False
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/crawl_prompt.py
Python
import openai
import sys
import random

openai.api_key = ""  # you must provide your OpenAI API key before crawling
if not openai.api_key:
    raise ValueError("OpenAI API key not provided. Please set the 'openai.api_key' variable.")


def return_random_prompt():
    system_prompt = "你需要尽可能给出多样化的任务指令和对应的回答。我们将用于人工评估ChatGPT模型对指令的完成情况。要求:\n"

    # generate random topics
    topic_list = ["科技", "娱乐", "体育", "金融", "时政", "教育", "医疗", "旅游", "美食", "汽车", "房产", "文化", "历史",
                  "地理", "自然", "人文", "社会", "法律", "军事", "政治", "经济", "文学", "艺术", "宗教", "哲学", "语言",
                  "数学", "物理", "化学", "生物", "地球科学", "天文学", "计算机科学", "工程", "建筑", "设计", "音乐", "舞蹈",
                  "电影", "电视", "动漫", "游戏", "健康", "美容", "时尚", "家居", "家电", "家具", "家装", "母婴", "育儿",
                  "职场", "工作", "生活", "养生", "心理", "情感", "人际", "社交", "交友", "恋爱", "婚姻", "家庭", "亲子",
                  "宠物", "动物", "植物", "食品", "饮料", "餐饮", "酒店", "购物", "消费", "理财", "税务", "法规", "法院",
                  "司法", "刑事", "民事", "行政", "战争"]
    system_prompt += "1. 主题多样化,涵盖各个领域,例如:" + "、".join(random.sample(topic_list, 10)) + "等。\n"

    # generate random tasks
    task_list = ["开放式生成", "分类", "问答", "编辑", "摘要", "写作", "翻译", "写代码", "分析", "代码解析", "常识推理",
                 "写信", "抽取", "推荐"]
    system_prompt += "2. 表述多样化,结合真实问题;指令类型多样化,例如:" + "、".join(random.sample(task_list, 10)) + "等。\n"

    # other requirements
    system_prompt += "3. 如果遇到无法处理的指令(只靠文本无法回答),给出无法处理的回复。\n"
    system_prompt += "4. 除非特别要求,请使用中文,指令可以是命令句、疑问句、或其他合适的类型。\n"
    system_prompt += "5. 为指令生成一个适当且涉及真实情况的<input>,不应该只包含简单的占位符。<input>应提供实质性的内容,具有挑战性。字数不超过" + str(random.randint(80, 120)) + "字。\n"
    system_prompt += "6. <output>应该是对指令的适当且真实的回应,不能只回复答应或拒绝请求。如果需要额外信息才能回复时,请努力预测用户意图并尝试回复。<output>的内容应少于" + str(random.randint(128, 512)) + "字。\n\n"

    system_prompt += "请给出满足条件的20条JSON格式数据:\n"
    return system_prompt


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python crawl_prompt.py <output_file>")
        exit(1)

    output_file = open(sys.argv[1], 'w')

    MAX_EPOCHS = 1  # number of data to generate (each prompt contains 20 JSON-formatted data)
    for k in range(MAX_EPOCHS):
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",  # here we use `gpt-3.5-turbo` model, while Stanford-Alpaca uses `text-davinci-003`
            messages=[
                {"role": "user", "content": return_random_prompt()},
            ]
        )
        output_file.write(response["choices"][0]["message"]["content"] + '\n')
    output_file.close()
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/inference/gradio_demo.py
Python
import torch from transformers import ( LlamaForCausalLM, LlamaTokenizer, StoppingCriteria, ) import gradio as gr import argparse import os from queue import Queue from threading import Thread import traceback import gc # Parse command-line arguments parser = argparse.ArgumentParser() parser.add_argument( '--base_model', default=None, type=str, required=True, help='Base model path') parser.add_argument('--lora_model', default=None, type=str, help="If None, perform inference on the base model") parser.add_argument( '--tokenizer_path', default=None, type=str, help='If None, lora model path or base model path will be used') parser.add_argument( '--gpus', default="0", type=str, help='If None, cuda:0 will be used. Inference using multi-cards: --gpus=0,1,... ') parser.add_argument('--share', default=True, help='Share gradio domain name') parser.add_argument('--port', default=19324, type=int, help='Port of gradio demo') parser.add_argument( '--max_memory', default=256, type=int, help='Maximum input prompt length, if exceeded model will receive prompt[-max_memory:]') parser.add_argument( '--load_in_8bit', action='store_true', help='Use 8 bit quantified model') parser.add_argument( '--only_cpu', action='store_true', help='Only use CPU for inference') parser.add_argument( '--alpha', type=str, default="1.0", help="The scaling factor of NTK method, can be a float or 'auto'. ") args = parser.parse_args() if args.only_cpu is True: args.gpus = "" from patches import apply_attention_patch, apply_ntk_scaling_patch apply_attention_patch(use_memory_efficient_attention=True) apply_ntk_scaling_patch(args.alpha) # Set CUDA devices if available os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus # Peft library can only import after setting CUDA devices from peft import PeftModel # Set up the required components: model and tokenizer def setup(): global tokenizer, model, device, share, port, max_memory max_memory = args.max_memory port = args.port share = args.share load_in_8bit = args.load_in_8bit load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') if args.tokenizer_path is None: args.tokenizer_path = args.lora_model if args.lora_model is None: args.tokenizer_path = args.base_model tokenizer = LlamaTokenizer.from_pretrained(args.tokenizer_path) base_model = LlamaForCausalLM.from_pretrained( args.base_model, load_in_8bit=load_in_8bit, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto', ) model_vocab_size = base_model.get_input_embeddings().weight.size(0) tokenzier_vocab_size = len(tokenizer) print(f"Vocab of the base model: {model_vocab_size}") print(f"Vocab of the tokenizer: {tokenzier_vocab_size}") if model_vocab_size != tokenzier_vocab_size: assert tokenzier_vocab_size > model_vocab_size print("Resize model embeddings to fit tokenizer") base_model.resize_token_embeddings(tokenzier_vocab_size) if args.lora_model is not None: print("loading peft model") model = PeftModel.from_pretrained( base_model, args.lora_model, torch_dtype=load_type, device_map='auto', ) else: model = base_model if device == torch.device('cpu'): model.float() model.eval() # Reset the user input def reset_user_input(): return gr.update(value='') # Reset the state def reset_state(): return [] # Generate the prompt for the input of LM model def generate_prompt(instruction): return f""" Below is an instruction that describes a task. Write a response that appropriately completes the request. 
{instruction} """ # User interaction function for chat def user(user_message, history): return gr.update(value="", interactive=False), history + \ [[user_message, None]] class Stream(StoppingCriteria): def __init__(self, callback_func=None): self.callback_func = callback_func def __call__(self, input_ids, scores) -> bool: if self.callback_func is not None: self.callback_func(input_ids[0]) return False class Iteratorize: """ Transforms a function that takes a callback into a lazy iterator (generator). Adapted from: https://stackoverflow.com/a/9969000 """ def __init__(self, func, kwargs=None, callback=None): self.mfunc = func self.c_callback = callback self.q = Queue() self.sentinel = object() self.kwargs = kwargs or {} self.stop_now = False def _callback(val): if self.stop_now: raise ValueError self.q.put(val) def gentask(): try: ret = self.mfunc(callback=_callback, **self.kwargs) except ValueError: pass except Exception: traceback.print_exc() clear_torch_cache() self.q.put(self.sentinel) if self.c_callback: self.c_callback(ret) self.thread = Thread(target=gentask) self.thread.start() def __iter__(self): return self def __next__(self): obj = self.q.get(True, None) if obj is self.sentinel: raise StopIteration else: return obj def __del__(self): clear_torch_cache() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.stop_now = True clear_torch_cache() def clear_torch_cache(): gc.collect() if torch.cuda.device_count() > 0: torch.cuda.empty_cache() # Perform prediction based on the user input and history @torch.no_grad() def predict( history, max_new_tokens=128, top_p=0.75, temperature=0.1, top_k=40, do_sample=True, repetition_penalty=1.0 ): history[-1][1] = "" if len(history) != 0: input = "".join(["### Instruction:\n" + i[0] + "\n\n" + "### Response: " + i[1] + ("\n\n" if i[1] != "" else "") for i in history]) if len(input) > max_memory: input = input[-max_memory:] prompt = generate_prompt(input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(device) generate_params = { 'input_ids': input_ids, 'max_new_tokens': max_new_tokens, 'top_p': top_p, 'temperature': temperature, 'top_k': top_k, 'do_sample': do_sample, 'repetition_penalty': repetition_penalty, } def generate_with_callback(callback=None, **kwargs): if 'stopping_criteria' in kwargs: kwargs['stopping_criteria'].append(Stream(callback_func=callback)) else: kwargs['stopping_criteria'] = [Stream(callback_func=callback)] clear_torch_cache() with torch.no_grad(): model.generate(**kwargs) def generate_with_streaming(**kwargs): return Iteratorize(generate_with_callback, kwargs, callback=None) with generate_with_streaming(**generate_params) as generator: for output in generator: next_token_ids = output[len(input_ids[0]):] if next_token_ids[0] == tokenizer.eos_token_id: break new_tokens = tokenizer.decode( next_token_ids, skip_special_tokens=True) if isinstance(tokenizer, LlamaTokenizer) and len(next_token_ids) > 0: if tokenizer.convert_ids_to_tokens(int(next_token_ids[0])).startswith('▁'): new_tokens = ' ' + new_tokens history[-1][1] = new_tokens yield history if len(next_token_ids) >= max_new_tokens: break # Call the setup function to initialize the components setup() # Create the Gradio interface with gr.Blocks() as demo: github_banner_path = 'https://raw.githubusercontent.com/ymcui/Chinese-LLaMA-Alpaca/main/pics/banner.png' gr.HTML(f'<p align="center"><a href="https://github.com/ymcui/Chinese-LLaMA-Alpaca"><img src={github_banner_path} width="700"/></a></p>') gr.Markdown("> 
为了促进大模型在中文NLP社区的开放研究,本项目开源了中文LLaMA模型和指令精调的Alpaca大模型。这些模型在原版LLaMA的基础上扩充了中文词表并使用了中文数据进行二次预训练,进一步提升了中文基础语义理解能力。同时,中文Alpaca模型进一步使用了中文指令数据进行精调,显著提升了模型对指令的理解和执行能力。") chatbot = gr.Chatbot() with gr.Row(): with gr.Column(scale=4): with gr.Column(scale=12): user_input = gr.Textbox( show_label=False, placeholder="Shift + Enter发送消息...", lines=10).style( container=False) with gr.Column(min_width=32, scale=1): submitBtn = gr.Button("Submit", variant="primary") with gr.Column(scale=1): emptyBtn = gr.Button("Clear History") max_new_token = gr.Slider( 0, 4096, value=512, step=1.0, label="Maximum New Token Length", interactive=True) top_p = gr.Slider(0, 1, value=0.9, step=0.01, label="Top P", interactive=True) temperature = gr.Slider( 0, 1, value=0.5, step=0.01, label="Temperature", interactive=True) top_k = gr.Slider(1, 40, value=40, step=1, label="Top K", interactive=True) do_sample = gr.Checkbox( value=True, label="Do Sample", info="use random sample strategy", interactive=True) repetition_penalty = gr.Slider( 1.0, 3.0, value=1.1, step=0.1, label="Repetition Penalty", interactive=True) params = [user_input, chatbot] predict_params = [ chatbot, max_new_token, top_p, temperature, top_k, do_sample, repetition_penalty] submitBtn.click( user, params, params, queue=False).then( predict, predict_params, chatbot).then( lambda: gr.update( interactive=True), None, [user_input], queue=False) user_input.submit( user, params, params, queue=False).then( predict, predict_params, chatbot).then( lambda: gr.update( interactive=True), None, [user_input], queue=False) submitBtn.click(reset_user_input, [], [user_input]) emptyBtn.click(reset_state, outputs=[chatbot], show_progress=True) # Launch the Gradio interface demo.queue().launch( share=share, inbrowser=True, server_name='0.0.0.0', server_port=port)
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/inference/inference_hf.py
Python
import argparse
import json, os

parser = argparse.ArgumentParser()
parser.add_argument('--base_model', default=None, type=str, required=True)
parser.add_argument('--lora_model', default=None, type=str, help="If None, perform inference on the base model")
parser.add_argument('--tokenizer_path', default=None, type=str)
parser.add_argument('--data_file', default=None, type=str, help="A file that contains instructions (one instruction per line)")
parser.add_argument('--with_prompt', action='store_true', help="wrap the input with the prompt automatically")
parser.add_argument('--interactive', action='store_true', help="run in the instruction mode (single-turn)")
parser.add_argument('--predictions_file', default='./predictions.json', type=str)
parser.add_argument('--gpus', default="0", type=str)
parser.add_argument('--only_cpu', action='store_true', help='only use CPU for inference')
parser.add_argument('--alpha', type=str, default="1.0", help="The scaling factor of NTK method, can be a float or 'auto'. ")
parser.add_argument('--load_in_8bit', action='store_true', help="Load the LLM in the 8bit mode")
args = parser.parse_args()

if args.only_cpu is True:
    args.gpus = ""
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

import torch
from transformers import LlamaForCausalLM, LlamaTokenizer
from peft import PeftModel
from patches import apply_attention_patch, apply_ntk_scaling_patch

apply_attention_patch(use_memory_efficient_attention=True)
apply_ntk_scaling_patch(args.alpha)

generation_config = dict(
    temperature=0.2,
    top_k=40,
    top_p=0.9,
    do_sample=True,
    num_beams=1,
    repetition_penalty=1.1,
    max_new_tokens=400
)

# The prompt template below is taken from llama.cpp
# and is slightly different from the one used in training.
# But we find it gives better results
prompt_input = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n\n{instruction}\n\n### Response:\n\n"
)

sample_data = ["为什么要减少污染,保护环境?"]


def generate_prompt(instruction, input=None):
    if input:
        instruction = instruction + '\n' + input
    return prompt_input.format_map({'instruction': instruction})


if __name__ == '__main__':
    load_type = torch.float16
    if torch.cuda.is_available():
        device = torch.device(0)
    else:
        device = torch.device('cpu')
    if args.tokenizer_path is None:
        args.tokenizer_path = args.lora_model
        if args.lora_model is None:
            args.tokenizer_path = args.base_model
    tokenizer = LlamaTokenizer.from_pretrained(args.tokenizer_path)

    base_model = LlamaForCausalLM.from_pretrained(
        args.base_model,
        load_in_8bit=args.load_in_8bit,
        torch_dtype=load_type,
        low_cpu_mem_usage=True,
        device_map='auto',
    )

    model_vocab_size = base_model.get_input_embeddings().weight.size(0)
    tokenzier_vocab_size = len(tokenizer)
    print(f"Vocab of the base model: {model_vocab_size}")
    print(f"Vocab of the tokenizer: {tokenzier_vocab_size}")
    if model_vocab_size != tokenzier_vocab_size:
        assert tokenzier_vocab_size > model_vocab_size
        print("Resize model embeddings to fit tokenizer")
        base_model.resize_token_embeddings(tokenzier_vocab_size)
    if args.lora_model is not None:
        print("loading peft model")
        model = PeftModel.from_pretrained(base_model, args.lora_model, torch_dtype=load_type, device_map='auto',)
    else:
        model = base_model

    if device == torch.device('cpu'):
        model.float()
    # test data
    if args.data_file is None:
        examples = sample_data
    else:
        with open(args.data_file, 'r') as f:
            examples = [l.strip() for l in f.readlines()]
        print("first 10 examples:")
        for example in examples[:10]:
            print(example)

    model.eval()

    with torch.no_grad():
        if args.interactive:
            print("Start inference with instruction mode.")
            print('=' * 85)
            print("+ 该模式下仅支持单轮问答,无多轮对话能力。\n"
                  "+ 如要进行多轮对话,请使用llama.cpp或llamachat工具。")
            print('-' * 85)
            print("+ This mode only supports single-turn QA.\n"
                  "+ If you want to experience multi-turn dialogue, please use llama.cpp or llamachat.")
            print('=' * 85)
            while True:
                raw_input_text = input("Input:")
                if len(raw_input_text.strip()) == 0:
                    break
                if args.with_prompt:
                    input_text = generate_prompt(instruction=raw_input_text)
                else:
                    input_text = raw_input_text
                inputs = tokenizer(input_text, return_tensors="pt")  # add_special_tokens=False ?
                generation_output = model.generate(
                    input_ids=inputs["input_ids"].to(device),
                    attention_mask=inputs['attention_mask'].to(device),
                    eos_token_id=tokenizer.eos_token_id,
                    pad_token_id=tokenizer.pad_token_id,
                    **generation_config
                )
                s = generation_output[0]
                output = tokenizer.decode(s, skip_special_tokens=True)
                if args.with_prompt:
                    response = output.split("### Response:")[1].strip()
                else:
                    response = output
                print("Response: ", response)
                print("\n")
        else:
            print("Start inference.")
            results = []
            for index, example in enumerate(examples):
                if args.with_prompt is True:
                    input_text = generate_prompt(instruction=example)
                else:
                    input_text = example
                inputs = tokenizer(input_text, return_tensors="pt")  # add_special_tokens=False ?
                generation_output = model.generate(
                    input_ids=inputs["input_ids"].to(device),
                    attention_mask=inputs['attention_mask'].to(device),
                    eos_token_id=tokenizer.eos_token_id,
                    pad_token_id=tokenizer.pad_token_id,
                    **generation_config
                )
                s = generation_output[0]
                output = tokenizer.decode(s, skip_special_tokens=True)
                if args.with_prompt:
                    response = output.split("### Response:")[1].strip()
                else:
                    response = output
                print(f"======={index}=======")
                print(f"Input: {example}\n")
                print(f"Output: {response}\n")
                results.append({"Input": input_text, "Output": response})

            dirname = os.path.dirname(args.predictions_file)
            os.makedirs(dirname, exist_ok=True)
            with open(args.predictions_file, 'w') as f:
                json.dump(results, f, ensure_ascii=False, indent=2)
            with open(dirname + '/generation_config.json', 'w') as f:
                json.dump(generation_config, f, ensure_ascii=False, indent=2)
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/inference/patches.py
Python
import torch from torch import nn from typing import Optional, Tuple, Union import transformers from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, rotate_half import math try: from xformers import ops as xops except ImportError: xops = None print( "Xformers is not installed correctly. If you want to use memory_efficient_attention use the following command to install Xformers\npip install xformers." ) STORE_KV_BEFORE_ROPE = False USE_MEM_EFF_ATTENTION = False ALPHA = 1.0 def apply_rotary_pos_emb_single(q, cos, sin, position_ids): # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q * cos) + (rotate_half(q) * sin) return q_embed def xformers_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] if STORE_KV_BEFORE_ROPE is False: cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None else: if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states = apply_rotary_pos_emb_single(query_states, cos, sin, position_ids) position_ids = torch.arange(kv_seq_len, dtype=torch.long, device=cos.device) position_ids = position_ids.unsqueeze(0).view(-1, kv_seq_len) key_states = apply_rotary_pos_emb_single(key_states, cos, sin, position_ids) if xops is not None and USE_MEM_EFF_ATTENTION: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_bias = None if (query_states.size(1)==1 and key_states.size(1)>1) else xops.LowerTriangularMask() attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=attn_bias, p=0) else: attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask 
is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask attn_weights = torch.max( attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device) ) # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value old_init = transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.__init__ def adaptive_ntk_init(self, dim, max_position_embeddings=2048, base=10000, device=None): self.dim = dim self.alpha = ALPHA if isinstance(ALPHA,(float,int)): base = base * ALPHA ** (dim / (dim-2)) self.base = base elif ALPHA=='auto': self.base = base else: raise ValueError(ALPHA) old_init(self, dim, max_position_embeddings, base, device) ntk_inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) self.register_buffer("ntk_inv_freq", ntk_inv_freq, persistent=False) def adaptive_ntk_forward(self, x, seq_len=None): if seq_len > self.max_seq_len_cached: if isinstance(self.alpha,(float,int)): self.max_seq_len_cached = seq_len t = torch.arange(seq_len, device=x.device, dtype=self.ntk_inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.ntk_inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False) self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False) return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), ) elif self.alpha=='auto': t = torch.arange(seq_len, device=x.device, dtype=self.ntk_inv_freq.dtype) dim = self.dim alpha = (seq_len / 1024 - 1) * 1.1 base = self.base * alpha ** (dim / (dim-2)) ntk_inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(x.device) / dim )) freqs = torch.einsum("i,j->ij", t, ntk_inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) cos_cached = emb.cos()[None, None, :, :] sin_cached = emb.sin()[None, None, :, :] return ( cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype) ) else: return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype) ) def apply_attention_patch( use_memory_efficient_attention=False, store_kv_before_rope=False ): global USE_MEM_EFF_ATTENTION, STORE_KV_BEFORE_ROPE if use_memory_efficient_attention is True and xops is not None: USE_MEM_EFF_ATTENTION = use_memory_efficient_attention print("USE_MEM_EFF_ATTENTION: ",USE_MEM_EFF_ATTENTION) STORE_KV_BEFORE_ROPE = store_kv_before_rope print("STORE_KV_BEFORE_ROPE:", STORE_KV_BEFORE_ROPE) transformers.models.llama.modeling_llama.LlamaAttention.forward = xformers_forward def apply_ntk_scaling_patch(alpha: Union[float,str]): global ALPHA ALPHA = alpha try: ALPHA = float(ALPHA) except ValueError: if ALPHA!="auto": raise ValueError(f"Alpha 
can only be a float or 'auto', but given {ALPHA}") print(f"Apply NTK scaling with ALPHA={ALPHA}") transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.__init__ = adaptive_ntk_init transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.forward = adaptive_ntk_forward
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/langchain/langchain_qa.py
Python
import argparse import os parser = argparse.ArgumentParser() parser.add_argument('--file_path',required=True,type=str) parser.add_argument('--embedding_path',required=True,type=str) parser.add_argument('--model_path',required=True,type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--chain_type', default="refine", type=str) args = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus # os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION']='python' file_path = args.file_path embedding_path = args.embedding_path model_path = args.model_path import torch from langchain import HuggingFacePipeline from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import FAISS from langchain.document_loaders import TextLoader from langchain.prompts import PromptTemplate from langchain.chains import RetrievalQA from langchain.embeddings.huggingface import HuggingFaceEmbeddings prompt_template = ("Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{context}\n{question}\n\n### Response: ") refine_prompt_template = ( "Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n" "这是原始问题: {question}\n" "已有的回答: {existing_answer}\n" "现在还有一些文字,(如果有需要)你可以根据它们完善现有的回答。" "\n\n" "{context_str}\n" "\\nn" "请根据新的文段,进一步完善你的回答。\n\n" "### Response: " ) initial_qa_template = ( "Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n" "以下为背景知识:\n" "{context_str}" "\n" "请根据以上背景知识, 回答这个问题:{question}。\n\n" "### Response: " ) if __name__ == '__main__': load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') loader = TextLoader(file_path) documents = loader.load() text_splitter = RecursiveCharacterTextSplitter( chunk_size=600, chunk_overlap=100) texts = text_splitter.split_documents(documents) print("Loading the embedding model...") embeddings = HuggingFaceEmbeddings(model_name=embedding_path) docsearch = FAISS.from_documents(texts, embeddings) print("loading LLM...") model = HuggingFacePipeline.from_model_id(model_id=model_path, task="text-generation", model_kwargs={ "torch_dtype" : load_type, "low_cpu_mem_usage" : True, "temperature": 0.2, "max_length": 1000, "device_map": "auto", "repetition_penalty":1.1} ) if args.chain_type == "stuff": PROMPT = PromptTemplate( template=prompt_template, input_variables=["context", "question"] ) chain_type_kwargs = {"prompt": PROMPT} qa = RetrievalQA.from_chain_type( llm=model, chain_type="stuff", retriever=docsearch.as_retriever(search_kwargs={"k": 1}), chain_type_kwargs=chain_type_kwargs) elif args.chain_type == "refine": refine_prompt = PromptTemplate( input_variables=["question", "existing_answer", "context_str"], template=refine_prompt_template, ) initial_qa_prompt = PromptTemplate( input_variables=["context_str", "question"], template=initial_qa_template, ) chain_type_kwargs = {"question_prompt": initial_qa_prompt, "refine_prompt": refine_prompt} qa = RetrievalQA.from_chain_type( llm=model, chain_type="refine", retriever=docsearch.as_retriever(search_kwargs={"k": 1}), chain_type_kwargs=chain_type_kwargs) while True: query = input("请输入问题:") if len(query.strip())==0: break print(qa.run(query))
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/langchain/langchain_sum.py
Python
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument('--file_path', required=True, type=str)
parser.add_argument('--model_path', required=True, type=str)
parser.add_argument('--gpus', default="0", type=str)
parser.add_argument('--chain_type', default="refine", type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
# os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION']='python'
file_path = args.file_path
model_path = args.model_path

import torch
from langchain import HuggingFacePipeline
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.prompts import PromptTemplate
from langchain.chains.summarize import load_summarize_chain

prompt_template = ("Below is an instruction that describes a task. "
                   "Write a response that appropriately completes the request.\n\n"
                   "### Instruction:\n请为以下文字写一段摘要:\n{text}\n\n### Response: ")

refine_template = (
    "Below is an instruction that describes a task."
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n"
    "已有一段摘要:{existing_answer}\n"
    "现在还有一些文字,(如果有需要)你可以根据它们完善现有的摘要。"
    "\n"
    "{text}\n"
    "\n"
    "如果这段文字没有用,返回原来的摘要即可。请你生成一个最终的摘要。"
    "\n\n### Response: "
)


if __name__ == '__main__':
    load_type = torch.float16
    if torch.cuda.is_available():
        device = torch.device(0)
    else:
        device = torch.device('cpu')

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=100, length_function=len)
    with open(file_path) as f:
        text = f.read()
    docs = text_splitter.create_documents([text])

    print("loading LLM...")
    model = HuggingFacePipeline.from_model_id(model_id=model_path,
                                              task="text-generation",
                                              model_kwargs={
                                                  "torch_dtype": load_type,
                                                  "low_cpu_mem_usage": True,
                                                  "temperature": 0.2,
                                                  "max_length": 1000,
                                                  "device_map": "auto",
                                                  "repetition_penalty": 1.1}
                                              )

    PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
    REFINE_PROMPT = PromptTemplate(
        template=refine_template, input_variables=["existing_answer", "text"],
    )

    if args.chain_type == "stuff":
        chain = load_summarize_chain(model, chain_type="stuff", prompt=PROMPT)
    elif args.chain_type == "refine":
        chain = load_summarize_chain(model, chain_type="refine", question_prompt=PROMPT, refine_prompt=REFINE_PROMPT)
    print(chain.run(docs))
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/merge_llama_with_chinese_lora.py
Python
""" Usage: python merge_llama_with_chinese_lora.py \ --base_model path/to/llama/model \ --lora_model path/to/first/lora/model [path/to/second/lora/model] \ --output_type [pth|huggingface] \ --output_dir path/to/output/dir """ import argparse import json import os import gc import torch import peft from peft import PeftModel from transformers import LlamaForCausalLM, LlamaTokenizer from huggingface_hub import hf_hub_download parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, required=True, type=str, help="Please specify a base_model") parser.add_argument('--lora_model', default=None, required=True, type=str, help="Please specify LoRA models to be merged (ordered); use commas to separate multiple LoRA models.") parser.add_argument('--offload_dir', default=None, type=str, help="(Optional) Please specify a temp folder for offloading (useful for low-RAM machines). Default None (disable offload).") parser.add_argument('--output_type', default='pth',choices=['pth','huggingface'], type=str, help="save the merged model in pth or huggingface format.") parser.add_argument('--output_dir', default='./', type=str) emb_to_model_size = { 4096 : '7B', 5120 : '13B', 6656 : '33B', 8192 : '65B', } num_shards_of_models = {'7B': 1, '13B': 2, '33B': 4, '65B': 8} params_of_models = { '7B': { "dim": 4096, "multiple_of": 256, "n_heads": 32, "n_layers": 32, "norm_eps": 1e-06, "vocab_size": -1, }, '13B': { "dim": 5120, "multiple_of": 256, "n_heads": 40, "n_layers": 40, "norm_eps": 1e-06, "vocab_size": -1, }, '33B': { "dim": 6656, "multiple_of": 256, "n_heads": 52, "n_layers": 60, "norm_eps": 1e-06, "vocab_size": -1, }, '65B': { "dim": 8192, "multiple_of": 256, "n_heads": 64, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1, }, } def transpose(weight, fan_in_fan_out): return weight.T if fan_in_fan_out else weight # Borrowed and modified from https://github.com/tloen/alpaca-lora def translate_state_dict_key(k): k = k.replace("base_model.model.", "") if k == "model.embed_tokens.weight": return "tok_embeddings.weight" elif k == "model.norm.weight": return "norm.weight" elif k == "lm_head.weight": return "output.weight" elif k.startswith("model.layers."): layer = k.split(".")[2] if k.endswith(".self_attn.q_proj.weight"): return f"layers.{layer}.attention.wq.weight" elif k.endswith(".self_attn.k_proj.weight"): return f"layers.{layer}.attention.wk.weight" elif k.endswith(".self_attn.v_proj.weight"): return f"layers.{layer}.attention.wv.weight" elif k.endswith(".self_attn.o_proj.weight"): return f"layers.{layer}.attention.wo.weight" elif k.endswith(".mlp.gate_proj.weight"): return f"layers.{layer}.feed_forward.w1.weight" elif k.endswith(".mlp.down_proj.weight"): return f"layers.{layer}.feed_forward.w2.weight" elif k.endswith(".mlp.up_proj.weight"): return f"layers.{layer}.feed_forward.w3.weight" elif k.endswith(".input_layernorm.weight"): return f"layers.{layer}.attention_norm.weight" elif k.endswith(".post_attention_layernorm.weight"): return f"layers.{layer}.ffn_norm.weight" elif k.endswith("rotary_emb.inv_freq") or "lora" in k: return None else: print(layer, k) raise NotImplementedError else: print(k) raise NotImplementedError def unpermute(w): return ( w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim) ) def save_shards(model_sd, num_shards: int): # Add the no_grad context manager with torch.no_grad(): if num_shards == 1: new_state_dict = {} for k, v in model_sd.items(): new_k = translate_state_dict_key(k) if new_k is not None: if "wq" in new_k or "wk" in 
new_k: new_state_dict[new_k] = unpermute(v) else: new_state_dict[new_k] = v os.makedirs(output_dir, exist_ok=True) print(f"Saving shard 1 of {num_shards} into {output_dir}/consolidated.00.pth") torch.save(new_state_dict, output_dir + "/consolidated.00.pth") with open(output_dir + "/params.json", "w") as f: json.dump(params, f) else: new_state_dicts = [dict() for _ in range(num_shards)] for k in list(model_sd.keys()): v = model_sd[k] new_k = translate_state_dict_key(k) if new_k is not None: if new_k=='tok_embeddings.weight': print(f"Processing {new_k}") assert v.size(1)%num_shards==0 splits = v.split(v.size(1)//num_shards,dim=1) elif new_k=='output.weight': print(f"Processing {new_k}") if v.size(0)%num_shards==0: splits = v.split(v.size(0)//num_shards,dim=0) else: size_list = [v.size(0)//num_shards] * num_shards size_list[-1] += v.size(0)%num_shards splits = v.split(size_list, dim=0) # 13B: size_list == [24976,24977] elif new_k=='norm.weight': print(f"Processing {new_k}") splits = [v] * num_shards elif 'ffn_norm.weight' in new_k: print(f"Processing {new_k}") splits = [v] * num_shards elif 'attention_norm.weight' in new_k: print(f"Processing {new_k}") splits = [v] * num_shards elif 'w1.weight' in new_k: print(f"Processing {new_k}") splits = v.split(v.size(0)//num_shards,dim=0) elif 'w2.weight' in new_k: print(f"Processing {new_k}") splits = v.split(v.size(1)//num_shards,dim=1) elif 'w3.weight' in new_k: print(f"Processing {new_k}") splits = v.split(v.size(0)//num_shards,dim=0) elif 'wo.weight' in new_k: print(f"Processing {new_k}") splits = v.split(v.size(1)//num_shards,dim=1) elif 'wv.weight' in new_k: print(f"Processing {new_k}") splits = v.split(v.size(0)//num_shards,dim=0) elif "wq.weight" in new_k or "wk.weight" in new_k: print(f"Processing {new_k}") v = unpermute(v) splits = v.split(v.size(0)//num_shards,dim=0) else: print(f"Unexpected key {new_k}") raise ValueError for sd,split in zip(new_state_dicts,splits): sd[new_k] = split.clone() del split del splits del model_sd[k],v gc.collect() # Effectively enforce garbage collection os.makedirs(output_dir, exist_ok=True) for i,new_state_dict in enumerate(new_state_dicts): print(f"Saving shard {i+1} of {num_shards} into {output_dir}/consolidated.0{i}.pth") torch.save(new_state_dict, output_dir + f"/consolidated.0{i}.pth") with open(output_dir + "/params.json", "w") as f: print(f"Saving params.json into {output_dir}/params.json") json.dump(params, f) if __name__=='__main__': args = parser.parse_args() base_model_path = args.base_model lora_model_paths = [s.strip() for s in args.lora_model.split(',') if len(s.strip())!=0] output_dir = args.output_dir output_type = args.output_type offload_dir = args.offload_dir print(f"Base model: {base_model_path}") print(f"LoRA model(s) {lora_model_paths}:") if offload_dir is not None: # Load with offloading, which is useful for low-RAM machines. # Note that if you have enough RAM, please use original method instead, as it is faster. 
base_model = LlamaForCausalLM.from_pretrained( base_model_path, load_in_8bit=False, torch_dtype=torch.float16, offload_folder=offload_dir, offload_state_dict=True, low_cpu_mem_usage=True, device_map={"": "cpu"}, ) else: # Original method without offloading base_model = LlamaForCausalLM.from_pretrained( base_model_path, load_in_8bit=False, torch_dtype=torch.float16, device_map={"": "cpu"}, ) ## infer the model size from the checkpoint embedding_size = base_model.get_input_embeddings().weight.size(1) model_size = emb_to_model_size[embedding_size] print(f"Peft version: {peft.__version__}") print(f"Loading LoRA for {model_size} model") lora_model = None lora_model_sd = None for lora_index, lora_model_path in enumerate(lora_model_paths): print(f"Loading LoRA {lora_model_path}...") tokenizer = LlamaTokenizer.from_pretrained(lora_model_path) print(f"base_model vocab size: {base_model.get_input_embeddings().weight.size(0)}") print(f"tokenizer vocab size: {len(tokenizer)}") model_vocab_size = base_model.get_input_embeddings().weight.size(0) assert len(tokenizer) >= model_vocab_size, \ (f"The vocab size of the tokenizer {len(tokenizer)} is smaller than the vocab size of the base model {model_vocab_size}\n" "This is not the intended use. Please check your model and tokenizer.") if model_vocab_size != len(tokenizer): base_model.resize_token_embeddings(len(tokenizer)) print(f"Extended vocabulary size to {len(tokenizer)}") first_weight = base_model.model.layers[0].self_attn.q_proj.weight first_weight_old = first_weight.clone() print(f"Loading LoRA weights") if hasattr(peft.LoraModel,'merge_and_unload'): try: lora_model = PeftModel.from_pretrained( base_model, lora_model_path, device_map={"": "cpu"}, torch_dtype=torch.float16, ) except RuntimeError as e: if '[49953, 4096]' in str(e): print("The vocab size of the tokenizer does not match the vocab size of the LoRA weight. \n" "Did you misuse the LLaMA tokenizer with the Alpaca-LoRA weight?\n" "Make sure that you use LLaMA tokenizer with the LLaMA-LoRA weight and Alpaca tokenizer with the Alpaca-LoRA weight!") raise e assert torch.allclose(first_weight_old, first_weight) print(f"Merging with merge_and_unload...") base_model = lora_model.merge_and_unload() else: base_model_sd = base_model.state_dict() try: lora_model_sd = torch.load(os.path.join(lora_model_path,'adapter_model.bin'),map_location='cpu') except FileNotFoundError: print("Cannot find lora model on the disk. Downloading lora model from hub...") filename = hf_hub_download(repo_id=lora_model_path,filename='adapter_model.bin') lora_model_sd = torch.load(filename,map_location='cpu') if 'base_model.model.model.embed_tokens.weight' in lora_model_sd: assert lora_model_sd['base_model.model.model.embed_tokens.weight'].shape[0]==len(tokenizer), \ ("The vocab size of the tokenizer does not match the vocab size of the LoRA weight. 
\n" "Did you misuse the LLaMA tokenizer with the Alpaca-LoRA weight?\n" "Make sure that you use LLaMA tokenizer with the LLaMA-LoRA weight and Alpaca tokenizer with the Alpaca-LoRA weight!") lora_config = peft.LoraConfig.from_pretrained(lora_model_path) lora_scaling = lora_config.lora_alpha / lora_config.r fan_in_fan_out = lora_config.fan_in_fan_out lora_keys = [k for k in lora_model_sd if 'lora_A' in k] non_lora_keys = [k for k in lora_model_sd if not 'lora_' in k] for k in non_lora_keys: print(f"merging {k}") original_k = k.replace('base_model.model.','') base_model_sd[original_k].copy_(lora_model_sd[k]) for k in lora_keys: print(f"merging {k}") original_key = k.replace('.lora_A','').replace('base_model.model.','') assert original_key in base_model_sd lora_a_key = k lora_b_key = k.replace('lora_A','lora_B') base_model_sd[original_key] += ( transpose(lora_model_sd[lora_b_key].float() @ lora_model_sd[lora_a_key].float(),fan_in_fan_out) * lora_scaling ) assert base_model_sd[original_key].dtype == torch.float16 # did we do anything? assert not torch.allclose(first_weight_old, first_weight) tokenizer.save_pretrained(output_dir) if output_type=='huggingface': print("Saving to Hugging Face format...") LlamaForCausalLM.save_pretrained(base_model, output_dir) #, state_dict=deloreanized_sd) else: # output_type=='pth print("Saving to pth format...") base_model_sd = base_model.state_dict() del lora_model, base_model, lora_model_sd params = params_of_models[model_size] num_shards = num_shards_of_models[model_size] n_layers = params["n_layers"] n_heads = params["n_heads"] dim = params["dim"] dims_per_head = dim // n_heads base = 10000.0 inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) save_shards(model_sd=base_model_sd, num_shards=num_shards)
ymcui/Chinese-LLaMA-Alpaca
18,964
Chinese LLaMA & Alpaca large language models + local CPU/GPU training and deployment (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
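A standalone sketch (not part of the repository) of the weight update performed by the manual merge path in the LoRA merge script above: each base weight gets W + transpose(B @ A) * (lora_alpha / r). The tensor shapes below are illustrative toy values for a 7B-sized q_proj.

import torch

def merge_lora_weight(w, lora_a, lora_b, lora_alpha=32.0, r=8, fan_in_fan_out=False):
    # Same update as the script's manual path: W <- W + transpose(B @ A) * scaling
    scaling = lora_alpha / r
    delta = lora_b.float() @ lora_a.float()        # (out_features, in_features)
    if fan_in_fan_out:
        delta = delta.T
    return (w.float() + delta * scaling).to(torch.float16)

w = torch.randn(4096, 4096, dtype=torch.float16)   # e.g. q_proj.weight of a 7B model (illustrative)
lora_a = torch.randn(8, 4096)                      # lora_A.weight: (r, in_features)
lora_b = torch.randn(4096, 8)                      # lora_B.weight: (out_features, r)
print(merge_lora_weight(w, lora_a, lora_b).shape)  # torch.Size([4096, 4096])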
scripts/merge_llama_with_chinese_lora_low_mem.py
Python
""" Usage: python merge_llama_with_chinese_lora_low_mem.py \ --base_model path/to/llama/model \ --lora_model path/to/first/lora[,path/to/second/lora] \ --output_type [pth|huggingface] \ --output_dir path/to/output/dir """ import argparse import json import os import gc import torch import peft from transformers import LlamaTokenizer from transformers.modeling_utils import dtype_byte_size from huggingface_hub import snapshot_download import re parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, required=True, type=str, help="Please specify a base model") parser.add_argument('--lora_model', default=None, required=True, type=str, help="Please specify LoRA models to be merged (ordered); use commas to separate multiple LoRA models") parser.add_argument('--output_type', default='pth',choices=['pth','huggingface'], type=str, help="Save the merged model in pth or huggingface format") parser.add_argument('--output_dir', default='./merged_model', type=str, help="The output folder to save the merged model") parser.add_argument('--verbose', default=False, action='store_true', help="Show detailed messages") emb_to_model_size = { 4096 : '7B', 5120 : '13B', 6656 : '33B', 8192 : '65B', } num_shards_of_models = {'7B': 1, '13B': 2, '33B': 4, '65B': 8} params_of_models = { '7B': { "dim": 4096, "multiple_of": 256, "n_heads": 32, "n_layers": 32, "norm_eps": 1e-06, "vocab_size": -1, }, '13B': { "dim": 5120, "multiple_of": 256, "n_heads": 40, "n_layers": 40, "norm_eps": 1e-06, "vocab_size": -1, }, '33B': { "dim": 6656, "multiple_of": 256, "n_heads": 52, "n_layers": 60, "norm_eps": 1e-06, "vocab_size": -1, }, '65B': { "dim": 8192, "multiple_of": 256, "n_heads": 64, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1, }, } def transpose(weight, fan_in_fan_out): return weight.T if fan_in_fan_out else weight # Borrowed and modified from https://github.com/tloen/alpaca-lora def translate_state_dict_key(k): k = k.replace("base_model.model.", "") if k == "model.embed_tokens.weight": return "tok_embeddings.weight" elif k == "model.norm.weight": return "norm.weight" elif k == "lm_head.weight": return "output.weight" elif k.startswith("model.layers."): layer = k.split(".")[2] if k.endswith(".self_attn.q_proj.weight"): return f"layers.{layer}.attention.wq.weight" elif k.endswith(".self_attn.k_proj.weight"): return f"layers.{layer}.attention.wk.weight" elif k.endswith(".self_attn.v_proj.weight"): return f"layers.{layer}.attention.wv.weight" elif k.endswith(".self_attn.o_proj.weight"): return f"layers.{layer}.attention.wo.weight" elif k.endswith(".mlp.gate_proj.weight"): return f"layers.{layer}.feed_forward.w1.weight" elif k.endswith(".mlp.down_proj.weight"): return f"layers.{layer}.feed_forward.w2.weight" elif k.endswith(".mlp.up_proj.weight"): return f"layers.{layer}.feed_forward.w3.weight" elif k.endswith(".input_layernorm.weight"): return f"layers.{layer}.attention_norm.weight" elif k.endswith(".post_attention_layernorm.weight"): return f"layers.{layer}.ffn_norm.weight" elif k.endswith("rotary_emb.inv_freq") or "lora" in k: return None else: print(layer, k) raise NotImplementedError else: print(k) raise NotImplementedError def unpermute(w): return ( w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim) ) def save_shards(model_sd, num_shards: int, prefix="", verbose=False): """ Convert and save the HF format weights to PTH format weights """ with torch.no_grad(): if num_shards == 1: new_state_dict = {} for k, v in model_sd.items(): new_k = 
translate_state_dict_key(k) if new_k is not None: if "wq" in new_k or "wk" in new_k: new_state_dict[new_k] = unpermute(v) else: new_state_dict[new_k] = v os.makedirs(output_dir, exist_ok=True) print(f"Saving shard 1 of {num_shards} into {output_dir}/{prefix}consolidated.00.pth") torch.save(new_state_dict, output_dir + f"/{prefix}consolidated.00.pth") else: new_state_dicts = [dict() for _ in range(num_shards)] for k in list(model_sd.keys()): v = model_sd[k] new_k = translate_state_dict_key(k) if new_k is not None: if new_k=='tok_embeddings.weight': assert v.size(1)%num_shards==0 splits = v.split(v.size(1)//num_shards,dim=1) elif new_k=='output.weight': if v.size(0)%num_shards==0: splits = v.split(v.size(0)//num_shards,dim=0) else: size_list = [v.size(0)//num_shards] * num_shards size_list[-1] += v.size(0)%num_shards splits = v.split(size_list, dim=0) # 13B: size_list == [24976,24977] elif new_k=='norm.weight': splits = [v] * num_shards elif 'ffn_norm.weight' in new_k: splits = [v] * num_shards elif 'attention_norm.weight' in new_k: splits = [v] * num_shards elif 'w1.weight' in new_k: splits = v.split(v.size(0)//num_shards,dim=0) elif 'w2.weight' in new_k: splits = v.split(v.size(1)//num_shards,dim=1) elif 'w3.weight' in new_k: splits = v.split(v.size(0)//num_shards,dim=0) elif 'wo.weight' in new_k: splits = v.split(v.size(1)//num_shards,dim=1) elif 'wv.weight' in new_k: splits = v.split(v.size(0)//num_shards,dim=0) elif "wq.weight" in new_k or "wk.weight" in new_k: v = unpermute(v) splits = v.split(v.size(0)//num_shards,dim=0) else: print(f"Unexpected key {new_k}") raise ValueError if verbose: print(f"Processing {new_k}") for sd,split in zip(new_state_dicts,splits): sd[new_k] = split.clone() del split del splits del model_sd[k],v gc.collect() # Effectively enforce garbage collection os.makedirs(output_dir, exist_ok=True) for i,new_state_dict in enumerate(new_state_dicts): print(f"Saving shard {i+1} of {num_shards} into {output_dir}/{prefix}consolidated.0{i}.pth") torch.save(new_state_dict, output_dir + f"/{prefix}consolidated.0{i}.pth") def merge_shards(output_dir, num_shards: int): ckpt_filenames = sorted([f for f in os.listdir(output_dir) if re.match('L(\d+)-consolidated.(\d+).pth',f)]) for i in range(num_shards): shards_filenames = sorted([f for f in ckpt_filenames if re.match(f'L(\d+)-consolidated.0{i}.pth',f)]) print(f"Loading {shards_filenames} ...") shards_dicts = [torch.load(os.path.join(output_dir,fn)) for fn in shards_filenames] shards_merged = {} for d in shards_dicts: shards_merged |= d print(f"Saving the merged shard to " + os.path.join(output_dir, f"consolidated.0{i}.pth")) torch.save(shards_merged, os.path.join(output_dir, f"consolidated.0{i}.pth")) print("Cleaning up...") del shards_merged for d in shards_dicts: del d del shards_dicts gc.collect() # Effectively enforce garbage collection for fn in shards_filenames: os.remove(os.path.join(output_dir,fn)) if __name__=='__main__': args = parser.parse_args() base_model_path = args.base_model lora_model_paths = [s.strip() for s in args.lora_model.split(',') if len(s.strip())!=0] output_dir = args.output_dir output_type = args.output_type os.makedirs(output_dir, exist_ok=True) print(f"Base model: {base_model_path}") print(f"LoRA model(s) {lora_model_paths}:") tokenizers_and_loras = [] for lora_model_path in lora_model_paths: print(f"Loading {lora_model_path}") if not os.path.exists(lora_model_path): print("Cannot find lora model on the disk. 
Downloading lora model from hub...") lora_model_path = snapshot_download(repo_id=lora_model_path) tokenizer = LlamaTokenizer.from_pretrained(lora_model_path) lora_config = peft.LoraConfig.from_pretrained(lora_model_path) lora_state_dict = torch.load(os.path.join(lora_model_path,'adapter_model.bin'),map_location='cpu') if 'base_model.model.model.embed_tokens.weight' in lora_state_dict: lora_vocab_size = lora_state_dict['base_model.model.model.embed_tokens.weight'].shape[0] assert lora_vocab_size==len(tokenizer), \ (f"The vocab size of the tokenizer {len(tokenizer)} does not match the vocab size of the LoRA weight {lora_vocab_size}.\n" "Make sure that you use LLaMA tokenizer with the LLaMA-LoRA weight and Alpaca tokenizer with the Alpaca-LoRA weight!") tokenizers_and_loras.append( { "tokenizer" :tokenizer, "state_dict" :lora_state_dict, "config": lora_config, "scaling": lora_config.lora_alpha / lora_config.r, "fan_in_fan_out" : lora_config.fan_in_fan_out, }) if len(tokenizers_and_loras)==2: t1_vocab_size = len(tokenizers_and_loras[0]["tokenizer"]) t2_vocab_size = len(tokenizers_and_loras[1]["tokenizer"]) assert t1_vocab_size<=t2_vocab_size, \ (f"The vocab size of the first tokenizer is {t1_vocab_size}\n" f"The vocab size of the second tokenizer is {t2_vocab_size}, found to be smaller than {t1_vocab_size}\n" "This is not the intended use. Please check your model and tokenizer.") if not os.path.exists(base_model_path): print("Cannot find lora model on the disk. Downloading lora model from hub...") base_model_path = snapshot_download(repo_id=base_model_path) ckpt_filenames = sorted([f for f in os.listdir(base_model_path) if re.match('pytorch_model-(\d+)-of-(\d+).bin',f)]) embedding_size = None model_size = None total_size = 0 for index, filename in enumerate(ckpt_filenames): print(f"Loading ckpt {filename}") state_dict = torch.load(os.path.join(base_model_path,filename), map_location='cpu') if index == 0: embedding_size = state_dict['model.embed_tokens.weight'].shape[1] model_size = emb_to_model_size[embedding_size] if output_type=='pth': params = params_of_models[model_size] num_shards = num_shards_of_models[model_size] n_layers = params["n_layers"] n_heads = params["n_heads"] dim = params["dim"] dims_per_head = dim // n_heads base = 10000.0 inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) print("Merging...") for k in state_dict: for tl_idx, t_and_l in enumerate(tokenizers_and_loras): saved_key = 'base_model.model.'+k lora_key_A = saved_key.replace('.weight','.lora_A.weight') if saved_key in t_and_l['state_dict']: if args.verbose: print(f"copying {saved_key} from {tl_idx}-th LoRA weight to {k}") state_dict[k] = t_and_l['state_dict'][saved_key].half().clone() # do we need half()? 
if lora_key_A in t_and_l['state_dict']: lora_key_B = lora_key_A.replace('lora_A.weight','lora_B.weight') if args.verbose: print(f"merging {lora_key_A} and lora_B.weight form {tl_idx}-th LoRA weight to {k}") state_dict[k] += ( transpose( t_and_l['state_dict'][lora_key_B].float() @ t_and_l['state_dict'][lora_key_A].float(), t_and_l['fan_in_fan_out']) * t_and_l['scaling'] ) weight_size = state_dict[k].numel() * dtype_byte_size(state_dict[k].dtype) total_size += weight_size if output_type=='huggingface': print(f"Saving ckpt {filename} to {output_dir} in HF format...") torch.save(state_dict,os.path.join(output_dir, filename)) elif output_type=='pth': print(f"Converting to pth format...") save_shards(model_sd=state_dict, num_shards=num_shards,prefix=f"L{index+1}-", verbose=args.verbose) del state_dict gc.collect() # Effectively enforce garbage collection print(f"Saving tokenizer") tokenizers_and_loras[-1]['tokenizer'].save_pretrained(output_dir) if output_type == 'pth': with open(output_dir + "/params.json", "w") as f: print(f"Saving params.json into {output_dir}/params.json") json.dump(params, f) merge_shards(output_dir, num_shards=num_shards) if output_type=='huggingface': configs = ('config.json', 'generation_config.json', 'pytorch_model.bin.index.json') for config in configs: if os.path.exists(os.path.join(base_model_path, config)): print(f"Saving {config}") with open(os.path.join(base_model_path, config),'r') as f: obj = json.load(f) if config=='config.json': obj['vocab_size'] = len(tokenizers_and_loras[-1]['tokenizer']) if config=='pytorch_model.bin.index.json': obj['metadata']['total_size'] = total_size with open(os.path.join(output_dir, config), 'w') as f: json.dump(obj, f, indent=2) print("Done.")
ymcui/Chinese-LLaMA-Alpaca
18,964
Chinese LLaMA & Alpaca large language models + local CPU/GPU training and deployment (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
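To make the pth-conversion step in merge_llama_with_chinese_lora_low_mem.py above easier to follow, here is a small standalone sketch of how save_shards splits tensors across shards in the multi-shard case: tok_embeddings is split column-wise (dim=1), attention weights such as wq row-wise (dim=0). Toy sizes are used; the real script also unpermutes wq/wk before splitting.

import torch

num_shards = 2
tok_embeddings = torch.randn(10, 8)   # toy (vocab, dim); a real 13B checkpoint would be (49954, 5120)
wq = torch.randn(8, 8)                # toy attention weight; the script unpermutes it first

emb_splits = tok_embeddings.split(tok_embeddings.size(1) // num_shards, dim=1)  # column split
wq_splits = wq.split(wq.size(0) // num_shards, dim=0)                           # row split
print([tuple(t.shape) for t in emb_splits])  # [(10, 4), (10, 4)]
print([tuple(t.shape) for t in wq_splits])   # [(4, 8), (4, 8)]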
scripts/merge_tokenizer/merge_tokenizers.py
Python
import os
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"]="python"
from transformers import LlamaTokenizer
from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model
import sentencepiece as spm
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--llama_tokenizer_dir', default=None, type=str, required=True)
parser.add_argument('--chinese_sp_model_file', default='./chinese_sp.model', type=str)
args = parser.parse_args()

llama_tokenizer_dir = args.llama_tokenizer_dir
chinese_sp_model_file = args.chinese_sp_model_file

# load
llama_tokenizer = LlamaTokenizer.from_pretrained(llama_tokenizer_dir)
chinese_sp_model = spm.SentencePieceProcessor()
chinese_sp_model.Load(chinese_sp_model_file)

llama_spm = sp_pb2_model.ModelProto()
llama_spm.ParseFromString(llama_tokenizer.sp_model.serialized_model_proto())
chinese_spm = sp_pb2_model.ModelProto()
chinese_spm.ParseFromString(chinese_sp_model.serialized_model_proto())

# print number of tokens
print(len(llama_tokenizer),len(chinese_sp_model))
print(llama_tokenizer.all_special_tokens)
print(llama_tokenizer.all_special_ids)
print(llama_tokenizer.special_tokens_map)

## Add Chinese tokens to LLaMA tokenizer
llama_spm_tokens_set=set(p.piece for p in llama_spm.pieces)
print(len(llama_spm_tokens_set))
print(f"Before:{len(llama_spm_tokens_set)}")
for p in chinese_spm.pieces:
    piece = p.piece
    if piece not in llama_spm_tokens_set:
        new_p = sp_pb2_model.ModelProto().SentencePiece()
        new_p.piece = piece
        new_p.score = 0
        llama_spm.pieces.append(new_p)
print(f"New model pieces: {len(llama_spm.pieces)}")

## Save
output_sp_dir = 'merged_tokenizer_sp'
output_hf_dir = 'merged_tokenizer_hf'  # the path to save Chinese-LLaMA tokenizer
os.makedirs(output_sp_dir, exist_ok=True)
with open(output_sp_dir+'/chinese_llama.model', 'wb') as f:
    f.write(llama_spm.SerializeToString())
tokenizer = LlamaTokenizer(vocab_file=output_sp_dir+'/chinese_llama.model')

tokenizer.save_pretrained(output_hf_dir)
print(f"Chinese-LLaMA tokenizer has been saved to {output_hf_dir}")

# Test
llama_tokenizer = LlamaTokenizer.from_pretrained(llama_tokenizer_dir)
chinese_llama_tokenizer = LlamaTokenizer.from_pretrained(output_hf_dir)
print(tokenizer.all_special_tokens)
print(tokenizer.all_special_ids)
print(tokenizer.special_tokens_map)
text = '''白日依山尽,黄河入海流。欲穷千里目,更上一层楼。
The primary use of LLaMA is research on large language models, including'''
print("Test text:\n", text)
print(f"Tokenized by LLaMA tokenizer:{llama_tokenizer.tokenize(text)}")
print(f"Tokenized by Chinese-LLaMA tokenizer:{chinese_llama_tokenizer.tokenize(text)}")
ymcui/Chinese-LLaMA-Alpaca
18,964
Chinese LLaMA & Alpaca large language models + local CPU/GPU training and deployment (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
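After running merge_tokenizers.py above, a quick sanity check might look like the sketch below. The original-tokenizer path is a placeholder, and the exact merged vocabulary size depends on the Chinese SentencePiece model supplied.

from transformers import LlamaTokenizer

original = LlamaTokenizer.from_pretrained("path/to/original/llama_tokenizer")  # placeholder path
merged = LlamaTokenizer.from_pretrained("merged_tokenizer_hf")                 # output of the script above

text = "白日依山尽,黄河入海流。"
print(len(original), len(merged))   # original LLaMA has 32000 pieces; the merged tokenizer is larger
print(original.tokenize(text))      # Chinese mostly falls back to byte/character pieces
print(merged.tokenize(text))        # the merged tokenizer uses the added Chinese pieces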
scripts/openai_server_demo/openai_api_protocol.py
Python
from typing import Optional, List, Dict, Any, Union
import time

import shortuuid
from pydantic import BaseModel, Field


class ChatCompletionRequest(BaseModel):
    model: str = "chinese-llama-alpaca"
    messages: Union[str, List[Dict[str, str]]]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    top_k: Optional[int] = 40
    n: Optional[int] = 1
    max_tokens: Optional[int] = 128
    num_beams: Optional[int] = 1
    stop: Optional[Union[str, List[str]]] = None
    stream: Optional[bool] = False
    repetition_penalty: Optional[float] = 1.0
    user: Optional[str] = None
    do_sample: Optional[bool] = True


class ChatMessage(BaseModel):
    role: str
    content: str


class ChatCompletionResponseChoice(BaseModel):
    index: int
    message: ChatMessage


class ChatCompletionResponse(BaseModel):
    id: str = Field(default_factory=lambda: f"chatcmpl-{shortuuid.random()}")
    object: str = "chat.completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str = "chinese-llama-alpaca"
    choices: List[ChatCompletionResponseChoice]


class EmbeddingsRequest(BaseModel):
    input: Union[str, List[Any]]
    user: Optional[str] = None


class EmbeddingsResponse(BaseModel):
    object: str = "list"
    data: List[Dict[str, Any]]
    model: str = "chinese-llama-alpaca"


class CompletionRequest(BaseModel):
    prompt: Union[str, List[Any]]
    temperature: Optional[float] = 0.1
    n: Optional[int] = 1
    max_tokens: Optional[int] = 128
    stop: Optional[Union[str, List[str]]] = None
    stream: Optional[bool] = False
    top_p: Optional[float] = 0.75
    top_k: Optional[int] = 40
    num_beams: Optional[int] = 1
    logprobs: Optional[int] = None
    echo: Optional[bool] = False
    repetition_penalty: Optional[float] = 1.0
    user: Optional[str] = None
    do_sample: Optional[bool] = True


class CompletionResponseChoice(BaseModel):
    index: int
    text: str


class CompletionResponse(BaseModel):
    id: Optional[str] = Field(default_factory=lambda: f"cmpl-{shortuuid.random()}")
    object: Optional[str] = "text_completion"
    created: Optional[int] = Field(default_factory=lambda: int(time.time()))
    model: Optional[str] = 'chinese-llama-alpaca'
    choices: List[CompletionResponseChoice]
ymcui/Chinese-LLaMA-Alpaca
18,964
Chinese LLaMA & Alpaca large language models + local CPU/GPU training and deployment (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
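A minimal usage sketch of the pydantic models in openai_api_protocol.py above, with illustrative field values; it assumes the module is importable from the same directory, and uses the pydantic v1-style .json() that the file is written against (on pydantic v2, model_dump_json() is the equivalent).

from openai_api_protocol import (
    ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseChoice, ChatMessage,
)

# Build a request as a client would send it, and a response as the server would return it.
request = ChatCompletionRequest(
    messages=[{"role": "user", "content": "Hello"}],
    max_tokens=64,
    temperature=0.2,
)
response = ChatCompletionResponse(
    choices=[ChatCompletionResponseChoice(index=0, message=ChatMessage(role="assistant", content="Hi!"))]
)
print(request.json())
print(response.json())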
scripts/openai_server_demo/openai_api_server.py
Python
import argparse import os from fastapi import FastAPI import uvicorn parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, type=str, required=True) parser.add_argument('--lora_model', default=None, type=str,help="If None, perform inference on the base model") parser.add_argument('--tokenizer_path',default=None,type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--load_in_8bit',action='store_true', help='use 8 bit model') parser.add_argument('--only_cpu',action='store_true',help='only use CPU for inference') parser.add_argument('--alpha',type=str,default="1.0", help="The scaling factor of NTK method, can be a float or 'auto'. ") args = parser.parse_args() load_in_8bit = args.load_in_8bit if args.only_cpu is True: args.gpus = "" os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus import torch import torch.nn.functional as F from transformers import LlamaForCausalLM, LlamaTokenizer, GenerationConfig from peft import PeftModel from patches import apply_attention_patch, apply_ntk_scaling_patch apply_attention_patch(use_memory_efficient_attention=True) apply_ntk_scaling_patch(args.alpha) from openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, EmbeddingsRequest, EmbeddingsResponse, ) load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') if args.tokenizer_path is None: args.tokenizer_path = args.lora_model if args.lora_model is None: args.tokenizer_path = args.base_model tokenizer = LlamaTokenizer.from_pretrained(args.tokenizer_path) base_model = LlamaForCausalLM.from_pretrained( args.base_model, load_in_8bit=load_in_8bit, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto' if not args.only_cpu else None, ) model_vocab_size = base_model.get_input_embeddings().weight.size(0) tokenzier_vocab_size = len(tokenizer) print(f"Vocab of the base model: {model_vocab_size}") print(f"Vocab of the tokenizer: {tokenzier_vocab_size}") if model_vocab_size!=tokenzier_vocab_size: assert tokenzier_vocab_size > model_vocab_size print("Resize model embeddings to fit tokenizer") base_model.resize_token_embeddings(tokenzier_vocab_size) if args.lora_model is not None: print("loading peft model") model = PeftModel.from_pretrained(base_model, args.lora_model,torch_dtype=load_type,device_map='auto',) else: model = base_model if device==torch.device('cpu'): model.float() model.eval() def generate_completion_prompt(instruction: str): """Generate prompt for completion""" return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Response: """ def generate_chat_prompt(messages: list): """Generate prompt for chat completion""" system_msg = '''Below is an instruction that describes a task. 
Write a response that appropriately completes the request.''' for msg in messages: if msg.role == 'system': system_msg = msg.content prompt = f"{system_msg}\n\n" for msg in messages: if msg.role == 'system': continue if msg.role == 'assistant': prompt += f"### Response: {msg.content}\n\n" if msg.role == 'user': prompt += f"### Instruction:\n{msg.content}\n\n" prompt += "### Response: " return prompt def predict( input, max_new_tokens=128, top_p=0.75, temperature=0.1, top_k=40, num_beams=4, repetition_penalty=1.0, do_sample=True, **kwargs, ): """ Main inference method type(input) == str -> /v1/completions type(input) == list -> /v1/chat/completions """ if isinstance(input, str): prompt = generate_completion_prompt(input) else: prompt = generate_chat_prompt(input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(device) generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, do_sample=do_sample, **kwargs, ) with torch.no_grad(): generation_output = model.generate( input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=False, max_new_tokens=max_new_tokens, repetition_penalty=float(repetition_penalty), ) s = generation_output.sequences[0] output = tokenizer.decode(s, skip_special_tokens=True) output = output.split("### Response:")[-1].strip() return output def get_embedding(input): """Get embedding main function""" with torch.no_grad(): if tokenizer.pad_token == None: tokenizer.add_special_tokens({'pad_token': '[PAD]'}) encoding = tokenizer( input, padding=True, return_tensors="pt" ) input_ids = encoding["input_ids"].to(device) attention_mask = encoding["attention_mask"].to(device) model_output = model( input_ids, attention_mask, output_hidden_states=True ) data = model_output.hidden_states[-1] mask = attention_mask.unsqueeze(-1).expand(data.size()).float() masked_embeddings = data * mask sum_embeddings = torch.sum(masked_embeddings, dim=1) seq_length = torch.sum(mask, dim=1) embedding = sum_embeddings / seq_length normalized_embeddings = F.normalize(embedding, p=2, dim=1) ret = normalized_embeddings.squeeze(0).tolist() return ret app = FastAPI() @app.post("/v1/chat/completions") async def create_chat_completion(request: ChatCompletionRequest): """Creates a completion for the chat message""" msgs = request.messages if isinstance(msgs, str): msgs = [ChatMessage(role='user',content=msgs)] else: msgs = [ChatMessage(role=x['role'],content=x['message']) for x in msgs] output = predict( input=msgs, max_new_tokens=request.max_tokens, top_p=request.top_p, top_k=request.top_k, temperature=request.temperature, num_beams=request.num_beams, repetition_penalty=request.repetition_penalty, do_sample=request.do_sample, ) choices = [ChatCompletionResponseChoice(index = i, message = msg) for i, msg in enumerate(msgs)] choices += [ChatCompletionResponseChoice(index = len(choices), message = ChatMessage(role='assistant',content=output))] return ChatCompletionResponse(choices = choices) @app.post("/v1/completions") async def create_completion(request: CompletionRequest): """Creates a completion""" output = predict( input=request.prompt, max_new_tokens=request.max_tokens, top_p=request.top_p, top_k=request.top_k, temperature=request.temperature, num_beams=request.num_beams, repetition_penalty=request.repetition_penalty, do_sample=request.do_sample, ) choices = [CompletionResponseChoice(index = 0, text = output)] return CompletionResponse(choices = choices) 
@app.post("/v1/embeddings") async def create_embeddings(request: EmbeddingsRequest): """Creates text embedding""" embedding = get_embedding(request.input) data = [{ "object": "embedding", "embedding": embedding, "index": 0 }] return EmbeddingsResponse(data=data) if __name__ == "__main__": log_config = uvicorn.config.LOGGING_CONFIG log_config["formatters"]["access"]["fmt"] = "%(asctime)s - %(levelname)s - %(message)s" log_config["formatters"]["default"]["fmt"] = "%(asctime)s - %(levelname)s - %(message)s" uvicorn.run(app, host='0.0.0.0', port=19327, workers=1, log_config=log_config)
ymcui/Chinese-LLaMA-Alpaca
18,964
Chinese LLaMA & Alpaca large language models + local CPU/GPU training and deployment (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
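A minimal client sketch for openai_api_server.py above, assuming the server is already running locally on its default port 19327 and that the requests package is installed. Note that this server reads dict-form messages with a 'message' key rather than 'content', so the simpler string form of messages is used here.

import requests  # assumed installed; not part of the server's own imports

resp = requests.post(
    "http://127.0.0.1:19327/v1/chat/completions",
    json={"messages": "Tell me something about Harbin.", "max_tokens": 128, "temperature": 0.2},
)
# The server appends the assistant reply as the last choice.
print(resp.json()["choices"][-1]["message"]["content"])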
scripts/openai_server_demo/patches.py
Python
import torch from torch import nn from typing import Optional, Tuple, Union import transformers from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, rotate_half import math try: from xformers import ops as xops except ImportError: xops = None print( "Xformers is not installed correctly. If you want to use memory_efficient_attention use the following command to install Xformers\npip install xformers." ) STORE_KV_BEFORE_ROPE = False USE_MEM_EFF_ATTENTION = False ALPHA = 1.0 def apply_rotary_pos_emb_single(q, cos, sin, position_ids): # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q * cos) + (rotate_half(q) * sin) return q_embed def xformers_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] if STORE_KV_BEFORE_ROPE is False: cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None else: if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states = apply_rotary_pos_emb_single(query_states, cos, sin, position_ids) position_ids = torch.arange(kv_seq_len, dtype=torch.long, device=cos.device) position_ids = position_ids.unsqueeze(0).view(-1, kv_seq_len) key_states = apply_rotary_pos_emb_single(key_states, cos, sin, position_ids) if xops is not None and USE_MEM_EFF_ATTENTION: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_bias = None if (query_states.size(1)==1 and key_states.size(1)>1) else xops.LowerTriangularMask() attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=attn_bias, p=0) else: attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask 
is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask attn_weights = torch.max( attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device) ) # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value old_init = transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.__init__ def adaptive_ntk_init(self, dim, max_position_embeddings=2048, base=10000, device=None): self.dim = dim self.alpha = ALPHA if isinstance(ALPHA,(float,int)): base = base * ALPHA ** (dim / (dim-2)) self.base = base elif ALPHA=='auto': self.base = base else: raise ValueError(ALPHA) old_init(self, dim, max_position_embeddings, base, device) ntk_inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) self.register_buffer("ntk_inv_freq", ntk_inv_freq, persistent=False) def adaptive_ntk_forward(self, x, seq_len=None): if seq_len > self.max_seq_len_cached: if isinstance(self.alpha,(float,int)): self.max_seq_len_cached = seq_len t = torch.arange(seq_len, device=x.device, dtype=self.ntk_inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.ntk_inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False) self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False) return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), ) elif self.alpha=='auto': t = torch.arange(seq_len, device=x.device, dtype=self.ntk_inv_freq.dtype) dim = self.dim alpha = (seq_len / 1024 - 1) * 1.1 base = self.base * alpha ** (dim / (dim-2)) ntk_inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(x.device) / dim )) freqs = torch.einsum("i,j->ij", t, ntk_inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) cos_cached = emb.cos()[None, None, :, :] sin_cached = emb.sin()[None, None, :, :] return ( cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype) ) else: return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype) ) def apply_attention_patch( use_memory_efficient_attention=False, store_kv_before_rope=False ): global USE_MEM_EFF_ATTENTION, STORE_KV_BEFORE_ROPE if use_memory_efficient_attention is True and xops is not None: USE_MEM_EFF_ATTENTION = use_memory_efficient_attention print("USE_MEM_EFF_ATTENTION: ",USE_MEM_EFF_ATTENTION) STORE_KV_BEFORE_ROPE = store_kv_before_rope print("STORE_KV_BEFORE_ROPE:", STORE_KV_BEFORE_ROPE) transformers.models.llama.modeling_llama.LlamaAttention.forward = xformers_forward def apply_ntk_scaling_patch(alpha: Union[float,str]): global ALPHA ALPHA = alpha try: ALPHA = float(ALPHA) except ValueError: if ALPHA!="auto": raise ValueError(f"Alpha 
can only be a float or 'auto', but given {ALPHA}") print(f"Apply NTK scaling with ALPHA={ALPHA}") transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.__init__ = adaptive_ntk_init transformers.models.llama.modeling_llama.LlamaRotaryEmbedding.forward = adaptive_ntk_forward
ymcui/Chinese-LLaMA-Alpaca
18,964
Chinese LLaMA & Alpaca large language models + local CPU/GPU training and deployment (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
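A standalone sketch of the fixed-alpha branch of the NTK scaling patch in patches.py above: the RoPE base is enlarged by alpha**(dim/(dim-2)) before the inverse frequencies are built, which stretches the low frequencies to cover longer contexts. The per-head dimension below is the 7B value (4096 / 32 heads); the alpha='auto' branch instead recomputes the base from the current sequence length.

import torch

def ntk_inv_freq(dim, alpha, base=10000.0):
    base = base * alpha ** (dim / (dim - 2))   # same rescaling as adaptive_ntk_init
    return 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))

head_dim = 128                                  # per-head dim of the 7B model
print(ntk_inv_freq(head_dim, 1.0)[-3:])         # alpha=1.0 reproduces vanilla RoPE
print(ntk_inv_freq(head_dim, 4.0)[-3:])         # larger alpha lowers the low-end frequencies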
scripts/training/build_dataset.py
Python
import logging import os from dataclasses import dataclass from typing import Dict, Sequence, Union, List import datasets import torch from datasets import load_dataset, concatenate_datasets import transformers IGNORE_INDEX = -100 logger = logging.getLogger('__name__') PROMPT_TEMPLATE = ( "Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{instruction}\n\n### Response: " ) def build_instruction_dataset(data_path: Union[List[str],str], tokenizer: transformers.PreTrainedTokenizer, max_seq_length: int, data_cache_dir = None, preprocessing_num_workers = None, ): def tokenization(examples): sources = [] targets = [] prompt = PROMPT_TEMPLATE for instruction, input, output in zip(examples['instruction'],examples['input'],examples['output']): if input is not None and input !="": instruction = instruction+'\n'+input source = prompt.format_map({'instruction':instruction}) target = f"{output}{tokenizer.eos_token}" sources.append(source) targets.append(target) tokenized_sources = tokenizer(sources,return_attention_mask=False) tokenized_targets = tokenizer(targets,return_attention_mask=False,add_special_tokens=False) all_input_ids = [] all_labels = [] for s,t in zip(tokenized_sources['input_ids'],tokenized_targets['input_ids']): input_ids = torch.LongTensor(s + t)[:max_seq_length] labels = torch.LongTensor([IGNORE_INDEX] * len(s) + t)[:max_seq_length] assert len(input_ids) == len(labels) all_input_ids.append(input_ids) all_labels.append(labels) results = {'input_ids':all_input_ids, 'labels': all_labels} return results logging.warning("building dataset...") all_datasets = [] if not isinstance(data_path,(list,tuple)): data_path = [data_path] for file in data_path: if data_cache_dir is None: data_cache_dir = str(os.path.dirname(file)) cache_path = os.path.join(data_cache_dir,os.path.basename(file).split('.')[0]) os.makedirs(cache_path, exist_ok=True) try: processed_dataset = datasets.load_from_disk(cache_path) logger.info(f'training datasets-{file} has been loaded from disk') except Exception: raw_dataset = load_dataset("json", data_files=file, cache_dir=cache_path) tokenization_func = tokenization tokenized_dataset = raw_dataset.map( tokenization_func, batched=True, num_proc=preprocessing_num_workers, remove_columns=["instruction","input","output"], keep_in_memory=False, desc="preprocessing on dataset", ) processed_dataset = tokenized_dataset processed_dataset.save_to_disk(cache_path) processed_dataset.set_format('torch') all_datasets.append(processed_dataset['train']) all_datasets = concatenate_datasets(all_datasets) return all_datasets @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]: input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id ) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=-100) return dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id), )
ymcui/Chinese-LLaMA-Alpaca
18,964
Chinese LLaMA & Alpaca large language models + local CPU/GPU training and deployment (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
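A toy illustration (made-up token ids and lengths) of the label construction in build_dataset.py above: prompt tokens are masked with IGNORE_INDEX so the loss is only computed on the response tokens.

import torch

IGNORE_INDEX = -100
max_seq_length = 512

s = list(range(1, 13))      # pretend ids of the tokenized prompt (template + instruction)
t = list(range(100, 105))   # pretend ids of the tokenized response plus eos

input_ids = torch.LongTensor(s + t)[:max_seq_length]
labels = torch.LongTensor([IGNORE_INDEX] * len(s) + t)[:max_seq_length]
print(input_ids.tolist())
print(labels.tolist())      # the first len(s) positions are -100, so the prompt is ignored in the loss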
scripts/training/run_clm_pt_with_peft.py
Python
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import logging import numpy as np import math import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, List, Dict, Any, Mapping from pathlib import Path import datasets import torch from datasets import load_dataset, concatenate_datasets import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM, LlamaTokenizer, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, is_torch_tpu_available, set_seed, ) from transformers.testing_utils import CaptureLogger from transformers.trainer_utils import get_last_checkpoint from transformers.utils import send_example_telemetry from transformers.utils.versions import require_version from sklearn.metrics import accuracy_score from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR class SavePeftModelCallback(transformers.TrainerCallback): def save_model(self, args, state, kwargs): if state.best_model_checkpoint is not None: checkpoint_folder = os.path.join(state.best_model_checkpoint, "pt_lora_model") else: checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") peft_model_path = os.path.join(checkpoint_folder, "pt_lora_model") kwargs["model"].save_pretrained(peft_model_path) kwargs["tokenizer"].save_pretrained(peft_model_path) def on_save(self, args, state, control, **kwargs): self.save_model(args, state, kwargs) return control def on_train_end(self, args, state, control, **kwargs): peft_model_path = os.path.join(args.output_dir, "pt_lora_model") kwargs["model"].save_pretrained(peft_model_path) kwargs["tokenizer"].save_pretrained(peft_model_path) def accuracy(predictions, references, normalize=True, sample_weight=None): return { "accuracy": float( accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight) ) } def compute_metrics(eval_preds): preds, labels = eval_preds # preds have the same shape as the labels, after the argmax(-1) has been calculated # by preprocess_logits_for_metrics but we need to shift the labels labels = labels[:, 1:].reshape(-1) preds = preds[:, :-1].reshape(-1) return accuracy(predictions=preds, references=labels) def preprocess_logits_for_metrics(logits, labels): if isinstance(logits, tuple): # Depending on the model and config, logits may contain extra tensors, # like past_key_values, but logits always come 
first logits = logits[0] return logits.argmax(dim=-1) def fault_tolerance_data_collator(features: List) -> Dict[str, Any]: if not isinstance(features[0], Mapping): features = [vars(f) for f in features] first = features[0] batch = {} # Special handling for labels. # Ensure that tensor is created with the correct type # (it should be automatically the case, but let's make sure of it.) if "label" in first and first["label"] is not None: label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"] dtype = torch.long if isinstance(label, int) else torch.float batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype) elif "label_ids" in first and first["label_ids"] is not None: if isinstance(first["label_ids"], torch.Tensor): batch["labels"] = torch.stack([f["label_ids"] for f in features]) else: dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype) # Handling of all other possible keys. # Again, we will use the first element to figure out which key/values are not None for this model. try: for k, v in first.items(): if k not in ("label", "label_ids") and v is not None and not isinstance(v, str): if isinstance(v, torch.Tensor): batch[k] = torch.stack([f[k] for f in features]) elif isinstance(v, np.ndarray): batch[k] = torch.tensor(np.stack([f[k] for f in features])) else: batch[k] = torch.tensor([f[k] for f in features]) except ValueError: # quick fix by simply take the first example for k, v in first.items(): if k not in ("label", "label_ids") and v is not None and not isinstance(v, str): if isinstance(v, torch.Tensor): batch[k] = torch.stack([features[0][k]] * len(features)) elif isinstance(v, np.ndarray): batch[k] = torch.tensor(np.stack([features[0][k]] * len(features))) else: batch[k] = torch.tensor([features[0][k]] * len(features)) return batch MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The tokenizer for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_dir: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) block_size: Optional[int] = field( default=None, metadata={ "help": ( "Optional input sequence length after tokenization. " "The training dataset will be truncated in block of this size for training. " "Default to the model max input length for single sentence inputs (take into account special tokens)." 
) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[float] = field( default=0.05, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) data_cache_dir: Optional[str] = field(default="./", metadata={"help": "The datasets processed stored"}) def __post_init__(self): if self.streaming: require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`") @dataclass class MyTrainingArguments(TrainingArguments): trainable : Optional[str] = field(default="q_proj,v_proj") lora_rank : Optional[int] = field(default=8) lora_dropout : Optional[float] = field(default=0.1) lora_alpha : Optional[float] = field(default=32.) modules_to_save : Optional[str] = field(default=None) debug_mode : Optional[bool] = field(default=False) peft_path : Optional[str] = field(default=None) logger = logging.getLogger(__name__) def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN, handlers=[logging.StreamHandler(sys.stdout)],) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # transformers.tokenization_utils.logging.set_verbosity_warning() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.tokenizer_name_or_path: tokenizer = LlamaTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) # Preprocessing the datasets. # First we tokenize all the texts. # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer(examples["text"]) # clm input could be much much longer than block_size if "Token indices sequence length is longer than the" in cl.out: tok_logger.warning( "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" " before being passed to the model." ) return output if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) block_size = 1024 else: if data_args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(data_args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. 
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result with training_args.main_process_first(desc="dataset map tokenization and grouping"): lm_datasets = [] path = Path(data_args.dataset_dir) files = [file.name for file in path.glob("*.txt")] if training_args.debug_mode is True: files = [files[0]] for idx, file in enumerate(files): data_file = os.path.join(path, file) filename = ''.join(file.split(".")[:-1]) cache_path = os.path.join(data_args.data_cache_dir, filename) os.makedirs(cache_path, exist_ok=True) try: processed_dataset = datasets.load_from_disk(cache_path, keep_in_memory=False) logger.info(f'training datasets-{filename} has been loaded from disk') except Exception: cache_dir = os.path.join(data_args.data_cache_dir, filename+"_text") os.makedirs(cache_dir, exist_ok=True) raw_dataset = load_dataset("text", data_files=data_file, cache_dir=cache_dir, keep_in_memory=False) logger.info(f"{file} has been loaded") tokenized_dataset = raw_dataset.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns="text", load_from_cache_file=True, keep_in_memory=False, cache_file_names = {k: os.path.join(cache_dir, 'tokenized.arrow') for k in raw_dataset}, desc="Running tokenizer on dataset", ) grouped_datasets = tokenized_dataset.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=True, keep_in_memory=False, cache_file_names = {k: os.path.join(cache_dir, 'grouped.arrow') for k in tokenized_dataset}, desc=f"Grouping texts in chunks of {block_size}", ) processed_dataset = grouped_datasets processed_dataset.save_to_disk(cache_path) if idx == 0: lm_datasets = processed_dataset['train'] else: assert lm_datasets.features.type == processed_dataset["train"].features.type lm_datasets = concatenate_datasets([lm_datasets, processed_dataset["train"]]) lm_datasets = lm_datasets.train_test_split(test_size = data_args.validation_split_percentage) if training_args.do_train: train_dataset = lm_datasets['train'] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) logger.info(f"Num train_samples {len(train_dataset)}") logger.info("training example:") logger.info(tokenizer.decode(train_dataset[0]['input_ids'])) if training_args.do_eval: eval_dataset = lm_datasets["test"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) logger.info(f"Num eval_samples {len(eval_dataset)}") logger.info("training example:") logger.info(tokenizer.decode(eval_dataset[0]['input_ids'])) if model_args.model_name_or_path: torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) model = LlamaForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), 
config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, low_cpu_mem_usage=True ) else: model = AutoModelForCausalLM.from_config(config) n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") model_vocab_size = model.get_output_embeddings().weight.size(0) if not ( (model_vocab_size==32000 and len(tokenizer)==49953) or \ (model_vocab_size==32000 and len(tokenizer)==32000) or \ (model_vocab_size==49953 and len(tokenizer)==49953) or \ (model_vocab_size==49954 and len(tokenizer)==49954) ): raise ValueError( f"The combination of base model (size: {model_vocab_size}) and tokenizer (size: {len(tokenizer)}) is not a valid configuration. Please check our project wiki for further information. \n" "Valid configurations (base model / tokenizer):\n" "- Continue pre-training original LLaMA: 32000 / 32000 \n" "- Pre-training Chinese LLaMA based on original LLaMA: 32000 / 49953 \n" "- Continue pre-training Chinese LLaMA: 49953 / 49953 \n" "- Continue pre-training Chinese Alpaca: 49954 / 49954 \n") model.resize_token_embeddings(len(tokenizer)) if training_args.peft_path is not None: logger.info("Peft from pre-trained model") model = PeftModel.from_pretrained(model, training_args.peft_path) else: logger.info("Init new peft model") target_modules = training_args.trainable.split(',') modules_to_save = training_args.modules_to_save if modules_to_save is not None: modules_to_save = modules_to_save.split(',') lora_rank = training_args.lora_rank lora_dropout = training_args.lora_dropout lora_alpha = training_args.lora_alpha logger.info(f"target_modules: {target_modules}") logger.info(f"lora_rank: {lora_rank}") peft_config = LoraConfig( task_type=TaskType.CAUSAL_LM, target_modules=target_modules, inference_mode=False, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, modules_to_save=modules_to_save) model = get_peft_model(model, peft_config) model.print_trainable_parameters() old_state_dict = model.state_dict model.state_dict = ( lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict()) ).__get__(model, type(model)) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=fault_tolerance_data_collator, compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None, preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval and not is_torch_tpu_available() else None, ) trainer.add_callback(SavePeftModelCallback) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = 
data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if __name__ == "__main__": main()
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
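The group_texts helper in run_clm_pt_with_peft.py concatenates all tokenized examples and slices the result into fixed-length blocks, copying input_ids into labels for causal LM training. Below is a minimal, dependency-free sketch of that chunking step; the token ids and the small block size are invented purely for illustration.

from itertools import chain

def group_texts(examples, block_size=8):
    # Concatenate all token lists per column, then split into block_size chunks.
    concatenated = {k: list(chain(*examples[k])) for k in examples}
    total_length = len(concatenated[next(iter(examples))])
    total_length = (total_length // block_size) * block_size  # drop the remainder
    result = {
        k: [t[i:i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated.items()
    }
    # For causal LM training the labels are simply a copy of the inputs.
    result["labels"] = result["input_ids"].copy()
    return result

# Toy batch: three "documents" of different lengths (ids are made up).
batch = {"input_ids": [[1, 2, 3, 4, 5], [6, 7, 8], [9, 10, 11, 12, 13, 14]],
         "attention_mask": [[1] * 5, [1] * 3, [1] * 6]}
chunks = group_texts(batch, block_size=4)
print(chunks["input_ids"])  # [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
print(chunks["labels"])     # same as input_ids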
scripts/training/run_clm_sft_with_peft.py
Python
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from pathlib import Path import datasets import torch from build_dataset import build_instruction_dataset, DataCollatorForSupervisedDataset import transformers from transformers import ( CONFIG_MAPPING, AutoConfig, AutoModelForCausalLM, LlamaForCausalLM, LlamaTokenizer, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import send_example_telemetry from transformers.utils.versions import require_version from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR IGNORE_INDEX = -100 DEFAULT_PAD_TOKEN = "[PAD]" DEFAULT_EOS_TOKEN = "</s>" DEFAULT_BOS_TOKEN = "<s>" DEFAULT_UNK_TOKEN = "<unk>" require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") class SavePeftModelCallback(transformers.TrainerCallback): def save_model(self, args, state, kwargs): if state.best_model_checkpoint is not None: checkpoint_folder = os.path.join(state.best_model_checkpoint, "sft_lora_model") else: checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") peft_model_path = os.path.join(checkpoint_folder, "sft_lora_model") kwargs["model"].save_pretrained(peft_model_path) kwargs["tokenizer"].save_pretrained(peft_model_path) def on_save(self, args, state, control, **kwargs): self.save_model(args, state, kwargs) return control def on_train_end(self, args, state, control, **kwargs): peft_model_path = os.path.join(args.output_dir, "sft_lora_model") kwargs["model"].save_pretrained(peft_model_path) kwargs["tokenizer"].save_pretrained(peft_model_path) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The tokenizer for weights initialization.Don't set if you want to train a model from scratch." ) }, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_dir: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[float] = field( default=0.05, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) data_cache_dir: Optional[str] = field(default=None, metadata={"help": "The datasets processed stored"}) max_seq_length: Optional[int] = field(default=512) @dataclass class MyTrainingArguments(TrainingArguments): trainable : Optional[str] = field(default="q_proj,v_proj") lora_rank : Optional[int] = field(default=8) lora_dropout : Optional[float] = field(default=0.1) lora_alpha : Optional[float] = field(default=32.) 
modules_to_save : Optional[str] = field(default=None) peft_path : Optional[str] = field(default=None) force_resize_embeddings: bool = field(default=False) logger = logging.getLogger(__name__) def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN, handlers=[logging.StreamHandler(sys.stdout)],) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # transformers.tokenization_utils.logging.set_verbosity_warning() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed) config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.tokenizer_name_or_path: tokenizer = LlamaTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if (len(tokenizer))!=49954: raise ValueError(f"The vocab size of the tokenizer must be 49954, but found {len(tokenizer)}.\n" "Please use Chinese Alpaca tokenizer!") if tokenizer.pad_token is None: print(f"Adding pad token {DEFAULT_PAD_TOKEN}") tokenizer.add_special_tokens(dict(pad_token=DEFAULT_PAD_TOKEN)) data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer) eval_dataset=None train_dataset = None if training_args.do_train: with training_args.main_process_first(desc="loading and tokenization"): path = Path(data_args.dataset_dir) files = [os.path.join(path,file.name) for file in path.glob("*.json")] logger.info(f"Training files: {' '.join(files)}") train_dataset = build_instruction_dataset( data_path=files, tokenizer=tokenizer, max_seq_length=data_args.max_seq_length, data_cache_dir = None, preprocessing_num_workers = data_args.preprocessing_num_workers) logger.info(f"Num train_samples {len(train_dataset)}") logger.info("training example:") logger.info(tokenizer.decode(train_dataset[0]['input_ids'])) if training_args.do_eval: with training_args.main_process_first(desc="loading and tokenization"): files = [data_args.validation_file] logger.info(f"Evaluation files: {' '.join(files)}") eval_dataset = build_instruction_dataset( data_path=files, tokenizer=tokenizer, max_seq_length=data_args.max_seq_length, data_cache_dir = None, preprocessing_num_workers = data_args.preprocessing_num_workers) logger.info(f"Num eval_samples {len(eval_dataset)}") logger.info("eval example:") logger.info(tokenizer.decode(eval_dataset[0]['input_ids'])) if model_args.model_name_or_path: torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) model = LlamaForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, low_cpu_mem_usage=True ) else: model = AutoModelForCausalLM.from_config(config) n_params = sum({p.data_ptr(): 
p.numel() for p in model.parameters()}.values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") logger.info(f"len(tokenizer):{len(tokenizer)}") embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) != embedding_size: logger.info("resize the embedding size by the size of the tokenizer") model.resize_token_embeddings(len(tokenizer)) if training_args.peft_path is not None: logger.info("Peft from pre-trained model") model = PeftModel.from_pretrained(model, training_args.peft_path) else: logger.info("Init new peft model") target_modules = training_args.trainable.split(',') modules_to_save = training_args.modules_to_save if modules_to_save is not None: modules_to_save = modules_to_save.split(',') lora_rank = training_args.lora_rank lora_dropout = training_args.lora_dropout lora_alpha = training_args.lora_alpha logger.info(f"target_modules: {target_modules}") logger.info(f"lora_rank: {lora_rank}") peft_config = LoraConfig( task_type=TaskType.CAUSAL_LM, target_modules=target_modules, inference_mode=False, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, modules_to_save=modules_to_save) model = get_peft_model(model, peft_config) #model.base_model.tie_weights() model.print_trainable_parameters() logger.info(f"model.modules_to_save: {model.modules_to_save}") old_state_dict = model.state_dict model.state_dict = ( lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict()) ).__get__(model, type(model)) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, data_collator=data_collator, ) trainer.add_callback(SavePeftModelCallback) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics metrics["train_samples"] = len(train_dataset) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() metrics["eval_samples"] =len(eval_dataset) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if __name__ == "__main__": main()
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
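build_instruction_dataset and DataCollatorForSupervisedDataset are imported by run_clm_sft_with_peft.py but are not included in this dump. A common pattern for supervised fine-tuning, consistent with the IGNORE_INDEX = -100 constant defined in the script, is to mask the prompt portion of each example so only response tokens contribute to the loss. The sketch below illustrates that labeling scheme with invented token ids; it is not the repository's actual implementation.

IGNORE_INDEX = -100  # ignored by PyTorch's CrossEntropyLoss by default

def build_labels(prompt_ids, response_ids, eos_id=2):
    """Concatenate prompt + response and mask the prompt part of the labels."""
    input_ids = prompt_ids + response_ids + [eos_id]
    labels = [IGNORE_INDEX] * len(prompt_ids) + response_ids + [eos_id]
    return input_ids, labels

# Invented ids: five prompt tokens followed by three response tokens.
input_ids, labels = build_labels([10, 11, 12, 13, 14], [20, 21, 22])
print(input_ids)  # [10, 11, 12, 13, 14, 20, 21, 22, 2]
print(labels)     # [-100, -100, -100, -100, -100, 20, 21, 22, 2]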
scripts/training/run_pt.sh
Shell
lr=2e-4
lora_rank=8
lora_alpha=32
lora_trainable="q_proj,v_proj,k_proj,o_proj,gate_proj,down_proj,up_proj"
modules_to_save="embed_tokens,lm_head"
lora_dropout=0.05

pretrained_model=path/to/hf/llama/dir
chinese_tokenizer_path=path/to/chinese/llama/tokenizer/dir
dataset_dir=path/to/pt/data/dir
data_cache=temp_data_cache_dir
per_device_train_batch_size=1
per_device_eval_batch_size=1
gradient_accumulation_steps=8
output_dir=output_dir

deepspeed_config_file=ds_zero2_no_offload.json

torchrun --nnodes 1 --nproc_per_node 1 run_clm_pt_with_peft.py \
    --deepspeed ${deepspeed_config_file} \
    --model_name_or_path ${pretrained_model} \
    --tokenizer_name_or_path ${chinese_tokenizer_path} \
    --dataset_dir ${dataset_dir} \
    --data_cache_dir ${data_cache} \
    --validation_split_percentage 0.001 \
    --per_device_train_batch_size ${per_device_train_batch_size} \
    --per_device_eval_batch_size ${per_device_eval_batch_size} \
    --do_train \
    --seed $RANDOM \
    --fp16 \
    --num_train_epochs 1 \
    --lr_scheduler_type cosine \
    --learning_rate ${lr} \
    --warmup_ratio 0.05 \
    --weight_decay 0.01 \
    --logging_strategy steps \
    --logging_steps 10 \
    --save_strategy steps \
    --save_total_limit 3 \
    --save_steps 200 \
    --gradient_accumulation_steps ${gradient_accumulation_steps} \
    --preprocessing_num_workers 8 \
    --block_size 512 \
    --output_dir ${output_dir} \
    --overwrite_output_dir \
    --ddp_timeout 30000 \
    --logging_first_step True \
    --lora_rank ${lora_rank} \
    --lora_alpha ${lora_alpha} \
    --trainable ${lora_trainable} \
    --modules_to_save ${modules_to_save} \
    --lora_dropout ${lora_dropout} \
    --torch_dtype float16 \
    --gradient_checkpointing \
    --ddp_find_unused_parameters False
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
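With per_device_train_batch_size=1, gradient_accumulation_steps=8 and a single process, the launcher above yields an effective batch of 8 sequences of block_size=512 tokens per optimizer step. A small helper for sanity-checking that arithmetic when scaling the script to more GPUs; the numbers mirror the defaults in run_pt.sh.

def effective_batch(per_device_batch, grad_accum_steps, world_size, block_size):
    # Sequences (and tokens) consumed per optimizer step across all devices.
    sequences = per_device_batch * grad_accum_steps * world_size
    return sequences, sequences * block_size

seqs, tokens = effective_batch(per_device_batch=1, grad_accum_steps=8,
                               world_size=1, block_size=512)
print(seqs, tokens)  # 8 sequences, 4096 tokens per optimizer step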
scripts/training/run_sft.sh
Shell
lr=1e-4
lora_rank=8
lora_alpha=32
lora_trainable="q_proj,v_proj,k_proj,o_proj,gate_proj,down_proj,up_proj"
modules_to_save="embed_tokens,lm_head"
lora_dropout=0.05

pretrained_model=path/to/hf/llama/or/merged/llama/dir/or/model_id
chinese_tokenizer_path=path/to/chinese/llama/tokenizer/dir
dataset_dir=path/to/sft/data/dir
per_device_train_batch_size=1
per_device_eval_batch_size=1
gradient_accumulation_steps=8
output_dir=output_dir
peft_model=path/to/peft/model/dir
validation_file=validation_file_name

deepspeed_config_file=ds_zero2_no_offload.json

torchrun --nnodes 1 --nproc_per_node 1 run_clm_sft_with_peft.py \
    --deepspeed ${deepspeed_config_file} \
    --model_name_or_path ${pretrained_model} \
    --tokenizer_name_or_path ${chinese_tokenizer_path} \
    --dataset_dir ${dataset_dir} \
    --validation_split_percentage 0.001 \
    --per_device_train_batch_size ${per_device_train_batch_size} \
    --per_device_eval_batch_size ${per_device_eval_batch_size} \
    --do_train \
    --do_eval \
    --seed $RANDOM \
    --fp16 \
    --num_train_epochs 1 \
    --lr_scheduler_type cosine \
    --learning_rate ${lr} \
    --warmup_ratio 0.03 \
    --weight_decay 0 \
    --logging_strategy steps \
    --logging_steps 10 \
    --save_strategy steps \
    --save_total_limit 3 \
    --evaluation_strategy steps \
    --eval_steps 100 \
    --save_steps 200 \
    --gradient_accumulation_steps ${gradient_accumulation_steps} \
    --preprocessing_num_workers 8 \
    --max_seq_length 512 \
    --output_dir ${output_dir} \
    --overwrite_output_dir \
    --ddp_timeout 30000 \
    --logging_first_step True \
    --lora_rank ${lora_rank} \
    --lora_alpha ${lora_alpha} \
    --trainable ${lora_trainable} \
    --modules_to_save ${modules_to_save} \
    --lora_dropout ${lora_dropout} \
    --torch_dtype float16 \
    --validation_file ${validation_file} \
    --peft_path ${peft_model} \
    --gradient_checkpointing \
    --ddp_find_unused_parameters False
ymcui/Chinese-LLaMA-Alpaca
18,964
中文LLaMA&Alpaca大语言模型+本地CPU/GPU训练部署 (Chinese LLaMA & Alpaca LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
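run_sft.sh passes --peft_path so that run_clm_sft_with_peft.py loads an existing LoRA adapter instead of initializing a new one. The sketch below isolates that branch; the model and adapter paths are placeholders, the LoRA hyperparameters echo the shell defaults, and the snippet assumes the transformers and peft packages are installed. It is only a sketch of the branching logic, not a drop-in replacement for the script.

from transformers import AutoModelForCausalLM
from peft import LoraConfig, PeftModel, TaskType, get_peft_model

base = AutoModelForCausalLM.from_pretrained("path/to/base/model")  # placeholder path

peft_path = None  # e.g. "path/to/peft/model/dir", the --peft_path value from run_sft.sh
if peft_path is not None:
    # Load an existing adapter on top of the base model, as the script does.
    model = PeftModel.from_pretrained(base, peft_path)
else:
    # Otherwise initialize a fresh LoRA adapter with the launcher's defaults.
    config = LoraConfig(task_type=TaskType.CAUSAL_LM,
                        target_modules=["q_proj", "v_proj"],
                        r=8, lora_alpha=32, lora_dropout=0.05)
    model = get_peft_model(base, config)
model.print_trainable_parameters()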
scripts/ceval/eval.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import os import argparse import pandas as pd import torch import json from llama_evaluator import Llama_Evaluator import time choices = ["A", "B", "C", "D"] def main(args, evaluator, take): assert os.path.exists("subject_mapping.json"), "subject_mapping.json not found!" with open("subject_mapping.json") as f: subject_mapping = json.load(f) filenames = os.listdir("data/val") subject_list = [val_file.replace("_val.csv","") for val_file in filenames] accuracy, summary = {}, {} run_date=time.strftime('%Y-%m-%d_%H-%M-%S',time.localtime(time.time())) output_dir = args.output_dir save_result_dir=os.path.join(output_dir,f"take{take}") if not os.path.exists(save_result_dir): os.makedirs(save_result_dir,exist_ok=True) all_answers = {} for index,subject_name in enumerate(subject_list): print(f"{index/len(subject_list)} Inference starts at {run_date} on {args.model_path} with subject of {subject_name}!") val_file_path=os.path.join('data/val',f'{subject_name}_val.csv') dev_file_path=os.path.join('data/dev',f'{subject_name}_dev.csv') test_file_path=os.path.join('data/test',f'{subject_name}_test.csv') val_df=pd.read_csv(val_file_path) if args.do_test is False else pd.read_csv(test_file_path) dev_df=pd.read_csv(dev_file_path) if args.few_shot else None correct_ratio, answers = evaluator.eval_subject(subject_name, val_df, dev_df, save_result_dir=save_result_dir if args.do_save_csv else None, few_shot=args.few_shot, with_prompt=args.with_prompt, do_test=args.do_test) print(f"Subject: {subject_name}") print(f"Acc: {correct_ratio}") accuracy[subject_name] = correct_ratio summary[subject_name] = {"score":correct_ratio, "num":len(val_df), "correct":correct_ratio*len(val_df)/100} all_answers[subject_name] = answers json.dump(all_answers,open(save_result_dir+'/submission.json','w'),ensure_ascii=False,indent=4) print("Accuracy:") for k, v in accuracy.items(): print(k, ": ", v) total_num = 0 total_correct = 0 summary['grouped'] = { "STEM": {"correct": 0.0, "num": 0}, "Social Science": {"correct": 0.0, "num": 0}, "Humanities": {"correct": 0.0, "num": 0}, "Other": {"correct": 0.0, "num": 0} } for subj, info in subject_mapping.items(): group = info[2] summary['grouped'][group]["num"] += summary[subj]['num'] summary['grouped'][group]["correct"] += summary[subj]['correct'] for group, info in summary['grouped'].items(): info['score'] = info["correct"] / info["num"] total_num += info["num"] total_correct += info["correct"] summary['All'] = {"score": total_correct / total_num, "num": total_num, "correct": total_correct} json.dump(summary,open(save_result_dir+'/summary.json','w'),ensure_ascii=False,indent=2) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--model_path", type=str) parser.add_argument("--few_shot", choices=["False","True"], default="True") parser.add_argument("--ntrain", "-k", type=int, default=5) parser.add_argument("--with_prompt", choices=["False","True"], default="False") parser.add_argument("--use_flash_attention_2", action="store_true") parser.add_argument("--n_times", default=1,type=int) parser.add_argument("--do_save_csv", choices=["False","True"], default="False") parser.add_argument("--output_dir", type=str) parser.add_argument("--do_test", choices=["False","True"], default="False") parser.add_argument("--verbose", action="store_true", help="Print detailed information of each example.") args = parser.parse_args() args.few_shot = args.few_shot == "True" args.with_prompt = 
args.with_prompt == "True" args.do_test = args.do_test == "True" args.do_save_csv = args.do_save_csv == "True" args.n_times=max(args.n_times,1) print(args) # Move the model to the MPS device if available if torch.backends.mps.is_available(): device = torch.device("mps") else: device = torch.device(0) print(f"Using device: {device}") evaluator=Llama_Evaluator( choices=choices, k=args.ntrain, model_path=args.model_path, device=device, use_flash_attention_2=args.use_flash_attention_2, verbose=args.verbose ) for i in range(args.n_times): main(args,evaluator=evaluator,take=i)
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
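eval.py accepts --few_shot, --with_prompt, --do_test and --do_save_csv as the literal strings "True"/"False" and converts them to booleans after parsing. The same effect can be obtained with a small argparse type helper; the sketch below shows that pattern, and the flag name is used only for illustration.

import argparse

def str2bool(value):
    # Accept the same literal strings eval.py expects on the command line.
    if value in ("True", "true", "1"):
        return True
    if value in ("False", "false", "0"):
        return False
    raise argparse.ArgumentTypeError(f"expected True or False, got {value!r}")

parser = argparse.ArgumentParser()
parser.add_argument("--few_shot", type=str2bool, default=True)
args = parser.parse_args(["--few_shot", "False"])
print(args.few_shot)  # False (a real bool, no post-processing needed)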
scripts/ceval/llama_evaluator.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import os import re from tqdm import tqdm import random import numpy as np import torch from transformers import AutoModelForCausalLM, AutoTokenizer from transformers import GenerationConfig DEFAULT_SYSTEM_PROMPT = """You are a helpful assistant. 你是一个乐于助人的助手。""" system_format='<|start_header_id|>system<|end_header_id|>\n\n{content}<|eot_id|>' user_format='<|start_header_id|>user<|end_header_id|>\n\n{content}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' assistant_format='{content}<|eot_id|>' TASK2DESC = { "high_school_physics": "高中物理", "fire_engineer": "注册消防工程师", "computer_network": "计算机网络", "advanced_mathematics": "高等数学", "logic": "逻辑学", "middle_school_physics": "初中物理", "clinical_medicine": "临床医学", "probability_and_statistics": "概率统计", "ideological_and_moral_cultivation": "思想道德修养与法律基础", "operating_system": "操作系统", "middle_school_mathematics": "初中数学", "chinese_language_and_literature": "中国语言文学", "electrical_engineer": "注册电气工程师", "business_administration": "工商管理", "high_school_geography": "高中地理", "modern_chinese_history": "近代史纲要", "legal_professional": "法律职业资格", "middle_school_geography": "初中地理", "middle_school_chemistry": "初中化学", "high_school_biology": "高中生物", "high_school_chemistry": "高中化学", "physician": "医师资格", "high_school_chinese": "高中语文", "tax_accountant": "税务师", "high_school_history": "高中历史", "mao_zedong_thought": "毛泽东思想和中国特色社会主义理论概论", "high_school_mathematics": "高中数学", "professional_tour_guide": "导游资格", "veterinary_medicine": "兽医学", "environmental_impact_assessment_engineer": "环境影响评价工程师", "basic_medicine": "基础医学", "education_science": "教育学", "urban_and_rural_planner": "注册城乡规划师", "middle_school_biology": "初中生物", "plant_protection": "植物保护", "middle_school_history": "初中历史", "high_school_politics": "高中政治", "metrology_engineer": "注册计量师", "art_studies": "艺术学", "college_economics": "大学经济学", "college_chemistry": "大学化学", "law": "法学", "sports_science": "体育学", "civil_servant": "公务员", "college_programming": "大学编程", "middle_school_politics": "初中政治", "teacher_qualification": "教师资格", "computer_architecture": "计算机组成", "college_physics": "大学物理", "discrete_mathematics": "离散数学", "marxism": "马克思主义基本原理", "accountant": "注册会计师", } class Llama_Evaluator(): def __init__(self, choices, k, model_path, device, use_flash_attention_2=False, verbose=False): load_type = torch.float16 self.choices = choices self.k = k self.device = device self.verbose = verbose self.use_flash_attention_2 = use_flash_attention_2 self.tokenizer = AutoTokenizer.from_pretrained(model_path) self.model = AutoModelForCausalLM.from_pretrained( model_path, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto', attn_implementation="flash_attention_2" if self.use_flash_attention_2 else "sdpa" ) self.generation_config = GenerationConfig( temperature=0.2, top_k=0, top_p=1.0, do_sample=True, num_beams=1, repetition_penalty=1.1, max_new_tokens=1, output_scores=True, return_dict_in_generate=True ) self.sA_id = self.tokenizer.encode("A", add_special_tokens=False)[0] self.sB_id = self.tokenizer.encode("B", add_special_tokens=False)[0] self.sC_id = self.tokenizer.encode("C", add_special_tokens=False)[0] self.sD_id = self.tokenizer.encode("D", add_special_tokens=False)[0] self.A_id = self.tokenizer.encode(":A")[-1] self.B_id = self.tokenizer.encode(":B")[-1] self.C_id = self.tokenizer.encode(":C")[-1] self.D_id = self.tokenizer.encode(":D")[-1] def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, save_result_dir=None, 
with_prompt=False, do_test=False): all_answers = {} correct_num = 0 if save_result_dir: result = [] score = [] history = f"以下是中国关于{TASK2DESC[subject_name]}考试的单项选择题,请选出其中的正确答案。\n\n" if few_shot: if with_prompt: history = self.generate_alpaca3_few_shot_prompt(history, dev_df, subject=TASK2DESC[subject_name]) else: history = self.generate_llama3_few_shot_prompt(history, dev_df) answers = ['NA'] * len(test_df) if do_test is True else list(test_df['answer']) for row_index, row in tqdm(test_df.iterrows(), total=len(test_df)): question = self.format_example(row, few_shot=False) instruction = history + question if with_prompt: if few_shot: instruction = history + user_format.format_map({'content': question}) else: instruction = system_format.format(content=DEFAULT_SYSTEM_PROMPT) + user_format.format_map({'content': instruction}) inputs = self.tokenizer(instruction, return_tensors="pt") terminators = [ self.tokenizer.eos_token_id, self.tokenizer.convert_tokens_to_ids("<|eot_id|>") ] generation_output = self.model.generate( input_ids = inputs["input_ids"].to(self.device), attention_mask = inputs['attention_mask'].to(self.device), eos_token_id=terminators, pad_token_id=self.tokenizer.eos_token_id, generation_config = self.generation_config ) logits = generation_output.scores[0][0] logits = logits.float().cpu().detach() choices1_logits = logits[[self.sA_id,self.sB_id,self.sC_id,self.sD_id]] choices2_logits = logits[[self.A_id,self.B_id,self.C_id,self.D_id]] choicesAll_logits = (choices1_logits + choices2_logits).numpy() assert not (np.any(np.isinf(choicesAll_logits)) or np.any(np.isnan(choicesAll_logits))) ans = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(choicesAll_logits)] response = self.tokenizer.decode([logits.argmax(-1).item()]) if ans == answers[row_index]: correct_num += 1 correct = 1 else: correct = 0 if self.verbose is True: print(f"\n======={str(row_index)}=======") print(f"question: {question}\n") print(f"instruction: {instruction}\n") print(f"response: {response}\n") print(f"extracted answer: {ans}") print(f"ground truth: {answers[row_index]} \n") if save_result_dir: result.append(response) score.append(correct) all_answers[str(row_index)] = ans correct_ratio = 100*correct_num/len(answers) if save_result_dir: test_df['model_output'] = result test_df['correctness'] = score test_df.to_csv(os.path.join(save_result_dir, f'{subject_name}_test.csv')) return correct_ratio, all_answers def format_example(self, line, few_shot=False): example = line['question'] for choice in self.choices: example += f'\n{choice}. {line[f"{choice}"]}' if few_shot: example += '\n答案:' + line["answer"] + '\n\n' else: example += '\n答案:' return example def generate_llama3_few_shot_prompt(self, history, dev_df): prompt = history k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): prompt += self.format_example(dev_df.iloc[i, :], few_shot=True) return prompt def generate_alpaca3_few_shot_prompt(self, history, dev_df, subject=None): prompt = history prompt_template = ( "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|>" "<|start_header_id|>user<|end_header_id|>\n\n{instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" "好的,我会结合{subject}相关知识回答<|eot_id|>" ) prompt = prompt_template.format_map({'instruction':prompt, 'system_prompt':DEFAULT_SYSTEM_PROMPT, 'subject':subject}) k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): line = dev_df.iloc[i, :] q=line['question'] for choice in self.choices: q += f'\n{choice}. 
{line[f"{choice}"]}' a = line['answer'] q += "\n答案:" prompt += user_format.format(content=q) + assistant_format.format(content=a) return prompt
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
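eval_subject above never parses generated text to pick an answer: it reads the logits of the first generated position at the token ids for "A"/"B"/"C"/"D" (both with and without a leading colon), sums the two views, and takes the argmax. A dependency-light sketch of that scoring step follows; the token ids and logit values are invented.

import numpy as np

# Invented vocabulary positions for "A"/"B"/"C"/"D" and ":A"/":B"/":C"/":D".
plain_ids = [101, 102, 103, 104]
colon_ids = [201, 202, 203, 204]

# Invented logits for the first generated position (vocab size 300).
logits = np.full(300, -2.0)
logits[[101, 102, 103, 104]] = [0.5, 1.0, 3.2, 0.1]   # scores for "A".."D"
logits[[201, 202, 203, 204]] = [0.2, 0.9, 2.7, 0.3]   # scores for ":A"..":D"

combined = logits[plain_ids] + logits[colon_ids]      # sum the two views
answer = {0: "A", 1: "B", 2: "C", 3: "D"}[int(np.argmax(combined))]
print(answer)  # C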
scripts/cmmlu/categories.py
Python
# This code is modified from CMMLU Project: https://github.com/haonan-li/CMMLU name_en2zh = { "agronomy": "农学", "anatomy": "解剖学", "ancient_chinese": "古汉语", "arts": "艺术学", "astronomy": "天文学", "business_ethics": "商业伦理", "chinese_civil_service_exam": "中国公务员考试", "chinese_driving_rule": "中国驾驶规则", "chinese_food_culture": "中国饮食文化", "chinese_foreign_policy": "中国外交政策", "chinese_history":"中国历史", "chinese_literature": "中国文学", "chinese_teacher_qualification": "中国教师资格", "clinical_knowledge": "临床知识", "college_actuarial_science":"大学精算学", "college_education":"大学教育学", "college_engineering_hydrology": "大学工程水文学", "college_law": "大学法律", "college_mathematics": "大学数学", "college_medical_statistics":"大学医学统计", "college_medicine": "大学医学", "computer_science": "计算机科学", "computer_security": "计算机安全", "conceptual_physics": "概念物理学", "construction_project_management": "建设工程管理", "economics": "经济学", "education": "教育学", "electrical_engineering": "电气工程", "elementary_chinese":"小学语文", "elementary_commonsense":"小学常识", "elementary_information_and_technology": "小学信息技术", "elementary_mathematics": "初等数学", "ethnology": "民族学", "food_science": "食品科学", "genetics": "遗传学", "global_facts": "全球事实", "high_school_biology": "高中生物", "high_school_chemistry": "高中化学", "high_school_geography": "高中地理", "high_school_mathematics": "高中数学", "high_school_physics": "高中物理学", "high_school_politics": "高中政治", "human_sexuality": "人类性行为", "international_law": "国际法学", "journalism": "新闻学", "jurisprudence": "法理学", "legal_and_moral_basis": "法律与道德基础", "logical": "逻辑学", "machine_learning": "机器学习", "management": "管理学", "marketing": "市场营销", "marxist_theory": "马克思主义理论", "modern_chinese": "现代汉语", "nutrition": "营养学", "philosophy": "哲学", "professional_accounting": "专业会计", "professional_law": "专业法学", "professional_medicine": "专业医学", "professional_psychology": "专业心理学", "public_relations": "公共关系", "security_study":"安全研究", "sociology": "社会学", "sports_science": "体育学", "traditional_chinese_medicine": "中医中药", "virology": "病毒学", "world_history":"世界历史", "world_religions": "世界宗教", } subcategories = { "agronomy": ['other'], "anatomy": ['biology'], "ancient_chinese": ['linguistics','china specific'], "arts": ['arts'], "astronomy": ['physics'], "business_ethics": ['business'], "chinese_civil_service_exam": ['politics','china specific'], "chinese_driving_rule": ['other','china specific'], "chinese_food_culture": ['culture','china specific'], "chinese_foreign_policy": ['politics','china specific'], "chinese_history":['history','china specific'], "chinese_literature": ['literature','china specific'], "chinese_teacher_qualification": ['education','china specific'], "college_actuarial_science":['math'], "college_education":['education'], "college_engineering_hydrology": ['engineering'], "college_law": ['law'], "college_mathematics": ['math'], "college_medical_statistics":['statistics'], "clinical_knowledge": ['other'], "college_medicine": ['other'], "computer_science": ['computer science'], "computer_security": ['other'], "conceptual_physics": ['physics'], "construction_project_management": ['other','china specific'], "economics": ['economics'], "education": ['education'], "elementary_chinese":['linguistics','china specific'], "elementary_commonsense":['other','china specific'], "elementary_information_and_technology": ['other'], "electrical_engineering": ['engineering'], "elementary_mathematics": ['math'], "ethnology": ['culture','china specific'], "food_science": ['other'], "genetics": ['biology'], "global_facts": ['global'], "high_school_biology": ['biology'], "high_school_chemistry": 
['chemistry'], "high_school_geography": ['geography'], "high_school_mathematics": ['math'], "high_school_physics": ['physics'], "high_school_politics": ['politics','china specific'], "human_sexuality": ['other'], "international_law": ['law'], "journalism": ['sociology'], "jurisprudence": ['law'], "legal_and_moral_basis": ['other'], "logical": ['philosophy'], "machine_learning": ['computer science'], "management": ['business'], "marketing": ['business'], "marxist_theory": ['philosophy'], "modern_chinese": ['linguistics','china specific'], "nutrition": ['other'], "philosophy": ['philosophy'], "professional_accounting": ['business'], "professional_law": ['law'], "professional_medicine": ['other'], "professional_psychology": ['psychology'], "public_relations": ['politics'], "security_study": ['politics'], "sociology": ['culture'], "sports_science": ['other'], "traditional_chinese_medicine": ['other','china specific'], "virology": ['biology'], "world_history":['history'], "world_religions": ['global'], } categories = { "STEM": ["physics", "chemistry", "biology", "computer science", "math", "engineering", "statistics"], "Humanities": ["history", "philosophy", "law", "arts", "literature", "global"], "Social Science": ['linguistics',"business", "politics", "culture", "economics", "geography", "psychology", "education", "sociology"], "Other":["other"], "China specific": ["china specific"], }
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
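categories.py only stores lookup tables; cmmlu/eval.py inverts them so that each top-level category maps to its list of subjects. The sketch below performs the same inversion on a trimmed copy of the tables (the full dictionaries live in the file above).

from collections import defaultdict

# Trimmed copies of the tables in categories.py.
subcategories = {"astronomy": ["physics"],
                 "ancient_chinese": ["linguistics", "china specific"],
                 "marketing": ["business"]}
categories = {"STEM": ["physics", "chemistry", "biology", "math"],
              "Social Science": ["linguistics", "business"],
              "China specific": ["china specific"]}

category2subject = defaultdict(list)
for cat, subcats in categories.items():
    for subject, tags in subcategories.items():
        if any(tag in subcats for tag in tags):
            category2subject[cat].append(subject)

print(dict(category2subject))
# {'STEM': ['astronomy'],
#  'Social Science': ['ancient_chinese', 'marketing'],
#  'China specific': ['ancient_chinese']}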
scripts/cmmlu/eval.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import os import argparse import pandas as pd import torch import json from llama_evaluator import Llama_Evaluator from glob import glob import time from collections import defaultdict from categories import name_en2zh, subcategories, categories choices = ["A", "B", "C", "D"] category2subject = defaultdict(list) for k,v in categories.items(): for subject, subcat in subcategories.items(): for c in subcat: if c in v: category2subject[k].append(subject) category2subject_list = defaultdict(list) for key,value in category2subject.items(): for val in value: category2subject_list[val]=[val,name_en2zh[val],key] category2subject= category2subject_list choices = ["A", "B", "C", "D"] def main(args, evaluator,take): subject_mapping = category2subject #json.load(f) filenames = [s.split('/')[-1] for s in glob(args.input_dir+"/test/*csv")] subject_list = [val_file.replace(".csv","") for val_file in filenames] accuracy, summary = {}, {} run_date=time.strftime('%Y-%m-%d_%H-%M-%S',time.localtime(time.time())) output_dir = args.output_dir save_result_dir=os.path.join(output_dir,f"take{take}") if not os.path.exists(save_result_dir): os.makedirs(save_result_dir,exist_ok=True) all_answers = {} for index,subject_name in enumerate(subject_list): print(f"{index/len(subject_list)} Inference starts at {run_date} on {args.model_path} with subject of {subject_name}!") val_file_path=os.path.join(args.input_dir+'/test',f'{subject_name}.csv') dev_file_path=os.path.join(args.input_dir+'/dev',f'{subject_name}.csv') val_df=pd.read_csv(val_file_path) dev_df=pd.read_csv(dev_file_path) if args.few_shot else None correct_ratio, answers = evaluator.eval_subject(name_en2zh[subject_name], val_df, dev_df, save_result_dir=save_result_dir if args.do_save_csv else None, few_shot=args.few_shot, with_prompt=args.with_prompt, do_test=False) print(f"Subject: {subject_name}") print(f"Acc: {correct_ratio}") accuracy[subject_name] = correct_ratio summary[subject_name] = {"score":correct_ratio, "num":len(val_df), "correct":correct_ratio*len(val_df)/100} all_answers[subject_name] = answers json.dump(all_answers,open(save_result_dir+'/submission.json','w'),ensure_ascii=False,indent=4) print("\n\nModel:",args.model_path) print("Accuracy:") for k, v in accuracy.items(): print(k, ": ", v) total_num = 0 total_correct = 0 summary['grouped'] = { "China specific": {"correct": 0.0, "num": 0}, "STEM": {"correct": 0.0, "num": 0}, "Social Science": {"correct": 0.0, "num": 0}, "Humanities": {"correct": 0.0, "num": 0}, "Other": {"correct": 0.0, "num": 0} } for subj, info in subject_mapping.items(): group = info[2] summary['grouped'][group]["num"] += summary[subj]['num'] summary['grouped'][group]["correct"] += summary[subj]['correct'] for group, info in summary['grouped'].items(): info['score'] = info["correct"] / info["num"] total_num += info["num"] total_correct += info["correct"] summary['All'] = {"score": total_correct / total_num, "num": total_num, "correct": total_correct} json.dump(summary,open(save_result_dir+'/summary.json','w'),ensure_ascii=False,indent=2) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--ntrain", "-k", type=int, default=5) parser.add_argument("--model_path", type=str) parser.add_argument("--few_shot", choices=["False","True"], default="True") parser.add_argument("--with_prompt", choices=["False","True"], default="False") parser.add_argument("--use_flash_attention_2", action="store_true") 
parser.add_argument("--n_times", default=1,type=int) parser.add_argument("--do_save_csv", choices=["False","True"], default="False") parser.add_argument("--output_dir", type=str) parser.add_argument("--input_dir", type=str) parser.add_argument("--verbose", action="store_true", help="Print detailed information of each example.") args = parser.parse_args() args.few_shot = args.few_shot == "True" args.with_prompt = args.with_prompt == "True" args.do_save_csv = args.do_save_csv == "True" args.n_times=max(args.n_times,1) print(args) # Move the model to the MPS device if available if torch.backends.mps.is_available(): device = torch.device("mps") else: device = torch.device(0) print(f"Using device: {device}") evaluator=Llama_Evaluator( choices=choices, k=args.ntrain, model_path=args.model_path, device=device, use_flash_attention_2=args.use_flash_attention_2, verbose=args.verbose ) for i in range(args.n_times): main(args,evaluator=evaluator,take=i)
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
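Both eval.py scripts aggregate per-subject accuracy into grouped scores by converting each accuracy back into a correct-answer count ("correct" = accuracy * num / 100) and micro-averaging within each group. The arithmetic in isolation, with made-up per-subject numbers:

# Made-up per-subject results: accuracy (%), number of questions, group name.
results = {"astronomy": (60.0, 20, "STEM"),
           "marketing": (75.0, 40, "Social Science"),
           "college_mathematics": (40.0, 30, "STEM")}

grouped = {}
for subject, (acc, num, group) in results.items():
    g = grouped.setdefault(group, {"correct": 0.0, "num": 0})
    g["correct"] += acc * num / 100   # back to a raw count of correct answers
    g["num"] += num

for group, info in grouped.items():
    info["score"] = info["correct"] / info["num"]

print(grouped["STEM"]["score"])            # 0.48 -> 24 correct out of 50
print(grouped["Social Science"]["score"])  # 0.75 -> 30 correct out of 40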
scripts/cmmlu/llama_evaluator.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import os import re from tqdm import tqdm import random import numpy as np import torch from transformers import AutoModelForCausalLM, AutoTokenizer from transformers import GenerationConfig DEFAULT_SYSTEM_PROMPT = """You are a helpful assistant. 你是一个乐于助人的助手。""" system_format='<|start_header_id|>system<|end_header_id|>\n\n{content}<|eot_id|>' user_format='<|start_header_id|>user<|end_header_id|>\n\n{content}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' assistant_format='{content}<|eot_id|>' class Llama_Evaluator(): def __init__(self, choices, k, model_path, device, use_flash_attention_2=False, verbose=False): load_type = torch.float16 self.choices = choices self.k = k self.device = device self.verbose = verbose self.use_flash_attention_2 = use_flash_attention_2 self.tokenizer = AutoTokenizer.from_pretrained(model_path) self.model = AutoModelForCausalLM.from_pretrained( model_path, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto', attn_implementation="flash_attention_2" if self.use_flash_attention_2 else "sdpa" ) self.generation_config = GenerationConfig( temperature=0.2, top_k=0, top_p=1.0, do_sample=True, num_beams=1, repetition_penalty=1.1, max_new_tokens=1, output_scores=True, return_dict_in_generate=True ) self.sA_id = self.tokenizer.encode("A", add_special_tokens=False)[0] self.sB_id = self.tokenizer.encode("B", add_special_tokens=False)[0] self.sC_id = self.tokenizer.encode("C", add_special_tokens=False)[0] self.sD_id = self.tokenizer.encode("D", add_special_tokens=False)[0] self.A_id = self.tokenizer.encode(":A")[-1] self.B_id = self.tokenizer.encode(":B")[-1] self.C_id = self.tokenizer.encode(":C")[-1] self.D_id = self.tokenizer.encode(":D")[-1] def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, save_result_dir=None, with_prompt=False, do_test=False): all_answers = {} correct_num = 0 if save_result_dir: result = [] score = [] history = f"以下是中国关于{subject_name}考试的单项选择题,请选出其中的正确答案。\n\n" if few_shot: if with_prompt: history = self.generate_alpaca3_few_shot_prompt(history, dev_df, subject=subject_name) else: history = self.generate_llama3_few_shot_prompt(history, dev_df) answers = ['NA'] * len(test_df) if do_test is True else list(test_df['Answer']) for row_index, row in tqdm(test_df.iterrows(), total=len(test_df)): question = self.format_example(row, few_shot=False) instruction = history + question if with_prompt: if few_shot: instruction = history + user_format.format_map({'content': question}) else: instruction = system_format.format(content=DEFAULT_SYSTEM_PROMPT) + user_format.format_map({'content': instruction}) inputs = self.tokenizer(instruction, return_tensors="pt") terminators = [ self.tokenizer.eos_token_id, self.tokenizer.convert_tokens_to_ids("<|eot_id|>") ] generation_output = self.model.generate( input_ids = inputs["input_ids"].to(self.device), attention_mask = inputs['attention_mask'].to(self.device), eos_token_id=terminators, pad_token_id=self.tokenizer.eos_token_id, generation_config = self.generation_config ) logits = generation_output.scores[0][0] logits = logits.float().cpu().detach() choices1_logits = logits[[self.sA_id,self.sB_id,self.sC_id,self.sD_id]] choices2_logits = logits[[self.A_id,self.B_id,self.C_id,self.D_id]] choicesAll_logits = (choices1_logits + choices2_logits).numpy() assert not (np.any(np.isinf(choicesAll_logits)) or np.any(np.isnan(choicesAll_logits))) ans = {0: "A", 1: "B", 2: "C", 3: 
"D"}[np.argmax(choicesAll_logits)] response = self.tokenizer.decode([logits.argmax(-1).item()]) if ans == answers[row_index]: correct_num += 1 correct = 1 else: correct = 0 if self.verbose is True: print(f"\n======={str(row_index)}=======") print(f"question: {question}\n") print(f"instruction: {instruction}\n") print(f"response: {response}\n") print(f"extracted answer: {ans}") print(f"ground truth: {answers[row_index]} \n") if save_result_dir: result.append(response) score.append(correct) all_answers[str(row_index)] = ans correct_ratio = 100*correct_num/len(answers) if save_result_dir: test_df['model_output'] = result test_df['correctness'] = score test_df.to_csv(os.path.join(save_result_dir, f'{subject_name}_test.csv')) return correct_ratio, all_answers def format_example(self, line, few_shot=False): example = line['Question'] for choice in self.choices: example += f'\n{choice}. {line[f"{choice}"]}' if few_shot: example += '\n答案:' + line["Answer"] + '\n\n' else: example += '\n答案:' return example def generate_llama3_few_shot_prompt(self, history, dev_df): prompt = history k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): prompt += self.format_example(dev_df.iloc[i, :], few_shot=True) return prompt def generate_alpaca3_few_shot_prompt(self, history, dev_df, subject=None): prompt = history prompt_template = ( "<|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|>" "<|start_header_id|>user<|end_header_id|>\n\n{instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" "好的,我会结合{subject}相关知识回答<|eot_id|>" ) prompt = prompt_template.format_map({'instruction':prompt, 'system_prompt':DEFAULT_SYSTEM_PROMPT, 'subject':subject}) k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): line = dev_df.iloc[i, :] q=line['Question'] for choice in self.choices: q += f'\n{choice}. {line[f"{choice}"]}' a = line['Answer'] q += "\n答案:" prompt += user_format.format(content=q) + assistant_format.format(content=a) return prompt
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
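The evaluators build their instruction-mode prompts by chaining the system/user/assistant format strings defined at the top of the file. The sketch below lays out one few-shot exchange followed by the real question; the question and answer text are invented.

system_format = '<|start_header_id|>system<|end_header_id|>\n\n{content}<|eot_id|>'
user_format = ('<|start_header_id|>user<|end_header_id|>\n\n{content}<|eot_id|>'
               '<|start_header_id|>assistant<|end_header_id|>\n\n')
assistant_format = '{content}<|eot_id|>'

SYSTEM = "You are a helpful assistant. 你是一个乐于助人的助手。"

# One invented few-shot example followed by the actual question.
shot_q = "1 + 1 = ?\nA. 1\nB. 2\nC. 3\nD. 4\n答案:"
shot_a = "B"
question = "2 + 2 = ?\nA. 2\nB. 3\nC. 4\nD. 5\n答案:"

prompt = (system_format.format(content=SYSTEM)
          + user_format.format(content=shot_q) + assistant_format.format(content=shot_a)
          + user_format.format(content=question))
print(prompt)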
scripts/inference/inference_hf.py
Python
import argparse import json, os import torch from transformers import AutoModelForCausalLM, AutoTokenizer from transformers import GenerationConfig from transformers import BitsAndBytesConfig DEFAULT_SYSTEM_PROMPT = """You are a helpful assistant. 你是一个乐于助人的助手。""" system_format='<|start_header_id|>system<|end_header_id|>\n\n{content}<|eot_id|>' user_format='<|start_header_id|>user<|end_header_id|>\n\n{content}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' assistant_format='{content}<|eot_id|>' parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, type=str, required=True) parser.add_argument('--tokenizer_path', default=None, type=str) parser.add_argument('--data_file', default=None, type=str, help="A file that contains instructions (one instruction per line)") parser.add_argument('--with_prompt', action='store_true', help="wrap the input with the prompt automatically") parser.add_argument('--interactive', action='store_true', help="run in the instruction mode (single-turn)") parser.add_argument('--predictions_file', default='./predictions.json', type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--only_cpu', action='store_true', help='only use CPU for inference') parser.add_argument('--load_in_8bit', action='store_true', help="Load the LLM in the 8bit mode") parser.add_argument('--load_in_4bit', action='store_true', help="Load the LLM in the 4bit mode") parser.add_argument("--use_vllm", action='store_true', help="Use vLLM as back-end LLM service.") parser.add_argument('--use_flash_attention_2', action='store_true', help="Use flash attention to replace the Llama attention") args = parser.parse_args() if args.use_vllm: if args.load_in_8bit or args.load_in_4bit: raise ValueError("vLLM currently does not support quantization, please use fp16 (default) or unuse --use_vllm.") if args.only_cpu: raise ValueError("vLLM requires GPUs with compute capability not less than 7.0. If you want to run only on CPU, please unuse --use_vllm.") if args.load_in_8bit and args.load_in_4bit: raise ValueError("Only one quantization method can be chosen for inference. 
Please check your arguments") if args.only_cpu is True: args.gpus = "" if args.load_in_8bit or args.load_in_4bit: raise ValueError("Quantization is unavailable on CPU.") os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus if args.use_vllm: from vllm import LLM, SamplingParams if args.use_vllm: generation_config = dict( temperature=0.2, top_k=40, top_p=0.9, max_tokens=400, presence_penalty=1.0, ) else: generation_config = GenerationConfig( temperature=0.2, top_k=40, top_p=0.9, do_sample=True, num_beams=1, repetition_penalty=1.1, max_new_tokens=400 ) sample_data = ["为什么要减少污染,保护环境?"] def generate_prompt(instruction): return system_format.format(content=DEFAULT_SYSTEM_PROMPT) + user_format.format(content=instruction) if __name__ == '__main__': load_type = torch.float16 # Move the model to the MPS device if available if torch.backends.mps.is_available(): device = torch.device("mps") else: if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') print(f"Using device: {device}") if args.tokenizer_path is None: args.tokenizer_path = args.base_model tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_path) terminators = [ tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>") ] if args.use_vllm: model = LLM(model=args.base_model, tokenizer=args.tokenizer_path, tensor_parallel_size=len(args.gpus.split(',')), dtype=load_type ) generation_config["stop_token_ids"] = terminators generation_config["stop"] = ["<|eot_id|>", "<|end_of_text|>"] else: if args.load_in_4bit or args.load_in_8bit: quantization_config = BitsAndBytesConfig( load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, bnb_4bit_compute_dtype=load_type, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4" ) model = AutoModelForCausalLM.from_pretrained( args.base_model, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto', quantization_config=quantization_config if (args.load_in_4bit or args.load_in_8bit) else None, attn_implementation="flash_attention_2" if args.use_flash_attention_2 else "sdpa" ) if device==torch.device('cpu'): model.float() model.eval() # test data if args.data_file is None: examples = sample_data else: with open(args.data_file,'r') as f: examples = [line.strip() for line in f.readlines()] print("first 10 examples:") for example in examples[:10]: print(example) with torch.no_grad(): if args.interactive: print("Start inference with instruction mode.") print('='*85) print("+ 该模式下仅支持单轮问答,无多轮对话能力。\n" "+ 如要进行多轮对话,请使用llama.cpp") print('-'*85) print("+ This mode only supports single-turn QA.\n" "+ If you want to experience multi-turn dialogue, please use llama.cpp") print('='*85) while True: raw_input_text = input("Input:") if len(raw_input_text.strip())==0: break if args.with_prompt: input_text = generate_prompt(instruction=raw_input_text) else: input_text = raw_input_text if args.use_vllm: output = model.generate([input_text], SamplingParams(**generation_config), use_tqdm=False) response = output[0].outputs[0].text else: inputs = tokenizer(input_text,return_tensors="pt") #add_special_tokens=False ? 
generation_output = model.generate( input_ids = inputs["input_ids"].to(device), attention_mask = inputs['attention_mask'].to(device), eos_token_id=terminators, pad_token_id=tokenizer.eos_token_id, generation_config = generation_config ) s = generation_output[0] output = tokenizer.decode(s, skip_special_tokens=True) if args.with_prompt: response = output.split("assistant\n\n")[-1].strip() else: response = output print("Response: ",response) print("\n") else: print("Start inference.") results = [] if args.use_vllm: if args.with_prompt is True: inputs = [generate_prompt(example) for example in examples] else: inputs = examples outputs = model.generate(inputs, SamplingParams(**generation_config)) for index, (example, output) in enumerate(zip(examples, outputs)): response = output.outputs[0].text print(f"======={index}=======") print(f"Input: {example}\n") print(f"Output: {response}\n") results.append({"Input":example,"Output":response}) else: for index, example in enumerate(examples): if args.with_prompt: input_text = generate_prompt(instruction=example) else: input_text = example inputs = tokenizer(input_text,return_tensors="pt") #add_special_tokens=False ? generation_output = model.generate( input_ids = inputs["input_ids"].to(device), attention_mask = inputs['attention_mask'].to(device), eos_token_id=terminators, pad_token_id=tokenizer.eos_token_id, generation_config = generation_config ) s = generation_output[0] output = tokenizer.decode(s,skip_special_tokens=True) if args.with_prompt: response = output.split("assistant\n\n")[1].strip() else: response = output print(f"======={index}=======") print(f"Input: {example}\n") print(f"Output: {response}\n") results.append({"Input":input_text,"Output":response}) dirname = os.path.dirname(args.predictions_file) os.makedirs(dirname,exist_ok=True) with open(args.predictions_file,'w') as f: json.dump(results,f,ensure_ascii=False,indent=2) if args.use_vllm: with open(dirname+'/generation_config.json','w') as f: json.dump(generation_config,f,ensure_ascii=False,indent=2) else: generation_config.save_pretrained('./')
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
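After generation, inference_hf.py decodes the whole sequence (prompt included) with skip_special_tokens=True and keeps only the text after the last "assistant\n\n" marker. That post-processing step on its own, with an invented decoded string:

# Invented decode of a full sequence: prompt headers plus the model's reply.
decoded = ("system\n\nYou are a helpful assistant. 你是一个乐于助人的助手。"
           "user\n\n为什么要减少污染,保护环境?"
           "assistant\n\n因为减少污染可以保护生态系统和人类健康。")

# Keep only what comes after the final assistant header, as the script does.
response = decoded.split("assistant\n\n")[-1].strip()
print(response)  # 因为减少污染可以保护生态系统和人类健康。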
scripts/llama_cpp/chat.sh
Shell
#!/bin/bash
# script to chat with Llama-3-Chinese-Instruct model
# usage: ./chat.sh llama-3-chinese-instruct-gguf-model-path your-first-instruction
# WARNING: the hyperparameters are not optimal, please tune them yourself

FIRST_INSTRUCTION=$2
SYSTEM_PROMPT="You are a helpful assistant. 你是一个乐于助人的助手。"

./main -m $1 --color -i \
-c 0 -t 6 --temp 0.2 --repeat_penalty 1.1 -ngl 999 \
-r '<|eot_id|>' \
--in-prefix '<|start_header_id|>user<|end_header_id|>\n\n' \
--in-suffix '<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' \
-p "<|start_header_id|>system<|end_header_id|>\n\n$SYSTEM_PROMPT<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n$FIRST_INSTRUCTION<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/longbench/eval.py
Python
# The script is from https://github.com/THUDM/LongBench import os import json import argparse import numpy as np from metrics import ( qa_f1_score, rouge_zh_score, qa_f1_zh_score, rouge_score, classification_score, retrieval_score, retrieval_zh_score, count_score, code_sim_score, ) dataset2metric = { "narrativeqa": qa_f1_score, "qasper": qa_f1_score, "multifieldqa_en": qa_f1_score, "multifieldqa_zh": qa_f1_zh_score, "hotpotqa": qa_f1_score, "2wikimqa": qa_f1_score, "musique": qa_f1_score, "dureader": rouge_zh_score, "gov_report": rouge_score, "qmsum": rouge_score, "multi_news": rouge_score, "vcsum": rouge_zh_score, "trec": classification_score, "triviaqa": qa_f1_score, "samsum": rouge_score, "lsht": classification_score, "passage_retrieval_en": retrieval_score, "passage_count": count_score, "passage_retrieval_zh": retrieval_zh_score, "lcc": code_sim_score, "repobench-p": code_sim_score, } def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument('--output_dir') parser.add_argument('--e', action='store_true', help="Evaluate on LongBench-E") return parser.parse_args(args) def scorer_e(dataset, predictions, answers, lengths, all_classes): scores = {"0-4k": [], "4-8k": [], "8k+": []} for (prediction, ground_truths, length) in zip(predictions, answers, lengths): score = 0. if dataset in ["trec", "triviaqa", "samsum", "lsht"]: prediction = prediction.lstrip('\n').split('\n')[0] for ground_truth in ground_truths: score = max(score, dataset2metric[dataset](prediction, ground_truth, all_classes=all_classes)) if length < 4000: scores["0-4k"].append(score) elif length < 8000: scores["4-8k"].append(score) else: scores["8k+"].append(score) for key in scores.keys(): scores[key] = round(100 * np.mean(scores[key]), 2) return scores def scorer(dataset, predictions, answers, all_classes): total_score = 0. for (prediction, ground_truths) in zip(predictions, answers): score = 0. if dataset in ["trec", "triviaqa", "samsum", "lsht"]: prediction = prediction.lstrip('\n').split('\n')[0] for ground_truth in ground_truths: score = max(score, dataset2metric[dataset](prediction, ground_truth, all_classes=all_classes)) total_score += score return round(100 * total_score / len(predictions), 2) if __name__ == '__main__': args = parse_args() scores = dict() if args.e: path = f"{args.output_dir}/pred_e/" else: path = f"{args.output_dir}/pred/" all_files = os.listdir(path) print("Evaluating on:", all_files) for filename in all_files: if not filename.endswith("jsonl"): continue predictions, answers, lengths = [], [], [] dataset = filename.split('.')[0] with open(f"{path}{filename}", "r", encoding="utf-8") as f: print(filename) for line in f: data = json.loads(line) predictions.append(data["pred"]) answers.append(data["answers"]) all_classes = data["all_classes"] if "length" in data: lengths.append(data["length"]) if args.e: score = scorer_e(dataset, predictions, answers, lengths, all_classes) else: score = scorer(dataset, predictions, answers, all_classes) scores[dataset] = score if args.e: out_path = f"{args.output_dir}/pred_e/result.json" else: out_path = f"{args.output_dir}/pred/result.json" with open(out_path, "w") as f: json.dump(scores, f, ensure_ascii=False, indent=4)
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
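A minimal, self-contained sketch, not taken from the repository, of the aggregation that scorer() in scripts/longbench/eval.py performs: for each sample, take the best score over all reference answers, then average over samples. A dummy exact-match metric stands in for the dataset2metric lookup.

def exact_match(prediction, ground_truth, **kwargs):
    return float(prediction.strip() == ground_truth.strip())

def aggregate(predictions, answers, metric):
    total = 0.0
    for prediction, ground_truths in zip(predictions, answers):
        # best score over all reference answers for this sample
        total += max(metric(prediction, gt) for gt in ground_truths)
    return round(100 * total / len(predictions), 2)

print(aggregate(["Paris", "1999"], [["Paris"], ["1998", "2000"]], exact_match))  # 50.0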
scripts/longbench/metrics.py
Python
# The script is from https://github.com/THUDM/LongBench import re import string import jieba from fuzzywuzzy import fuzz import difflib from collections import Counter from rouge import Rouge def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): return re.sub(r"\b(a|an|the)\b", " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def normalize_zh_answer(s): """Lower text and remove punctuation, extra whitespace.""" def white_space_fix(text): return "".join(text.split()) def remove_punc(text): cn_punctuation = "!?。。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏." all_punctuation = set(string.punctuation + cn_punctuation) return "".join(ch for ch in text if ch not in all_punctuation) def lower(text): return text.lower() return white_space_fix(remove_punc(lower(s))) def count_score(prediction, ground_truth, **kwargs): numbers = re.findall(r"\d+", prediction) right_num = 0 for number in numbers: if str(number) == str(ground_truth): right_num += 1 final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers) return float(final_score) def retrieval_score(prediction, ground_truth, **kwargs): pattern = r'Paragraph (\d+)' matches = re.findall(pattern, ground_truth) ground_truth_id = matches[0] numbers = re.findall(r"\d+", prediction) right_num = 0 for number in numbers: if str(number) == str(ground_truth_id): right_num += 1 final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers) return float(final_score) def retrieval_zh_score(prediction, ground_truth, **kwargs): pattern = r'段落(\d+)' matches = re.findall(pattern, ground_truth) ground_truth_id = matches[0] numbers = re.findall(r"\d+", prediction) right_num = 0 for number in numbers: if str(number) == str(ground_truth_id): right_num += 1 final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers) return float(final_score) def code_sim_score(prediction, ground_truth, **kwargs): all_lines = prediction.lstrip('\n').split('\n') prediction = "" for line in all_lines: if ('`' not in line) and ('#' not in line) and ('//' not in line): prediction = line break return (fuzz.ratio(prediction, ground_truth) / 100) def classification_score(prediction, ground_truth, **kwargs): em_match_list = [] all_classes = kwargs["all_classes"] for class_name in all_classes: if class_name in prediction: em_match_list.append(class_name) for match_term in em_match_list: if match_term in ground_truth and match_term != ground_truth: em_match_list.remove(match_term) if em_match_list != 0: if ground_truth in em_match_list: score = (1.0 / len(em_match_list)) else: score = 0.0 else: best_match = None highest_similarity = 0 for string in all_classes: similarity = difflib.SequenceMatcher(None, string, prediction).ratio() if similarity > highest_similarity: highest_similarity = similarity best_match = string score = float(best_match == ground_truth) return score def rouge_score(prediction, ground_truth, **kwargs): rouge = Rouge() try: scores = rouge.get_scores([prediction], [ground_truth], avg=True) except Exception: return 0.0 return scores["rouge-l"]["f"] def rouge_zh_score(prediction, ground_truth, **kwargs): prediction = " ".join(list(jieba.cut(prediction, cut_all=False))) ground_truth = " ".join(list(jieba.cut(ground_truth, cut_all=False))) 
score = rouge_score(prediction, ground_truth) return score def f1_score(prediction, ground_truth, **kwargs): common = Counter(prediction) & Counter(ground_truth) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction) recall = 1.0 * num_same / len(ground_truth) f1 = (2 * precision * recall) / (precision + recall) return f1 def qa_f1_score(prediction, ground_truth, **kwargs): normalized_prediction = normalize_answer(prediction) normalized_ground_truth = normalize_answer(ground_truth) prediction_tokens = normalized_prediction.split() ground_truth_tokens = normalized_ground_truth.split() return f1_score(prediction_tokens, ground_truth_tokens) def qa_f1_zh_score(prediction, ground_truth, **kwargs): prediction_tokens = list(jieba.cut(prediction, cut_all=False)) ground_truth_tokens = list(jieba.cut(ground_truth, cut_all=False)) prediction_tokens = [normalize_zh_answer(token) for token in prediction_tokens] ground_truth_tokens = [normalize_zh_answer(token) for token in ground_truth_tokens] prediction_tokens = [token for token in prediction_tokens if len(token) > 0] ground_truth_tokens = [token for token in ground_truth_tokens if len(token) > 0] return f1_score(prediction_tokens, ground_truth_tokens)
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
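The following is a self-contained sketch, not taken from the repository, of the token-level F1 computed by f1_score/qa_f1_score in scripts/longbench/metrics.py. The normalization steps (lowercasing, punctuation and article removal, jieba segmentation for Chinese) are omitted here for brevity.

from collections import Counter

def token_f1(prediction_tokens, ground_truth_tokens):
    # count tokens that appear in both prediction and reference
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(prediction_tokens)
    recall = num_same / len(ground_truth_tokens)
    return 2 * precision * recall / (precision + recall)

print(token_f1("the cat sat".split(), "a cat sat down".split()))  # ~0.571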
scripts/longbench/pred.py
Python
# The script is modified from https://github.com/THUDM/LongBench/blob/main/pred.py from datasets import load_dataset import torch import random import numpy as np import json from transformers import AutoTokenizer, AutoModelForCausalLM from transformers import BitsAndBytesConfig from tqdm import tqdm import os import argparse dir_path = os.path.dirname(os.path.realpath(__file__)) DEFAULT_SYSTEM_PROMPT = """You are a helpful assistant. 你是一个乐于助人的助手。""" system_format='<|start_header_id|>system<|end_header_id|>\n\n{content}<|eot_id|>' user_format='<|start_header_id|>user<|end_header_id|>\n\n{content}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' assistant_format='{content}<|eot_id|>' parser = argparse.ArgumentParser() parser.add_argument('--model_path', type=str) parser.add_argument('--predict_on',type=str, default='zh') parser.add_argument('--output_dir',type=str, default='pred') parser.add_argument('--gpus',type=str, default=None) parser.add_argument('--max_length',type=int, default=4096-512) parser.add_argument('--with_inst', choices=['true','false','auto'], default = 'false', help="Whether use the system prompt and template of Chinese-Alpaca-2 when constructing the instructions.") parser.add_argument('--e', action='store_true', help="Evaluate on LongBench-E") parser.add_argument('--use_flash_attention_2', action='store_true', help="Use flash attention to replace the LLaMA attention") args = parser.parse_args() model_path = args.model_path predict_on = args.predict_on output_dir = args.output_dir gpus=args.gpus max_length = args.max_length DO_SAMPLE =True TEMPERATURE = 0.2 REPETITION_PENALTY = 1.1 TOP_P = 0.95 TOP_K = 40 if gpus is not None: os.environ["CUDA_VISIBLE_DEVICES"] = gpus def fill_llama3_prompt_template(instruction, with_inst=True, system_prompt=DEFAULT_SYSTEM_PROMPT): if with_inst is False: return instruction else: return system_format.format(content=system_prompt) + user_format.format(content=instruction) def get_pred(model, tokenizer, data, max_length, max_gen, prompt_format, dataset, device): preds = [] for json_obj in tqdm(data): prompt = prompt_format.format(**json_obj) # truncate to fit max_length (we suggest truncate in the middle, since the left and right side may contain crucial instructions) tokenized_prompt = tokenizer(prompt, truncation=False, return_tensors="pt").input_ids[0] if len(tokenized_prompt) > max_length: half = int(max_length/2) prompt = tokenizer.decode(tokenized_prompt[:half], skip_special_tokens=True)+tokenizer.decode(tokenized_prompt[-half:], skip_special_tokens=True) if args.with_inst == 'auto': if dataset not in ["trec", "triviaqa", "samsum", "lsht", "lcc", "repobench-p"]: # chat models are better off without build prompts on these tasks prompt = fill_llama3_prompt_template(instruction=prompt) elif args.with_inst == 'true': prompt = fill_llama3_prompt_template(instruction=prompt, with_inst=True) else: prompt = fill_llama3_prompt_template(instruction=prompt, with_inst=False) input_data = tokenizer(prompt, truncation=False, return_tensors="pt").to(device) context_length = input_data.input_ids.shape[-1] if dataset == "samsum": # prevent illegal output on samsum (model endlessly repeat "\nDialogue"), might be a prompting issue output = model.generate( **input_data, max_new_tokens=max_gen, num_beams=1, do_sample=DO_SAMPLE, repetition_penalty = REPETITION_PENALTY, top_p = TOP_P, top_k = TOP_K, temperature=TEMPERATURE, min_length=context_length+1, eos_token_id=[tokenizer.eos_token_id, tokenizer.encode("\n", add_special_tokens=False)[-1]], )[0] 
else: output = model.generate( **input_data, max_new_tokens=max_gen, num_beams=1, do_sample=DO_SAMPLE, repetition_penalty=REPETITION_PENALTY, top_p=TOP_P, top_k=TOP_K, temperature=TEMPERATURE )[0] pred = tokenizer.decode(output[context_length:], skip_special_tokens=True) # print(pred) preds.append({"pred": pred, "answers": json_obj["answers"], "all_classes": json_obj["all_classes"], "length": json_obj["length"]}) return preds def seed_everything(seed): torch.manual_seed(seed) torch.cuda.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True torch.cuda.manual_seed_all(seed) if __name__ == '__main__': seed_everything(42) load_type = torch.float16 # Move the model to the MPS device if available if torch.backends.mps.is_available(): device = torch.device("mps") else: if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') print(f"Using device: {device}") if args.e: en_datasets = [ "hotpotqa","2wikimqa", "qasper", "multifieldqa_en", "gov_report", "trec", "samsum", "triviaqa", "passage_count", "passage_retrieval_en", "multi_news"] zh_datasets = [] code_datasets = [ "lcc", "repobench-p" ] if not os.path.exists(f"{output_dir}/pred_e"): os.makedirs(f"{output_dir}/pred_e") else: en_datasets = [ "hotpotqa","2wikimqa", "musique", "narrativeqa", "qasper", "multifieldqa_en", "gov_report", "qmsum", "trec", "samsum", "triviaqa", "passage_count", "passage_retrieval_en", "multi_news"] zh_datasets = [ "dureader", "multifieldqa_zh", "vcsum","lsht", "passage_retrieval_zh"] code_datasets = [ "lcc", "repobench-p" ] if not os.path.exists(f"{output_dir}/pred"): os.makedirs(f"{output_dir}/pred") datasets = [] for data_type in predict_on.split(','): if data_type == 'zh': datasets += zh_datasets elif data_type == 'en': datasets += en_datasets elif data_type == 'code': datasets += code_datasets print(datasets) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') tokenizer = AutoTokenizer.from_pretrained(model_path) model = AutoModelForCausalLM.from_pretrained( model_path, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto', use_flash_attention_2=args.use_flash_attention_2, trust_remote_code=True ) model = model.eval() model_vocab_size = model.get_input_embeddings().weight.size(0) print(f"Vocab of the base model: {model_vocab_size}") tokenizer_vocab_size = len(tokenizer) print(f"Vocab of the tokenizer: {tokenizer_vocab_size}") # we design specific prompt format and max generation length for each task, feel free to modify them to optimize model output dataset2prompt = json.load(open(dir_path + "/config/dataset2prompt.json", "r")) dataset2maxlen = json.load(open(dir_path + "/config/dataset2maxlen.json", "r")) # predict on each dataset for dataset in datasets: print(f"Loading dataset {dataset}") if args.e: data = load_dataset('THUDM/LongBench', dataset+'_e', split='test') output_path = f"{output_dir}/pred_e/{dataset}.jsonl" else: data = load_dataset('THUDM/LongBench', dataset, split='test') output_path = f"{output_dir}/pred/{dataset}.jsonl" prompt_format = dataset2prompt[dataset] max_gen = dataset2maxlen[dataset] preds = get_pred(model, tokenizer, data, max_length, max_gen, prompt_format, dataset, device) with open(output_path, "w", encoding="utf-8") as f: for pred in preds: json.dump(pred, f, ensure_ascii=False) f.write('\n')
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
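A minimal sketch, not taken from the repository, of the truncation strategy used inside get_pred() in scripts/longbench/pred.py: when a tokenized prompt exceeds max_length, the first and last halves are kept and the middle is dropped, because the task instructions usually sit at the two ends of the context. The function name and toy inputs are illustrative.

def truncate_middle(token_ids, max_length):
    if len(token_ids) <= max_length:
        return token_ids
    half = max_length // 2
    return token_ids[:half] + token_ids[-half:]

print(truncate_middle(list(range(10)), 6))  # [0, 1, 2, 7, 8, 9]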
scripts/merge_llama3_with_chinese_lora_low_mem.py
Python
""" Usage: python merge_llama3_with_chinese_lora_low_mem.py \ --base_model path/to/llama-3-hf-model \ --lora_model path/to/llama-3-chinese-lora \ --output_type [huggingface|pth|] \ --output_dir path/to/output-dir """ import argparse import json import os import gc import torch import peft from transformers import AutoTokenizer from transformers.modeling_utils import dtype_byte_size from huggingface_hub import snapshot_download import re import shutil import safetensors from safetensors.torch import load_file as safe_load_file parser = argparse.ArgumentParser(description='Script to merge Llama-3-hf with Llama-3-Chinese or Llama-3-Chinese-Instruct LoRA weights') parser.add_argument('--base_model', default=None, required=True, type=str, help="Base model path (basically Llama-3-hf)") parser.add_argument('--lora_model', default=None, required=True, type=str, help="LoRA model path (Llama-3-Chinese-LoRA, Llama-3-Chinese-Instruct-LoRA)") parser.add_argument('--output_type', default='huggingface',choices=['huggingface', 'pth'], type=str, help="Output model type can be 'huggingface' (default) or 'pth' format") parser.add_argument('--output_dir', default='./merged_model', type=str, help="Output path for the merged model") parser.add_argument('--verbose', default=False, action='store_true', help="Show detailed debugging messages") WEIGHTS_NAME = "adapter_model.bin" SAFETENSORS_WEIGHTS_NAME = "adapter_model.safetensors" layers_to_model_size = { 32 : '8B', 80 : '70B', } num_shards_of_models = {'8B': 1, '70B': 8} params_of_models = { '8B': { "dim": 4096, "n_heads": 32, "n_layers": 32, "norm_eps": 1e-05, "vocab_size": -1, }, '70B': { "dim": 8192, "n_heads": 64, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1, }, } def transpose(weight, fan_in_fan_out): return weight.T if fan_in_fan_out else weight def jsonload(filename): with open(filename, "r") as file: d = json.load(file) return d # Borrowed and modified from https://github.com/tloen/alpaca-lora def translate_state_dict_key(k): k = k.replace("base_model.model.", "") if k == "model.embed_tokens.weight": return "tok_embeddings.weight" elif k == "model.norm.weight": return "norm.weight" elif k == "lm_head.weight": return "output.weight" elif k.startswith("model.layers."): layer = k.split(".")[2] if k.endswith(".self_attn.q_proj.weight"): return f"layers.{layer}.attention.wq.weight" elif k.endswith(".self_attn.k_proj.weight"): return f"layers.{layer}.attention.wk.weight" elif k.endswith(".self_attn.v_proj.weight"): return f"layers.{layer}.attention.wv.weight" elif k.endswith(".self_attn.o_proj.weight"): return f"layers.{layer}.attention.wo.weight" elif k.endswith(".mlp.gate_proj.weight"): return f"layers.{layer}.feed_forward.w1.weight" elif k.endswith(".mlp.down_proj.weight"): return f"layers.{layer}.feed_forward.w2.weight" elif k.endswith(".mlp.up_proj.weight"): return f"layers.{layer}.feed_forward.w3.weight" elif k.endswith(".input_layernorm.weight"): return f"layers.{layer}.attention_norm.weight" elif k.endswith(".post_attention_layernorm.weight"): return f"layers.{layer}.ffn_norm.weight" elif k.endswith("rotary_emb.inv_freq") or "lora" in k: return None else: print(layer, k) raise NotImplementedError else: print(k) raise NotImplementedError def unpermute(w): return ( w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim) ) def save_shards(model_sd, num_shards: int, prefix="", verbose=False): """ Convert and save the HF format weights to PTH format weights """ with torch.no_grad(): if num_shards == 1: new_state_dict = {} for 
k, v in model_sd.items(): new_k = translate_state_dict_key(k) if new_k is not None: if "wq" in new_k or "wk" in new_k: new_state_dict[new_k] = unpermute(v) else: new_state_dict[new_k] = v os.makedirs(output_dir, exist_ok=True) print(f"Saving shard 1 of {num_shards} into {output_dir}/{prefix}consolidated.00.pth") torch.save(new_state_dict, output_dir + f"/{prefix}consolidated.00.pth") else: new_state_dicts = [dict() for _ in range(num_shards)] for k in list(model_sd.keys()): v = model_sd[k] new_k = translate_state_dict_key(k) if new_k is not None: if new_k=='tok_embeddings.weight': assert v.size(1)%num_shards==0 splits = v.split(v.size(1)//num_shards,dim=1) elif new_k=='output.weight': if v.size(0)%num_shards==0: splits = v.split(v.size(0)//num_shards,dim=0) else: size_list = [v.size(0)//num_shards] * num_shards size_list[-1] += v.size(0)%num_shards splits = v.split(size_list, dim=0) elif new_k=='norm.weight': splits = [v] * num_shards elif 'ffn_norm.weight' in new_k: splits = [v] * num_shards elif 'attention_norm.weight' in new_k: splits = [v] * num_shards elif 'w1.weight' in new_k: splits = v.split(v.size(0)//num_shards,dim=0) elif 'w2.weight' in new_k: splits = v.split(v.size(1)//num_shards,dim=1) elif 'w3.weight' in new_k: splits = v.split(v.size(0)//num_shards,dim=0) elif 'wo.weight' in new_k: splits = v.split(v.size(1)//num_shards,dim=1) elif 'wv.weight' in new_k: splits = v.split(v.size(0)//num_shards,dim=0) elif "wq.weight" in new_k or "wk.weight" in new_k: v = unpermute(v) splits = v.split(v.size(0)//num_shards,dim=0) else: print(f"Unexpected key {new_k}") raise ValueError if verbose: print(f"Processing {new_k}") for sd,split in zip(new_state_dicts,splits): sd[new_k] = split.clone() del split del splits del model_sd[k],v gc.collect() # Effectively enforce garbage collection os.makedirs(output_dir, exist_ok=True) for i,new_state_dict in enumerate(new_state_dicts): print(f"Saving shard {i+1} of {num_shards} into {output_dir}/{prefix}consolidated.0{i}.pth") torch.save(new_state_dict, output_dir + f"/{prefix}consolidated.0{i}.pth") def merge_shards(output_dir, num_shards: int): ckpt_filenames = sorted([f for f in os.listdir(output_dir) if re.match('L(\d+)-consolidated.(\d+).pth',f)]) for i in range(num_shards): shards_filenames = sorted([f for f in ckpt_filenames if re.match(f'L(\d+)-consolidated.0{i}.pth',f)]) print(f"Loading {shards_filenames} ...") shards_dicts = [torch.load(os.path.join(output_dir,fn)) for fn in shards_filenames] shards_merged = {} for d in shards_dicts: shards_merged |= d print(f"Saving the merged shard to " + os.path.join(output_dir, f"consolidated.0{i}.pth")) torch.save(shards_merged, os.path.join(output_dir, f"consolidated.0{i}.pth")) print("Cleaning up...") del shards_merged for d in shards_dicts: del d del shards_dicts gc.collect() # Effectively enforce garbage collection for fn in shards_filenames: os.remove(os.path.join(output_dir,fn)) if __name__=='__main__': args = parser.parse_args() base_model_path = args.base_model lora_model_path = args.lora_model output_dir = args.output_dir output_type = args.output_type os.makedirs(output_dir, exist_ok=True) print(f"="*80) print(f"Base model: {base_model_path}") print(f"LoRA model: {lora_model_path}") tokenizers_and_loras = [] print(f"Loading {lora_model_path}") if not os.path.exists(lora_model_path): print("Cannot find lora model on the disk. 
Downloading lora model from hub...") lora_model_path = snapshot_download(repo_id=lora_model_path) tokenizer = AutoTokenizer.from_pretrained(lora_model_path) lora_config = peft.LoraConfig.from_pretrained(lora_model_path) if os.path.exists(os.path.join(lora_model_path, SAFETENSORS_WEIGHTS_NAME)): lora_filename = os.path.join(lora_model_path, SAFETENSORS_WEIGHTS_NAME) use_safetensors = True elif os.path.exists(os.path.join(lora_model_path, WEIGHTS_NAME)): lora_filename = os.path.join(lora_model_path, WEIGHTS_NAME) use_safetensors = False else: raise ValueError( f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {lora_model_path}." ) if use_safetensors: lora_state_dict = safe_load_file(lora_filename, device="cpu") else: lora_state_dict = torch.load(lora_filename, map_location='cpu') # lora_state_dict = torch.load(os.path.join(lora_model_path,'adapter_model.bin'), map_location='cpu') if 'base_model.model.model.embed_tokens.weight' in lora_state_dict: lora_vocab_size = lora_state_dict['base_model.model.model.embed_tokens.weight'].shape[0] assert lora_vocab_size == len(tokenizer), \ (f"The vocab size of the tokenizer {len(tokenizer)} does not match the vocab size of the LoRA weight {lora_vocab_size}!\n") tokenizers_and_loras.append( { "tokenizer" :tokenizer, "state_dict" :lora_state_dict, "config": lora_config, "scaling": lora_config.lora_alpha / lora_config.r, "fan_in_fan_out" : lora_config.fan_in_fan_out, }) if not os.path.exists(base_model_path): print("Cannot find lora model on the disk. Downloading lora model from hub...") base_model_path = snapshot_download(repo_id=base_model_path) if os.path.exists(os.path.join(base_model_path, "pytorch_model.bin")): ckpt_filenames = ["pytorch_model.bin"] elif os.path.exists(os.path.join(base_model_path, "model.safetensors.index.json")): ckpt_filenames = sorted([f for f in os.listdir(base_model_path) if re.match('model-(\d+)-of-(\d+).safetensors',f)]) elif os.path.exists(os.path.join(base_model_path, "pytorch_model.index.json")): ckpt_filenames = sorted([f for f in os.listdir(base_model_path) if re.match('pytorch_model-(\d+)-of-(\d+).bin',f)]) if len(ckpt_filenames) == 0: raise FileNotFoundError(f"Cannot find base model checkpoints in ${base_model_path}. 
Please make sure the checkpoints are saved in the HF format.") layers = jsonload(os.path.join(base_model_path, "config.json"))["num_hidden_layers"] model_size = None total_size = 0 for index, filename in enumerate(ckpt_filenames): print(f"Loading ckpt {filename}") if re.match('(.*).safetensors', filename): state_dict = safe_load_file(os.path.join(base_model_path,filename), device="cpu") else: state_dict = torch.load(os.path.join(base_model_path,filename), map_location='cpu') # state_dict = torch.load(os.path.join(base_model_path,filename), map_location='cpu') if index == 0: model_size = layers_to_model_size[layers] if output_type == 'pth': params = params_of_models[model_size] num_shards = num_shards_of_models[model_size] n_layers = params["n_layers"] n_heads = params["n_heads"] dim = params["dim"] dims_per_head = dim // n_heads base = 500000.0 # llama-3 inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) print("Merging...") for k in state_dict: for tl_idx, t_and_l in enumerate(tokenizers_and_loras): saved_key = 'base_model.model.'+k lora_key_A = saved_key.replace('.weight','.lora_A.weight') if saved_key in t_and_l['state_dict']: if args.verbose: print(f"copying {saved_key} from {tl_idx}-th LoRA weight to {k}") state_dict[k] = t_and_l['state_dict'][saved_key].half().clone() # do we need half()? if lora_key_A in t_and_l['state_dict']: lora_key_B = lora_key_A.replace('lora_A.weight','lora_B.weight') if args.verbose: print(f"merging {lora_key_A} and lora_B.weight form {tl_idx}-th LoRA weight to {k}") state_dict[k] += ( transpose( t_and_l['state_dict'][lora_key_B].float() @ t_and_l['state_dict'][lora_key_A].float(), t_and_l['fan_in_fan_out']) * t_and_l['scaling'] ) weight_size = state_dict[k].numel() * dtype_byte_size(state_dict[k].dtype) total_size += weight_size if output_type == 'huggingface': print(f"Saving ckpt {filename} to {output_dir} in HF format...") if use_safetensors: safetensors.torch.save_file( state_dict, os.path.join(output_dir, filename), metadata={"format": "pt"} ) else: torch.save(state_dict, os.path.join(output_dir, filename)) elif output_type == 'pth': print(f"Converting to pth format...") save_shards(model_sd=state_dict, num_shards=num_shards,prefix=f"L{index+1}-", verbose=args.verbose) del state_dict gc.collect() # Effectively enforce garbage collection print(f"Saving tokenizer") tokenizers_and_loras[-1]['tokenizer'].save_pretrained(output_dir) if output_type == 'pth': with open(output_dir + "/params.json", "w") as f: print(f"Saving params.json into {output_dir}/params.json") json.dump(params, f) merge_shards(output_dir, num_shards=num_shards) if output_type=='huggingface': configs = ('config.json', 'generation_config.json', 'pytorch_model.bin.index.json', "model.safetensors.index.json") if model_size == "1.3B": configs = ('config.json', 'generation_config.json') for config in configs: if os.path.exists(os.path.join(lora_model_path, config)): print(f"Saving {config} from {lora_model_path}") with open(os.path.join(lora_model_path, config),'r') as f: obj = json.load(f) else: if os.path.exists(os.path.join(base_model_path, config)): print(f"Saving {config} from {base_model_path}") with open(os.path.join(base_model_path, config),'r') as f: obj = json.load(f) if config == 'config.json': obj['vocab_size'] = len(tokenizers_and_loras[-1]['tokenizer']) if config == 'pytorch_model.bin.index.json' or config == "model.safetensors.index.json": obj['metadata']['total_size'] = total_size if os.path.exists(os.path.join(base_model_path, config)): with 
open(os.path.join(output_dir, config), 'w') as f: json.dump(obj, f, indent=2) # for f in os.listdir(lora_model_path): # if re.match('(.*).py', f): # shutil.copy2(os.path.join(lora_model_path, f), output_dir) print("Done.") print(f"Check output dir: {output_dir}")
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
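A self-contained sketch, not taken from the repository, of the core arithmetic applied per weight in merge_llama3_with_chinese_lora_low_mem.py: the merged weight is W + (lora_alpha / r) * (B @ A). Tensor shapes and values below are toy examples; the real script additionally handles fan_in_fan_out transposition, embedding resizing and shard handling.

import torch

def merge_lora(W, lora_A, lora_B, lora_alpha, r):
    scaling = lora_alpha / r
    return W + (lora_B.float() @ lora_A.float()) * scaling

W = torch.zeros(8, 8)          # base weight (out_features x in_features)
lora_A = torch.randn(4, 8)     # r x in_features
lora_B = torch.randn(8, 4)     # out_features x r
merged = merge_lora(W, lora_A, lora_B, lora_alpha=32.0, r=4)
print(merged.shape)            # torch.Size([8, 8])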
scripts/mmlu/categories.py
Python
subcategories = { "abstract_algebra": ["math"], "anatomy": ["health"], "astronomy": ["physics"], "business_ethics": ["business"], "clinical_knowledge": ["health"], "college_biology": ["biology"], "college_chemistry": ["chemistry"], "college_computer_science": ["computer science"], "college_mathematics": ["math"], "college_medicine": ["health"], "college_physics": ["physics"], "computer_security": ["computer science"], "conceptual_physics": ["physics"], "econometrics": ["economics"], "electrical_engineering": ["engineering"], "elementary_mathematics": ["math"], "formal_logic": ["philosophy"], "global_facts": ["other"], "high_school_biology": ["biology"], "high_school_chemistry": ["chemistry"], "high_school_computer_science": ["computer science"], "high_school_european_history": ["history"], "high_school_geography": ["geography"], "high_school_government_and_politics": ["politics"], "high_school_macroeconomics": ["economics"], "high_school_mathematics": ["math"], "high_school_microeconomics": ["economics"], "high_school_physics": ["physics"], "high_school_psychology": ["psychology"], "high_school_statistics": ["math"], "high_school_us_history": ["history"], "high_school_world_history": ["history"], "human_aging": ["health"], "human_sexuality": ["culture"], "international_law": ["law"], "jurisprudence": ["law"], "logical_fallacies": ["philosophy"], "machine_learning": ["computer science"], "management": ["business"], "marketing": ["business"], "medical_genetics": ["health"], "miscellaneous": ["other"], "moral_disputes": ["philosophy"], "moral_scenarios": ["philosophy"], "nutrition": ["health"], "philosophy": ["philosophy"], "prehistory": ["history"], "professional_accounting": ["other"], "professional_law": ["law"], "professional_medicine": ["health"], "professional_psychology": ["psychology"], "public_relations": ["politics"], "security_studies": ["politics"], "sociology": ["culture"], "us_foreign_policy": ["politics"], "virology": ["health"], "world_religions": ["philosophy"], } categories = { "STEM": ["physics", "chemistry", "biology", "computer science", "math", "engineering"], "humanities": ["history", "philosophy", "law"], "social sciences": ["politics", "culture", "economics", "geography", "psychology"], "other (business, health, misc.)": ["other", "business", "health"], }
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
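A minimal sketch, not taken from the repository, of how the two dictionaries in scripts/mmlu/categories.py can be combined to map an MMLU subject to its top-level category. Only a few entries are reproduced here; the helper function is invented for illustration.

subcategories = {"college_physics": ["physics"], "jurisprudence": ["law"]}
categories = {
    "STEM": ["physics", "chemistry", "biology", "computer science", "math", "engineering"],
    "humanities": ["history", "philosophy", "law"],
}

def subject_to_category(subject):
    # follow subject -> subcategory -> category
    for subcat in subcategories[subject]:
        for category, subcat_list in categories.items():
            if subcat in subcat_list:
                return category
    return None

print(subject_to_category("college_physics"))  # STEM
print(subject_to_category("jurisprudence"))    # humanities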
scripts/mmlu/eval.py
Python
# modified from https://github.com/baichuan-inc/Baichuan-7B/blob/main/evaluation/evaluate_mmlu.py import argparse import os import torch import numpy as np import pandas as pd from categories import subcategories, categories from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig choices = ["A", "B", "C", "D"] def format_subject(subject): line = subject.split("_") s = "" for entry in line: s += " " + entry return s def format_example(df, idx, include_answer=True): prompt = df.iloc[idx, 0] k = df.shape[1] - 2 for j in range(k): prompt += "\n{}. {}".format(choices[j], df.iloc[idx, j + 1]) prompt += "\nAnswer:" if include_answer: prompt += " {}\n\n".format(df.iloc[idx, k + 1]) return prompt def gen_prompt(train_df, subject, k=-1): prompt = "The following are multiple choice questions (with answers) about {}.\n\n".format( format_subject(subject) ) if k == -1: k = train_df.shape[0] for i in range(k): prompt += format_example(train_df, i) return prompt @torch.no_grad() def mmlu_eval(args, subject, model, tokenizer, dev_df, test_df, device): cors = [] all_probs = [] for i in range(test_df.shape[0]): # get prompt and make sure it fits k = args.ntrain prompt_end = format_example(test_df, i, include_answer=False) train_prompt = gen_prompt(dev_df, subject, k) prompt = train_prompt + prompt_end input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device) label = test_df.iloc[i, test_df.shape[1] - 1] logits = model( input_ids=input_ids, ).logits[:,-1].flatten() probs = ( torch.nn.functional.softmax( torch.tensor( [ logits[tokenizer("A").input_ids[-1]], logits[tokenizer("B").input_ids[-1]], logits[tokenizer("C").input_ids[-1]], logits[tokenizer("D").input_ids[-1]], ] ).to(device), dim=0, ) .detach() .cpu() .to(torch.float32) .numpy() ) pred = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(probs)] cor = pred == label cors.append(cor) all_probs.append(probs) acc = np.mean(cors) cors = np.array(cors) all_probs = np.array(all_probs) print("Average accuracy {:.3f} - {}".format(acc, subject)) return cors, acc, all_probs def main(args): # Move the model to the MPS device if available if torch.backends.mps.is_available(): device = torch.device("mps") else: if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') print(f"Using device: {device}") tokenizer = AutoTokenizer.from_pretrained(args.model_path) model = AutoModelForCausalLM.from_pretrained( args.model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map='auto', attn_implementation="flash_attention_2" if args.use_flash_attention_2 else "sdpa" ).to(device).eval() subjects = sorted( [ f.split("_test.csv")[0] for f in os.listdir(os.path.join(args.data_dir, "test")) if "_test.csv" in f ] ) if not os.path.exists(args.save_dir): os.makedirs(args.save_dir) if not os.path.exists(os.path.join(args.save_dir, "results")): os.makedirs(os.path.join(args.save_dir, "results")) all_cors = [] subcat_cors = { subcat: [] for subcat_lists in subcategories.values() for subcat in subcat_lists } cat_cors = {cat: [] for cat in categories} for subject in subjects: dev_df = pd.read_csv( os.path.join(args.data_dir, "dev", subject + "_dev.csv"), header=None )[: args.ntrain] if args.do_test: test_df = pd.read_csv( os.path.join(args.data_dir, "test", subject + "_test.csv"), header=None ) else: test_df = pd.read_csv( os.path.join(args.data_dir, "val", subject + "_val.csv"), header=None ) cors, _, probs = mmlu_eval(args, subject, model, tokenizer, dev_df, test_df, device) subcats = subcategories[subject] for 
subcat in subcats: subcat_cors[subcat].append(cors) for key in categories.keys(): if subcat in categories[key]: cat_cors[key].append(cors) all_cors.append(cors) test_df["correct"] = cors for j in range(probs.shape[1]): choice = choices[j] test_df["choice{}_probs".format(choice)] = probs[:, j] test_df.to_csv( os.path.join( args.save_dir, "results", f"{subject}.csv" ), index=None, ) for subcat in subcat_cors: subcat_acc = np.mean(np.concatenate(subcat_cors[subcat])) print("Average accuracy {:.3f} - {}".format(subcat_acc, subcat)) for cat in cat_cors: cat_acc = np.mean(np.concatenate(cat_cors[cat])) print("Average accuracy {:.3f} - {}".format(cat_acc, cat)) weighted_acc = np.mean(np.concatenate(all_cors)) print("Average accuracy: {:.3f}".format(weighted_acc)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--ntrain", "-k", type=int, default=5) parser.add_argument("--data_dir", "-d", type=str, default="data") parser.add_argument("--save_dir", "-s", type=str, default="results") parser.add_argument( "--model_path", "-m", type=str, ) parser.add_argument( "--do_test", action="store_true" ) parser.add_argument( "--use_flash_attention_2", action="store_true" ) args = parser.parse_args() main(args)
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
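A self-contained sketch, not taken from the repository, of the answer-selection step in scripts/mmlu/eval.py: compare the next-token logits of the four option letters, softmax them, and pick the argmax. The dummy logits below stand in for a real model forward pass.

import torch

choice_logits = torch.tensor([1.2, 3.4, 0.5, 2.1])  # logits for "A", "B", "C", "D"
probs = torch.nn.functional.softmax(choice_logits, dim=0)
pred = {0: "A", 1: "B", 2: "C", 3: "D"}[int(torch.argmax(probs))]
print(pred, probs.tolist())  # "B", highest probability on the second option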
scripts/oai_api_demo/openai_api_protocol.py
Python
from typing import Optional, List, Dict, Any, Union, Literal import time import shortuuid from pydantic import BaseModel, Field class ChatCompletionRequest(BaseModel): model: str = "llama-3-chinese" messages: Union[str, List[Dict[str, str]]] temperature: Optional[float] = 0.2 top_p: Optional[float] = 0.9 top_k: Optional[int] = 40 n: Optional[int] = 1 max_tokens: Optional[int] = 512 num_beams: Optional[int] = 1 stop: Optional[Union[str, List[str]]] = None stream: Optional[bool] = False repetition_penalty: Optional[float] = 1.1 user: Optional[str] = None do_sample: Optional[bool] = True class ChatMessage(BaseModel): role: str content: str class DeltaMessage(BaseModel): role: Optional[Literal["user", "assistant", "system"]] = None content: Optional[str] = None class ChatCompletionResponseChoice(BaseModel): index: int message: ChatMessage class ChatCompletionResponseStreamChoice(BaseModel): index: int delta: DeltaMessage finish_reason: Optional[Literal["stop", "length"]] class ChatCompletionResponse(BaseModel): id: str = Field(default_factory=lambda: f"chatcmpl-{shortuuid.random()}") object: str = "chat.completion" created: int = Field(default_factory=lambda: int(time.time())) model: str = "llama-3-chinese" choices: List[ Union[ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice] ] class EmbeddingsRequest(BaseModel): input: Union[str, List[Any]] user: Optional[str] = None class EmbeddingsResponse(BaseModel): object: str = "list" data: List[Dict[str, Any]] model: str = "llama-3-chinese" class CompletionRequest(BaseModel): prompt: Union[str, List[Any]] temperature: Optional[float] = 0.2 n: Optional[int] = 1 max_tokens: Optional[int] = 512 stop: Optional[Union[str, List[str]]] = None stream: Optional[bool] = False top_p: Optional[float] = 0.9 top_k: Optional[int] = 40 num_beams: Optional[int] = 1 logprobs: Optional[int] = None echo: Optional[bool] = False repetition_penalty: Optional[float] = 1.1 user: Optional[str] = None do_sample: Optional[bool] = True class CompletionResponseChoice(BaseModel): index: int text: str class CompletionResponse(BaseModel): id: Optional[str] = Field(default_factory=lambda: f"cmpl-{shortuuid.random()}") object: Optional[str] = "text_completion" created: Optional[int] = Field(default_factory=lambda: int(time.time())) model: Optional[str] = "llama-3-chinese" choices: List[CompletionResponseChoice]
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
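For illustration only, a request body that matches the ChatCompletionRequest schema defined in openai_api_protocol.py might look like the dictionary below. The field names follow the pydantic model above; the values are made up.

payload = {
    "model": "llama-3-chinese",
    "messages": [
        {"role": "system", "content": "You are a helpful assistant. 你是一个乐于助人的助手。"},
        {"role": "user", "content": "请介绍一下中国的四大发明。"},
    ],
    "temperature": 0.2,
    "top_p": 0.9,
    "top_k": 40,
    "max_tokens": 512,
    "repetition_penalty": 1.1,
    "stream": False,
}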
scripts/oai_api_demo/openai_api_server.py
Python
import argparse import os from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware import uvicorn from threading import Thread from sse_starlette.sse import EventSourceResponse parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, type=str, required=True) parser.add_argument('--lora_model', default=None, type=str,help="If None, perform inference on the base model") parser.add_argument('--tokenizer_path',default=None,type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--load_in_8bit',action='store_true', help='Load the model in 8bit mode') parser.add_argument('--load_in_4bit',action='store_true', help='Load the model in 4bit mode') parser.add_argument('--only_cpu',action='store_true',help='Only use CPU for inference') parser.add_argument('--use_flash_attention_2', action='store_true', help="Use flash-attention2 to accelerate inference") args = parser.parse_args() if args.only_cpu is True: args.gpus = "" if args.load_in_8bit or args.load_in_4bit: raise ValueError("Quantization is unavailable on CPU.") if args.load_in_8bit and args.load_in_4bit: raise ValueError("Only one quantization method can be chosen for inference. Please check your arguments") os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus import torch import torch.nn.functional as F from transformers import ( AutoModelForCausalLM, AutoTokenizer, GenerationConfig, TextIteratorStreamer, BitsAndBytesConfig ) from peft import PeftModel import sys parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_dir) from openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, EmbeddingsRequest, EmbeddingsResponse, ChatCompletionResponseStreamChoice, DeltaMessage, ) load_type = torch.float16 # Move the model to the MPS device if available if torch.backends.mps.is_available(): device = torch.device("mps") else: if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') print(f"Using device: {device}") if args.tokenizer_path is None: args.tokenizer_path = args.lora_model if args.lora_model is None: args.tokenizer_path = args.base_model tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_path, legacy=True) if args.load_in_4bit or args.load_in_8bit: quantization_config = BitsAndBytesConfig( load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, bnb_4bit_compute_dtype=load_type, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4" ) base_model = AutoModelForCausalLM.from_pretrained( args.base_model, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto' if not args.only_cpu else None, #load_in_4bit=args.load_in_4bit, #load_in_8bit=args.load_in_8bit, quantization_config=quantization_config if (args.load_in_4bit or args.load_in_8bit) else None, attn_implementation="flash_attention_2" if args.use_flash_attention_2 else "sdpa", trust_remote_code=True ) model_vocab_size = base_model.get_input_embeddings().weight.size(0) tokenizer_vocab_size = len(tokenizer) print(f"Vocab of the base model: {model_vocab_size}") print(f"Vocab of the tokenizer: {tokenizer_vocab_size}") if model_vocab_size != tokenizer_vocab_size: print("Resize model embeddings to fit tokenizer") base_model.resize_token_embeddings(tokenizer_vocab_size) if args.lora_model is not None: print("loading peft model") model = PeftModel.from_pretrained( base_model, args.lora_model, 
torch_dtype=load_type, device_map="auto", ) else: model = base_model if device == torch.device("cpu"): model.float() model.eval() DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant. 你是一个乐于助人的助手。" TEMPLATE_WITH_SYSTEM_PROMPT = ( """<|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""" ) TEMPLATE_WITHOUT_SYSTEM_PROMPT = """<|start_header_id|>user<|end_header_id|>\n\n{message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n""" def generate_prompt( message, response="", with_system_prompt=False, system_prompt=None ): if with_system_prompt is True: if system_prompt is None: system_prompt = DEFAULT_SYSTEM_PROMPT prompt = TEMPLATE_WITH_SYSTEM_PROMPT.format_map( {"message": message, "system_prompt": system_prompt} ) else: prompt = TEMPLATE_WITHOUT_SYSTEM_PROMPT.format_map({"message": message}) if len(response) > 0: prompt += " " + response return prompt def generate_completion_prompt(message: str): """Generate prompt for completion""" return generate_prompt(message, response="", with_system_prompt=False) def generate_chat_prompt(messages: list): """Generate prompt for chat completion""" system_msg = None for msg in messages: if msg.role == "system": system_msg = msg.content prompt = "" is_first_user_content = True for msg in messages: if msg.role == "system": continue if msg.role == "user": if is_first_user_content is True: prompt += generate_prompt( msg.content, with_system_prompt=True, system_prompt=system_msg ) is_first_user_content = False else: prompt += generate_prompt(msg.content, with_system_prompt=False) if msg.role == "assistant": prompt += f"{msg.content}" + "<|eot_id|>" return prompt def predict( input, max_new_tokens=1024, top_p=0.9, temperature=0.2, top_k=40, num_beams=1, repetition_penalty=1.1, do_sample=True, **kwargs, ): """ Main inference method type(input) == str -> /v1/completions type(input) == list -> /v1/chat/completions """ if isinstance(input, str): prompt = generate_completion_prompt(input) else: prompt = generate_chat_prompt(input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(device) attention_mask = inputs['attention_mask'].to(device) generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, do_sample=do_sample, **kwargs, ) generation_config.return_dict_in_generate = True generation_config.output_scores = False generation_config.max_new_tokens = max_new_tokens generation_config.repetition_penalty = float(repetition_penalty) # c.f. 
llama-3-instruct generation_config llama3_eos_ids = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")] # For the reason why pad_token_id = eos_token_id, see: # https://github.com/meta-llama/llama-recipes/blob/f7aa02af9f2c427ebb70853191b72636130b9df5/src/llama_recipes/finetuning.py#L141 with torch.no_grad(): generation_output = model.generate( input_ids=input_ids, attention_mask=attention_mask, eos_token_id=llama3_eos_ids, pad_token_id=tokenizer.eos_token_id, generation_config=generation_config, ) s = generation_output.sequences[0] output = tokenizer.decode(s, skip_special_tokens=True) #output = output.split("<|eot_id|>")[-1].strip() output = output.split("assistant\n\n")[-1].strip() return output def stream_predict( input, max_new_tokens=1024, top_p=0.9, temperature=0.2, top_k=40, num_beams=4, repetition_penalty=1.1, do_sample=True, model_id="llama-3-chinese", **kwargs, ): choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(role="assistant"), finish_reason=None ) chunk = ChatCompletionResponse( model=model_id, choices=[choice_data], object="chat.completion.chunk", ) yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False)) if isinstance(input, str): prompt = generate_completion_prompt(input) else: prompt = generate_chat_prompt(input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(device) generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, do_sample=do_sample, **kwargs, ) streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) generation_kwargs = dict( streamer=streamer, input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=False, max_new_tokens=max_new_tokens, repetition_penalty=float(repetition_penalty), ) Thread(target=model.generate, kwargs=generation_kwargs).start() for new_text in streamer: choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(content=new_text), finish_reason=None ) chunk = ChatCompletionResponse( model=model_id, choices=[choice_data], object="chat.completion.chunk" ) yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False)) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(), finish_reason="stop" ) chunk = ChatCompletionResponse( model=model_id, choices=[choice_data], object="chat.completion.chunk" ) yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False)) yield "[DONE]" def get_embedding(input): """Get embedding main function""" with torch.no_grad(): encoding = tokenizer(input, padding=True, return_tensors="pt") input_ids = encoding["input_ids"].to(device) attention_mask = encoding["attention_mask"].to(device) model_output = model(input_ids, attention_mask, output_hidden_states=True) data = model_output.hidden_states[-1] mask = attention_mask.unsqueeze(-1).expand(data.size()).float() masked_embeddings = data * mask sum_embeddings = torch.sum(masked_embeddings, dim=1) seq_length = torch.sum(mask, dim=1) embedding = sum_embeddings / seq_length normalized_embeddings = F.normalize(embedding, p=2, dim=1) ret = normalized_embeddings.squeeze(0).tolist() return ret app = FastAPI() app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) @app.post("/v1/chat/completions") async def create_chat_completion(request: ChatCompletionRequest): """Creates a completion for the chat message""" msgs = 
request.messages if isinstance(msgs, str): msgs = [ChatMessage(role="user", content=msgs)] else: msgs = [ChatMessage(role=x["role"], content=x["content"]) for x in msgs] if request.stream: generate = stream_predict( input=msgs, max_new_tokens=request.max_tokens, top_p=request.top_p, top_k=request.top_k, temperature=request.temperature, num_beams=request.num_beams, repetition_penalty=request.repetition_penalty, do_sample=request.do_sample, ) return EventSourceResponse(generate, media_type="text/event-stream") output = predict( input=msgs, max_new_tokens=request.max_tokens, top_p=request.top_p, top_k=request.top_k, temperature=request.temperature, num_beams=request.num_beams, repetition_penalty=request.repetition_penalty, do_sample=request.do_sample, ) choices = [ ChatCompletionResponseChoice(index=i, message=msg) for i, msg in enumerate(msgs) ] choices += [ ChatCompletionResponseChoice( index=len(choices), message=ChatMessage(role="assistant", content=output) ) ] return ChatCompletionResponse(choices=choices) @app.post("/v1/completions") async def create_completion(request: CompletionRequest): """Creates a completion""" output = predict( input=request.prompt, max_new_tokens=request.max_tokens, top_p=request.top_p, top_k=request.top_k, temperature=request.temperature, num_beams=request.num_beams, repetition_penalty=request.repetition_penalty, do_sample=request.do_sample, ) choices = [CompletionResponseChoice(index=0, text=output)] return CompletionResponse(choices=choices) @app.post("/v1/embeddings") async def create_embeddings(request: EmbeddingsRequest): """Creates text embedding""" embedding = get_embedding(request.input) data = [{"object": "embedding", "embedding": embedding, "index": 0}] return EmbeddingsResponse(data=data) if __name__ == "__main__": log_config = uvicorn.config.LOGGING_CONFIG log_config["formatters"]["access"][ "fmt" ] = "%(asctime)s - %(levelname)s - %(message)s" log_config["formatters"]["default"][ "fmt" ] = "%(asctime)s - %(levelname)s - %(message)s" uvicorn.run(app, host="0.0.0.0", port=19327, workers=1, log_config=log_config)
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
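A minimal client sketch, not taken from the repository, for the FastAPI server in openai_api_server.py, which listens on port 19327 by default. It assumes the server is already running and that the requests package is installed.

import requests

resp = requests.post(
    "http://127.0.0.1:19327/v1/chat/completions",
    json={"messages": [{"role": "user", "content": "你好,请用一句话介绍你自己。"}]},
    timeout=300,
)
resp.raise_for_status()
# The last choice holds the newly generated assistant message.
print(resp.json()["choices"][-1]["message"]["content"])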
scripts/training/build_dataset.py
Python
import logging import os from typing import Union, List import datasets import torch from datasets import load_dataset, concatenate_datasets import transformers IGNORE_INDEX = -100 logger = logging.getLogger('__name__') DEFAULT_SYSTEM_PROMPT = """You are a helpful assistant. 你是一个乐于助人的助手。""" system_format='<|start_header_id|>system<|end_header_id|>\n\n{content}<|eot_id|>' user_format='<|start_header_id|>user<|end_header_id|>\n\n{content}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' assistant_format='{content}<|eot_id|>' def build_instruction_dataset(data_path: Union[List[str],str], tokenizer: transformers.PreTrainedTokenizer, max_seq_length: int, data_cache_dir = None, preprocessing_num_workers = None, ): def tokenization(examples): sources = [] targets = [] for instruction, input_text, output in zip(examples['instruction'],examples['input'],examples['output']): if input_text is not None and input_text !="": instruction = instruction+'\n'+input_text source = system_format.format(content=DEFAULT_SYSTEM_PROMPT) + user_format.format(content=instruction) target = assistant_format.format(content=output) sources.append(source) targets.append(target) tokenized_sources = tokenizer(sources, return_attention_mask=False, add_special_tokens=False) tokenized_targets = tokenizer(targets, return_attention_mask=False, add_special_tokens=False) all_input_ids = [] all_labels = [] for s,t in zip(tokenized_sources['input_ids'],tokenized_targets['input_ids']): input_ids = torch.LongTensor(s + t)[:max_seq_length] labels = torch.LongTensor([IGNORE_INDEX] * len(s) + t)[:max_seq_length] all_input_ids.append(input_ids) all_labels.append(labels) results = {'input_ids':all_input_ids, 'labels': all_labels} return results logging.warning("building dataset...") all_datasets = [] if not isinstance(data_path,(list,tuple)): data_path = [data_path] for file in data_path: if data_cache_dir is None: data_cache_dir = str(os.path.dirname(file)) cache_path = os.path.join(data_cache_dir,os.path.basename(file).split('.')[0]+f"_{max_seq_length}") os.makedirs(cache_path, exist_ok=True) try: processed_dataset = datasets.load_from_disk(cache_path) logger.info(f'training datasets-{file} has been loaded from disk') except Exception: raw_dataset = load_dataset("json", data_files=file, cache_dir=cache_path) tokenization_func = tokenization tokenized_dataset = raw_dataset.map( tokenization_func, batched=True, num_proc=preprocessing_num_workers, remove_columns=["instruction","input","output"], keep_in_memory=False, desc="preprocessing on dataset", ) processed_dataset = tokenized_dataset processed_dataset.save_to_disk(cache_path) processed_dataset.set_format('torch') all_datasets.append(processed_dataset['train']) all_datasets = concatenate_datasets(all_datasets) return all_datasets
ymcui/Chinese-LLaMA-Alpaca-3
1,964
中文羊驼大模型三期项目 (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
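A self-contained sketch, not taken from the repository, of how build_instruction_dataset in scripts/training/build_dataset.py lays out one training example: the system plus user part becomes the source (masked with IGNORE_INDEX in the labels) and the assistant part is the target the model is trained to produce. The example instruction and answer are invented.

IGNORE_INDEX = -100
system_format = '<|start_header_id|>system<|end_header_id|>\n\n{content}<|eot_id|>'
user_format = ('<|start_header_id|>user<|end_header_id|>\n\n{content}<|eot_id|>'
               '<|start_header_id|>assistant<|end_header_id|>\n\n')
assistant_format = '{content}<|eot_id|>'

source = (system_format.format(content="You are a helpful assistant. 你是一个乐于助人的助手。")
          + user_format.format(content="把下面的句子翻译成英文:今天天气很好。"))
target = assistant_format.format(content="The weather is very nice today.")

# With a real tokenizer, labels would be [IGNORE_INDEX] * len(source_ids) + target_ids,
# so the loss is only computed on the assistant's answer.
print(source + target)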
scripts/training/run_clm_pt_with_peft.py
Python
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import logging import numpy as np import math import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, List, Dict, Any, Mapping from pathlib import Path import datasets import torch from datasets import load_dataset, concatenate_datasets import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, is_torch_xla_available, set_seed, BitsAndBytesConfig ) from transformers.testing_utils import CaptureLogger from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version from sklearn.metrics import accuracy_score from peft import LoraConfig, TaskType, get_peft_model, PeftModel, prepare_model_for_kbit_training # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0") def accuracy(predictions, references, normalize=True, sample_weight=None): return { "accuracy": float( accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight) ) } def compute_metrics(eval_preds): preds, labels = eval_preds # preds have the same shape as the labels, after the argmax(-1) has been calculated # by preprocess_logits_for_metrics but we need to shift the labels labels = labels[:, 1:].reshape(-1) preds = preds[:, :-1].reshape(-1) return accuracy(predictions=preds, references=labels) def preprocess_logits_for_metrics(logits, labels): if isinstance(logits, tuple): # Depending on the model and config, logits may contain extra tensors, # like past_key_values, but logits always come first logits = logits[0] return logits.argmax(dim=-1) def fault_tolerance_data_collator(features: List) -> Dict[str, Any]: if not isinstance(features[0], Mapping): features = [vars(f) for f in features] first = features[0] batch = {} # Special handling for labels. # Ensure that tensor is created with the correct type # (it should be automatically the case, but let's make sure of it.) 
if "label" in first and first["label"] is not None: label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"] dtype = torch.long if isinstance(label, int) else torch.float batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype) elif "label_ids" in first and first["label_ids"] is not None: if isinstance(first["label_ids"], torch.Tensor): batch["labels"] = torch.stack([f["label_ids"] for f in features]) else: dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype) # Handling of all other possible keys. # Again, we will use the first element to figure out which key/values are not None for this model. try: for k, v in first.items(): if k not in ("label", "label_ids") and v is not None and not isinstance(v, str): if isinstance(v, torch.Tensor): batch[k] = torch.stack([f[k] for f in features]) elif isinstance(v, np.ndarray): batch[k] = torch.tensor(np.stack([f[k] for f in features])) else: batch[k] = torch.tensor([f[k] for f in features]) except ValueError: # quick fix by simply take the first example for k, v in first.items(): if k not in ("label", "label_ids") and v is not None and not isinstance(v, str): if isinstance(v, torch.Tensor): batch[k] = torch.stack([features[0][k]] * len(features)) elif isinstance(v, np.ndarray): batch[k] = torch.tensor(np.stack([features[0][k]] * len(features))) else: batch[k] = torch.tensor([features[0][k]] * len(features)) return batch MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The tokenizer for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=False, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) low_cpu_mem_usage: bool = field( default=False, metadata={ "help": ( "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. " "set True will benefit LLM loading time and RAM consumption." ) }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_dir: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) block_size: Optional[int] = field( default=None, metadata={ "help": ( "Optional input sequence length after tokenization. " "The training dataset will be truncated in block of this size for training. " "Default to the model max input length for single sentence inputs (take into account special tokens)." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[float] = field( default=0.05, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) data_cache_dir: Optional[str] = field(default="./", metadata={"help": "The datasets processed stored"}) def __post_init__(self): if self.streaming: require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`") @dataclass class MyTrainingArguments(TrainingArguments): trainable : Optional[str] = field(default="q_proj,v_proj") lora_rank : Optional[int] = field(default=8) lora_dropout : Optional[float] = field(default=0.1) lora_alpha : Optional[float] = field(default=32.) 
modules_to_save : Optional[str] = field(default=None) debug_mode : Optional[bool] = field(default=False) peft_path : Optional[str] = field(default=None) use_flash_attention_2 : Optional[bool] = field(default=False) double_quant: Optional[bool] = field(default=True) quant_type: Optional[str] = field(default="nf4") load_in_kbits: Optional[int] = field(default=16) full_finetuning : Optional[bool] = field(default=False) logger = logging.getLogger(__name__) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN, handlers=[logging.StreamHandler(sys.stdout)],) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed) config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.tokenizer_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) # Preprocessing the datasets. # First we tokenize all the texts. # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer(examples["text"]) # clm input could be much much longer than block_size if "Token indices sequence length is longer than the" in cl.out: tok_logger.warning( "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" " before being passed to the model." ) return output if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) block_size = 1024 else: if data_args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(data_args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. 
result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result with training_args.main_process_first(desc="dataset map tokenization and grouping"): lm_datasets = [] path = Path(data_args.dataset_dir) files = [file.name for file in path.glob("*.txt")] if training_args.debug_mode is True: files = [files[0]] for idx, file in enumerate(files): data_file = os.path.join(path, file) filename = ''.join(file.split(".")[:-1]) cache_path = os.path.join(data_args.data_cache_dir, filename+f"_{block_size}") os.makedirs(cache_path, exist_ok=True) try: processed_dataset = datasets.load_from_disk(cache_path, keep_in_memory=False) logger.info(f'training datasets-{filename} has been loaded from disk') except Exception: cache_dir = os.path.join(data_args.data_cache_dir, filename+f"_text_{block_size}") os.makedirs(cache_dir, exist_ok=True) raw_dataset = load_dataset("text", data_files=data_file, cache_dir=cache_dir, keep_in_memory=False) logger.info(f"{file} has been loaded") tokenized_dataset = raw_dataset.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns="text", load_from_cache_file=True, keep_in_memory=False, cache_file_names = {k: os.path.join(cache_dir, 'tokenized.arrow') for k in raw_dataset}, desc="Running tokenizer on dataset", ) grouped_datasets = tokenized_dataset.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=True, keep_in_memory=False, cache_file_names = {k: os.path.join(cache_dir, 'grouped.arrow') for k in tokenized_dataset}, desc=f"Grouping texts in chunks of {block_size}", ) processed_dataset = grouped_datasets processed_dataset.save_to_disk(cache_path) if idx == 0: lm_datasets = processed_dataset['train'] else: assert lm_datasets.features.type == processed_dataset["train"].features.type lm_datasets = concatenate_datasets([lm_datasets, processed_dataset["train"]]) lm_datasets = lm_datasets.train_test_split(test_size = data_args.validation_split_percentage) if training_args.do_train: train_dataset = lm_datasets['train'] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) logger.info(f"Num train_samples {len(train_dataset)}") logger.info("Training example:") logger.info(tokenizer.decode(train_dataset[0]['input_ids'])) if training_args.do_eval: eval_dataset = lm_datasets["test"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) logger.info(f"Num eval_samples {len(eval_dataset)}") logger.info("Evaluation example:") logger.info(tokenizer.decode(eval_dataset[0]['input_ids'])) compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)) if training_args.load_in_kbits in [4, 8]: if training_args.modules_to_save is not None: load_in_8bit_skip_modules = training_args.modules_to_save.split(',') else: load_in_8bit_skip_modules = None quantization_config = BitsAndBytesConfig( load_in_4bit=training_args.load_in_kbits == 4, load_in_8bit=training_args.load_in_kbits == 8, llm_int8_threshold=6.0, load_in_8bit_skip_modules=load_in_8bit_skip_modules, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=training_args.double_quant, bnb_4bit_quant_type=training_args.quant_type # {'fp4', 
'nf4'} ) else: quantization_config = None if quantization_config is not None: logger.info(f"quantization_config:{quantization_config.to_dict()}") if model_args.model_name_or_path: torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) device_map = {"":int(os.environ.get("LOCAL_RANK") or 0)} model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, low_cpu_mem_usage=model_args.low_cpu_mem_usage, device_map=device_map, quantization_config=quantization_config, attn_implementation="flash_attention_2" if training_args.use_flash_attention_2 else "sdpa" ) else: model = AutoModelForCausalLM.from_config(config) n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") if training_args.load_in_kbits in [4, 8]: model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing) model.config.use_cache = False model_vocab_size = model.get_output_embeddings().weight.size(0) tokenizer_vocab_size = len(tokenizer) logger.info(f"Model vocab size: {model_vocab_size}") logger.info(f"Tokenizer vocab size: {tokenizer_vocab_size}") if model_vocab_size != tokenizer_vocab_size: logger.info(f"Resize model vocab size to {tokenizer_vocab_size}") model.resize_token_embeddings(len(tokenizer)) if not training_args.full_finetuning: if training_args.peft_path is not None: logger.info("Peft from pre-trained model") model = PeftModel.from_pretrained(model, training_args.peft_path, device_map=device_map, is_trainable=True) else: logger.info("Init new peft model") target_modules = training_args.trainable.split(',') modules_to_save = training_args.modules_to_save if modules_to_save is not None: modules_to_save = modules_to_save.split(',') lora_rank = training_args.lora_rank lora_dropout = training_args.lora_dropout lora_alpha = training_args.lora_alpha logger.info(f"target_modules: {target_modules}") logger.info(f"lora_rank: {lora_rank}") logger.info(f"modules_to_save: {modules_to_save}") peft_config = LoraConfig( task_type=TaskType.CAUSAL_LM, target_modules=target_modules, inference_mode=False, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, modules_to_save=modules_to_save) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=fault_tolerance_data_collator, compute_metrics=compute_metrics if training_args.do_eval and not is_torch_xla_available() else None, preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval and not is_torch_xla_available() else None, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( 
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if __name__ == "__main__": main()
ymcui/Chinese-LLaMA-Alpaca-3
1,964
Phase 3 of the Chinese LLaMA/Alpaca large model project (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
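The pretraining script above first tokenizes the raw text files and then packs the token stream into fixed-length blocks with group_texts before handing them to the Trainer. A minimal, self-contained sketch of that packing step (the toy token ids and the tiny block size are made up for illustration; the real script applies the same function through datasets.map with the size taken from --block_size):

from itertools import chain

def group_texts(examples, block_size=8):
    # Concatenate each field (e.g. input_ids) across examples, cut the stream into
    # equally sized blocks, drop the remainder, and copy input_ids as labels.
    concatenated = {k: list(chain(*examples[k])) for k in examples.keys()}
    total_length = len(concatenated[list(examples.keys())[0]])
    total_length = (total_length // block_size) * block_size
    result = {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated.items()
    }
    result["labels"] = result["input_ids"].copy()
    return result

# Toy batch: two "documents" worth of token ids (12 ids in total).
batch = {"input_ids": [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11, 12]]}
print(group_texts(batch))  # one block of 8 ids; the trailing 4 ids are dropped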
scripts/training/run_clm_sft_with_peft.py
Python
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from pathlib import Path import datasets import torch from build_dataset import build_instruction_dataset import transformers from transformers import ( CONFIG_MAPPING, AutoConfig, BitsAndBytesConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed, DataCollatorForSeq2Seq ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version from peft import LoraConfig, TaskType, get_peft_model, PeftModel, prepare_model_for_kbit_training # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The tokenizer for weights initialization.Don't set if you want to train a model from scratch." ) }, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=False, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) low_cpu_mem_usage: bool = field( default=False, metadata={ "help": ( "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. " "set True will benefit LLM loading time and RAM consumption." ) }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_dir: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[float] = field( default=0.05, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) data_cache_dir: Optional[str] = field(default=None, metadata={"help": "The datasets processed stored"}) max_seq_length: Optional[int] = field(default=1024) @dataclass class MyTrainingArguments(TrainingArguments): trainable : Optional[str] = field(default="q_proj,v_proj") lora_rank : Optional[int] = field(default=8) lora_dropout : Optional[float] = field(default=0.1) lora_alpha : Optional[float] = field(default=32.) 
modules_to_save : Optional[str] = field(default=None) peft_path : Optional[str] = field(default=None) use_flash_attention_2 : Optional[bool] = field(default=False) double_quant: Optional[bool] = field(default=True) quant_type: Optional[str] = field(default="nf4") load_in_kbits: Optional[int] = field(default=16) full_finetuning : Optional[bool] = field(default=False) logger = logging.getLogger(__name__) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN, handlers=[logging.StreamHandler(sys.stdout)],) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16 or training_args.bf16}" ) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed) config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.tokenizer_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if tokenizer.pad_token_id is None: tokenizer.pad_token = tokenizer.eos_token data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer) eval_dataset=None train_dataset = None if training_args.do_train: with training_args.main_process_first(desc="loading and tokenization"): path = Path(data_args.dataset_dir) files = [os.path.join(path,file.name) for file in path.glob("*.json")] logger.info(f"Training files: {' '.join(files)}") train_dataset = build_instruction_dataset( data_path=files, tokenizer=tokenizer, max_seq_length=data_args.max_seq_length, data_cache_dir=None, preprocessing_num_workers = data_args.preprocessing_num_workers) logger.info(f"Num train_samples {len(train_dataset)}") logger.info("Training example:") logger.info(tokenizer.decode(train_dataset[0]['input_ids'])) if training_args.do_eval: with training_args.main_process_first(desc="loading and tokenization"): files = [data_args.validation_file] logger.info(f"Evaluation files: {' '.join(files)}") eval_dataset = build_instruction_dataset( data_path=files, tokenizer=tokenizer, max_seq_length=data_args.max_seq_length, data_cache_dir = None, preprocessing_num_workers = data_args.preprocessing_num_workers) logger.info(f"Num eval_samples {len(eval_dataset)}") logger.info("Evaluation example:") logger.info(tokenizer.decode(eval_dataset[0]['input_ids'])) torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)) if training_args.load_in_kbits in [4, 8]: if training_args.modules_to_save is not None: load_in_8bit_skip_modules = training_args.modules_to_save.split(',') else: load_in_8bit_skip_modules = None quantization_config = BitsAndBytesConfig( load_in_4bit=training_args.load_in_kbits == 4, load_in_8bit=training_args.load_in_kbits == 8, llm_int8_threshold=6.0, load_in_8bit_skip_modules=load_in_8bit_skip_modules, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=training_args.double_quant, bnb_4bit_quant_type=training_args.quant_type # {'fp4', 'nf4'} ) 
else: quantization_config = None if quantization_config is not None: logger.info(f"quantization_config:{quantization_config.to_dict()}") device_map = {"":int(os.environ.get("LOCAL_RANK") or 0)} model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, low_cpu_mem_usage=model_args.low_cpu_mem_usage, device_map=device_map, quantization_config=quantization_config, attn_implementation="flash_attention_2" if training_args.use_flash_attention_2 else "sdpa" ) if training_args.load_in_kbits in [4, 8]: model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing) model.config.use_cache = False model_vocab_size = model.get_input_embeddings().weight.shape[0] logger.info(f"Model vocab size: {model_vocab_size}") logger.info(f"len(tokenizer):{len(tokenizer)}") if model_vocab_size != len(tokenizer): logger.info(f"Resize model vocab size to {len(tokenizer)}") model.resize_token_embeddings(len(tokenizer)) if not training_args.full_finetuning: if training_args.peft_path is not None: logger.info("Peft from pre-trained model") model = PeftModel.from_pretrained(model, training_args.peft_path, device_map=device_map, is_trainable=True) else: logger.info("Init new peft model") target_modules = training_args.trainable.split(',') modules_to_save = training_args.modules_to_save if modules_to_save is not None: modules_to_save = modules_to_save.split(',') lora_rank = training_args.lora_rank lora_dropout = training_args.lora_dropout lora_alpha = training_args.lora_alpha logger.info(f"target_modules: {target_modules}") logger.info(f"lora_rank: {lora_rank}") peft_config = LoraConfig( task_type=TaskType.CAUSAL_LM, target_modules=target_modules, inference_mode=False, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, modules_to_save=modules_to_save) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics metrics["train_samples"] = len(train_dataset) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() metrics["eval_samples"] =len(eval_dataset) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if __name__ == "__main__": main()
ymcui/Chinese-LLaMA-Alpaca-3
1,964
Phase 3 of the Chinese LLaMA/Alpaca large model project (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
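Both training scripts above wrap the base model in a LoRA adapter unless --full_finetuning is set. A standalone sketch of that setup, assuming a local Llama-style checkpoint (the path is only a placeholder; rank, alpha, dropout, target modules and modules_to_save mirror the --lora_rank, --lora_alpha, --lora_dropout, --trainable and --modules_to_save options used by the shell scripts that follow):

import torch
from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model

# Placeholder path; the scripts take this from --model_name_or_path.
model = AutoModelForCausalLM.from_pretrained(
    "path/to/hf/meta-llama-3-8b/dir",
    torch_dtype=torch.bfloat16,
)
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=64,                                                  # --lora_rank
    lora_alpha=128,                                        # --lora_alpha
    lora_dropout=0.05,                                     # --lora_dropout
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj",
                    "gate_proj", "down_proj", "up_proj"],  # --trainable
    modules_to_save=["embed_tokens", "lm_head"],           # --modules_to_save
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()  # only adapter weights (plus modules_to_save) are trainable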
scripts/training/run_pt.sh
Shell
#!/bin/bash ## 运行脚本前请仔细阅读wiki(https://github.com/ymcui/Chinese-LLaMA-Alpaca-3/wiki/pt_scripts_zh) ## Read the wiki(https://github.com/ymcui/Chinese-LLaMA-Alpaca-3/wiki/pt_scripts_en) carefully before running the script lr=1e-4 lora_rank=64 lora_alpha=128 lora_trainable="q_proj,v_proj,k_proj,o_proj,gate_proj,down_proj,up_proj" modules_to_save="embed_tokens,lm_head" lora_dropout=0.05 pretrained_model=path/to/hf/meta-llama-3-8b/dir tokenizer_name_or_path=${pretrained_model} dataset_dir=path/to/pt/data/dir data_cache=temp_data_cache_dir per_device_train_batch_size=1 gradient_accumulation_steps=8 block_size=1024 output_dir=output_dir torchrun --nnodes 1 --nproc_per_node 1 run_clm_pt_with_peft.py \ --model_name_or_path ${pretrained_model} \ --tokenizer_name_or_path ${tokenizer_name_or_path} \ --dataset_dir ${dataset_dir} \ --data_cache_dir ${data_cache} \ --validation_split_percentage 0.001 \ --per_device_train_batch_size ${per_device_train_batch_size} \ --do_train \ --low_cpu_mem_usage \ --seed $RANDOM \ --bf16 \ --num_train_epochs 1 \ --lr_scheduler_type cosine \ --learning_rate ${lr} \ --warmup_ratio 0.05 \ --weight_decay 0.01 \ --logging_strategy steps \ --logging_steps 10 \ --save_strategy steps \ --save_total_limit 3 \ --save_steps 200 \ --gradient_accumulation_steps ${gradient_accumulation_steps} \ --preprocessing_num_workers 8 \ --block_size ${block_size} \ --output_dir ${output_dir} \ --overwrite_output_dir \ --ddp_timeout 30000 \ --logging_first_step True \ --lora_rank ${lora_rank} \ --lora_alpha ${lora_alpha} \ --trainable ${lora_trainable} \ --lora_dropout ${lora_dropout} \ --modules_to_save ${modules_to_save} \ --torch_dtype bfloat16 \ --load_in_kbits 16 \ --ddp_find_unused_parameters False
ymcui/Chinese-LLaMA-Alpaca-3
1,964
Phase 3 of the Chinese LLaMA/Alpaca large model project (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
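Every flag in run_pt.sh above maps onto a field of one of the dataclasses declared in run_clm_pt_with_peft.py (ModelArguments, DataTrainingArguments, MyTrainingArguments), which HfArgumentParser turns into typed attributes. A small sketch of that mechanism with a made-up ToyArguments dataclass, just to show how a --lora_rank-style option becomes a field:

from dataclasses import dataclass, field
from typing import Optional
from transformers import HfArgumentParser

@dataclass
class ToyArguments:
    lora_rank: Optional[int] = field(default=8)
    trainable: Optional[str] = field(default="q_proj,v_proj")

parser = HfArgumentParser(ToyArguments)
# The real scripts parse sys.argv (or a single JSON file); here the args are passed explicitly.
(toy_args,) = parser.parse_args_into_dataclasses(
    args=["--lora_rank", "64", "--trainable", "q_proj,v_proj,k_proj"]
)
print(toy_args.lora_rank)             # 64
print(toy_args.trainable.split(","))  # ['q_proj', 'v_proj', 'k_proj']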
scripts/training/run_sft.sh
Shell
#!/bin/bash ## 运行脚本前请仔细阅读wiki(https://github.com/ymcui/Chinese-LLaMA-Alpaca-3/wiki/sft_scripts_zh) ## Read the wiki(https://github.com/ymcui/Chinese-LLaMA-Alpaca-3/wiki/sft_scripts_en) carefully before running the script lr=1e-4 lora_rank=64 lora_alpha=128 lora_trainable="q_proj,v_proj,k_proj,o_proj,gate_proj,down_proj,up_proj" modules_to_save="embed_tokens,lm_head" lora_dropout=0.05 pretrained_model=path/to/hf/meta-llama-3-8b/or/llama-3-chinese-8b/dir/or/model_id tokenizer_name_or_path=${pretrained_model} dataset_dir=path/to/sft/data/dir per_device_train_batch_size=1 per_device_eval_batch_size=1 gradient_accumulation_steps=8 max_seq_length=512 output_dir=output_dir validation_file=validation_file_name torchrun --nnodes 1 --nproc_per_node 1 run_clm_sft_with_peft.py \ --model_name_or_path ${pretrained_model} \ --tokenizer_name_or_path ${tokenizer_name_or_path} \ --dataset_dir ${dataset_dir} \ --per_device_train_batch_size ${per_device_train_batch_size} \ --per_device_eval_batch_size ${per_device_eval_batch_size} \ --do_train \ --low_cpu_mem_usage \ --do_eval \ --seed $RANDOM \ --bf16 \ --num_train_epochs 3 \ --lr_scheduler_type cosine \ --learning_rate ${lr} \ --warmup_ratio 0.03 \ --logging_strategy steps \ --logging_steps 10 \ --save_strategy steps \ --save_total_limit 3 \ --evaluation_strategy steps \ --eval_steps 100 \ --save_steps 200 \ --gradient_accumulation_steps ${gradient_accumulation_steps} \ --preprocessing_num_workers 8 \ --max_seq_length ${max_seq_length} \ --output_dir ${output_dir} \ --overwrite_output_dir \ --ddp_timeout 30000 \ --logging_first_step True \ --lora_rank ${lora_rank} \ --lora_alpha ${lora_alpha} \ --trainable ${lora_trainable} \ --lora_dropout ${lora_dropout} \ --modules_to_save ${modules_to_save} \ --torch_dtype bfloat16 \ --validation_file ${validation_file} \ --load_in_kbits 16 \ --ddp_find_unused_parameters False
ymcui/Chinese-LLaMA-Alpaca-3
1,964
Phase 3 of the Chinese LLaMA/Alpaca large model project (Chinese Llama-3 LLMs) developed from Meta Llama 3
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
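Both shell scripts pass --load_in_kbits 16, so the base model is loaded in bf16, but the Python training scripts also accept 4 or 8 for QLoRA-style quantized training. A sketch of the 4-bit path they would take in that case (the checkpoint path is again a placeholder):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import prepare_model_for_kbit_training

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # --load_in_kbits 4
    bnb_4bit_compute_dtype=torch.bfloat16,  # follows --bf16
    bnb_4bit_use_double_quant=True,         # --double_quant
    bnb_4bit_quant_type="nf4",              # --quant_type
)
model = AutoModelForCausalLM.from_pretrained(
    "path/to/hf/meta-llama-3-8b/dir",       # placeholder
    quantization_config=quantization_config,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
)
# Prepares the quantized model for training (casts norms/embeddings, enables checkpointing hooks).
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)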
scripts/ceval/eval.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import os import argparse import pandas as pd import torch import json from mixtral_evaluator import Mixtral_Evaluator import time choices = ["A", "B", "C", "D"] def main(args, evaluator,take): assert os.path.exists("subject_mapping.json"), "subject_mapping.json not found!" with open("subject_mapping.json") as f: subject_mapping = json.load(f) filenames = os.listdir("data/val") subject_list = [val_file.replace("_val.csv","") for val_file in filenames] accuracy, summary = {}, {} run_date=time.strftime('%Y-%m-%d_%H-%M-%S',time.localtime(time.time())) output_dir = args.output_dir save_result_dir=os.path.join(output_dir,f"take{take}") if not os.path.exists(save_result_dir): os.makedirs(save_result_dir,exist_ok=True) all_answers = {} for index,subject_name in enumerate(subject_list): print(f"{index/len(subject_list)} Inference starts at {run_date} on {args.model_path} with subject of {subject_name}!") val_file_path=os.path.join('data/val',f'{subject_name}_val.csv') dev_file_path=os.path.join('data/dev',f'{subject_name}_dev.csv') test_file_path=os.path.join('data/test',f'{subject_name}_test.csv') val_df=pd.read_csv(val_file_path) if args.do_test is False else pd.read_csv(test_file_path) dev_df=pd.read_csv(dev_file_path) if args.few_shot else None correct_ratio, answers = evaluator.eval_subject(subject_name, val_df, dev_df, save_result_dir=save_result_dir if args.do_save_csv else None, few_shot=args.few_shot, cot=args.cot, with_prompt=args.with_prompt, constrained_decoding=args.constrained_decoding, do_test=args.do_test) print(f"Subject: {subject_name}") print(f"Acc: {correct_ratio}") accuracy[subject_name] = correct_ratio summary[subject_name] = {"score":correct_ratio, "num":len(val_df), "correct":correct_ratio*len(val_df)/100} all_answers[subject_name] = answers json.dump(all_answers,open(save_result_dir+'/submission.json','w'),ensure_ascii=False,indent=4) print("Accuracy:") for k, v in accuracy.items(): print(k, ": ", v) total_num = 0 total_correct = 0 summary['grouped'] = { "STEM": {"correct": 0.0, "num": 0}, "Social Science": {"correct": 0.0, "num": 0}, "Humanities": {"correct": 0.0, "num": 0}, "Other": {"correct": 0.0, "num": 0} } for subj, info in subject_mapping.items(): group = info[2] summary['grouped'][group]["num"] += summary[subj]['num'] summary['grouped'][group]["correct"] += summary[subj]['correct'] for group, info in summary['grouped'].items(): info['score'] = info["correct"] / info["num"] total_num += info["num"] total_correct += info["correct"] summary['All'] = {"score": total_correct / total_num, "num": total_num, "correct": total_correct} json.dump(summary,open(save_result_dir+'/summary.json','w'),ensure_ascii=False,indent=2) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--model_path", type=str) parser.add_argument("--cot",choices=["False","True"], default="False") parser.add_argument("--few_shot", choices=["False","True"], default="True") parser.add_argument("--ntrain", "-k", type=int, default=5) parser.add_argument("--with_prompt", choices=["False","True"], default="False") parser.add_argument("--constrained_decoding", choices=["False","True"], default="True") parser.add_argument("--temperature",type=float,default=0.2) parser.add_argument("--n_times", default=1,type=int) parser.add_argument("--do_save_csv", choices=["False","True"], default="False") parser.add_argument("--output_dir", type=str) parser.add_argument("--do_test", choices=["False","True"], 
default="False") parser.add_argument("--verbose", action="store_true", help="Print detailed information of each example.") parser.add_argument("--load_in_4bit", action="store_true", help="The model was loaded by 4-bit quantization") parser.add_argument("--use_flash_attention_2", action="store_true", help="Use flash_attention2 to replace the mixtral attention") args = parser.parse_args() args.cot = args.cot == "True" args.few_shot = args.few_shot == "True" args.with_prompt = args.with_prompt == "True" args.constrained_decoding = args.constrained_decoding == "True" args.do_test = args.do_test == "True" args.do_save_csv = args.do_save_csv == "True" if args.constrained_decoding is True: args.n_times=max(args.n_times,1) print(args) device = torch.device(0) print(device) evaluator=Mixtral_Evaluator( choices=choices, k=args.ntrain, model_path=args.model_path, device=device, temperature=args.temperature, load_in_4bit=args.load_in_4bit, use_flash_attention_2=args.use_flash_attention_2, verbose=args.verbose ) for i in range(args.n_times): main(args,evaluator=evaluator,take=i)
ymcui/Chinese-Mixtral
609
Chinese Mixtral mixture-of-experts large models (Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
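With --constrained_decoding True, the evaluator used by eval.py above never parses the generated text: it reads the logits of the first new token and picks whichever of A/B/C/D scores highest, summing the logit of the bare letter token with that of its ":A"-style variant. A toy reproduction of that selection step, with random logits and made-up token ids standing in for a real model and tokenizer:

import numpy as np

rng = np.random.default_rng(0)
vocab_size = 32_000
logits = rng.normal(size=vocab_size)  # stand-in for generation_output.scores[0][0]

# Hypothetical token ids; the real evaluator looks these up with the tokenizer
# (tokenizer.encode("A", add_special_tokens=False)[0] and tokenizer.encode(":A")[-1]).
letter_ids = {"A": 319, "B": 350, "C": 315, "D": 360}
colon_ids = {"A": 1010, "B": 1011, "C": 1012, "D": 1013}

scores = {c: logits[letter_ids[c]] + logits[colon_ids[c]] for c in "ABCD"}
answer = max(scores, key=scores.get)
print(answer, round(scores[answer], 3))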
scripts/ceval/evaluator.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import string class Evaluator: def __init__(self, choices, model_name, k=-1): self.choices = choices self.model_name = model_name self.k = k self.puncs = list(string.punctuation) def format_example(self, line, include_answer=True): example = line['question'] for choice in self.choices: example += f'\n{choice}. {line[f"{choice}"]}' example += '\n答案:' if include_answer: example += f'{line["answer"]}\n\n' return example def generate_few_shot_prompt(self, subject, dev_df): prompt = f"以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n" k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): prompt += self.format_example(dev_df.iloc[i, :]) return prompt def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, save_result_dir=None): pass def normalize_answer(self,s): def white_space_fix(text): return ' '.join(text.split()) def remove_punc(text): exclude=set(self.puncs) return ''.join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_punc(lower(s))) def exact_match(self,pred, target): return self.normalize_answer(pred)==self.normalize_answer(target)
ymcui/Chinese-Mixtral
609
Chinese Mixtral mixture-of-experts large models (Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
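The exact-match helper in the evaluator above normalizes both strings before comparing them: lower-case, drop ASCII punctuation, collapse whitespace. A quick standalone illustration of that normalization:

import string

PUNCS = set(string.punctuation)

def normalize_answer(s: str) -> str:
    # lower-case, strip ASCII punctuation, collapse runs of whitespace
    s = s.lower()
    s = "".join(ch for ch in s if ch not in PUNCS)
    return " ".join(s.split())

print(normalize_answer("  A.  "))                       # "a"
print(normalize_answer("A") == normalize_answer("a."))  # True, i.e. an exact match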
scripts/ceval/mixtral_evaluator.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import os import re from tqdm import tqdm import random import numpy as np import torch from transformers import AutoModelForCausalLM, LlamaTokenizer, BitsAndBytesConfig from transformers import GenerationConfig from evaluator import Evaluator class Mixtral_Evaluator(Evaluator): def __init__(self, choices, k, model_path, device, temperature=0.2, load_in_4bit=False, use_flash_attention_2=False, verbose=False): super(Mixtral_Evaluator, self).__init__(choices, model_path, k) load_type = torch.float16 self.model_path = model_path self.device = device self.verbose = verbose self.load_in_4bit = load_in_4bit self.use_flash_attention_2 = use_flash_attention_2 self.tokenizer = LlamaTokenizer.from_pretrained(model_path, legacy=True) quantization_config = BitsAndBytesConfig( load_in_4bit=True, load_in_8bit=False, bnb_4bit_compute_dtype=load_type, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4" ) self.model = AutoModelForCausalLM.from_pretrained( model_path, quantization_config=quantization_config if self.load_in_4bit else None, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto', attn_implementation="flash_attention_2" if self.use_flash_attention_2 else "sdpa" ) self.generation_config = GenerationConfig( temperature=temperature, top_k=40, top_p=0.9, do_sample=True, num_beams=1, repetition_penalty=1.1, max_new_tokens=20 ) self.sA_id = self.tokenizer.encode("A", add_special_tokens=False)[0] self.sB_id = self.tokenizer.encode("B", add_special_tokens=False)[0] self.sC_id = self.tokenizer.encode("C", add_special_tokens=False)[0] self.sD_id = self.tokenizer.encode("D", add_special_tokens=False)[0] self.A_id = self.tokenizer.encode(":A")[-1] self.B_id = self.tokenizer.encode(":B")[-1] self.C_id = self.tokenizer.encode(":C")[-1] self.D_id = self.tokenizer.encode(":D")[-1] def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, cot=False, save_result_dir=None, with_prompt=False, constrained_decoding=False, do_test=False): all_answers = {} if constrained_decoding is True: self.generation_config.output_scores = True self.generation_config.return_dict_in_generate = True self.generation_config.max_new_tokens = 1 self.generation_config.top_p = 1.0 self.generation_config.top_k = 0 correct_num = 0 if save_result_dir: result = [] score = [] if few_shot: if with_prompt: history = self.generate_mixtral_inst_few_shot_prompt(subject_name, dev_df, cot=cot) else: history = self.generate_mixtral_few_shot_prompt(subject_name, dev_df, cot=cot) else: history = '' answers = ['NA'] * len(test_df) if do_test is True else list(test_df['answer']) for row_index, row in tqdm(test_df.iterrows(), total=len(test_df)): question = self.format_example(row, include_answer=False, cot=cot,with_prompt=with_prompt) instruction = question if with_prompt: prompt_template = ( "[INST] {instruction} [/INST]" ) instruction = prompt_template.format_map({'instruction': instruction}) instruction = history + instruction inputs = self.tokenizer(instruction, return_tensors="pt") generation_output = self.model.generate( input_ids = inputs["input_ids"].to(self.device), attention_mask = inputs['attention_mask'].to(self.device), eos_token_id=self.tokenizer.eos_token_id, pad_token_id=self.tokenizer.eos_token_id, generation_config = self.generation_config ) _, length = inputs.input_ids.shape if constrained_decoding is True: logits = generation_output.scores[0][0] logits = logits.float().cpu().detach() choices1_logits = 
logits[[self.sA_id,self.sB_id,self.sC_id,self.sD_id]] choices2_logits = logits[[self.A_id,self.B_id,self.C_id,self.D_id]] choicesAll_logits = (choices1_logits + choices2_logits).numpy() assert not (np.any(np.isinf(choicesAll_logits)) or np.any(np.isnan(choicesAll_logits))) ans = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(choicesAll_logits)] response = self.tokenizer.decode([logits.argmax(-1).item()]) else: response = self.tokenizer.decode(generation_output[0, length:], skip_special_tokens=True) ans, _ = self.extract_answer(row, response) if ans == answers[row_index]: correct_num += 1 correct = 1 else: correct = 0 if self.verbose is True: print(f"\n======={str(row_index)}=======") print(f"question: {question}\n") print(f"response: {response}\n") print(f"extracted answer: {ans}") print(f"ground truth: {answers[row_index]} \n") if save_result_dir: result.append(response) score.append(correct) all_answers[str(row_index)] = ans correct_ratio = 100*correct_num/len(answers) if save_result_dir: test_df['model_output'] = result test_df['correctness'] = score test_df.to_csv(os.path.join(save_result_dir, f'{subject_name}_test.csv')) return correct_ratio, all_answers def format_example(self, line, include_answer=True, cot=False, with_prompt=False): example = line['question'] for choice in self.choices: example += f'\n{choice}. {line[f"{choice}"]}' if include_answer: if cot: example += "\n答案:让我们一步一步思考,\n" + \ line["explanation"] + f"\n所以答案是{line['answer']}。\n\n" else: example += '\n答案:' + line["answer"] + '\n\n' else: if with_prompt is False: if cot: example += "\n答案:让我们一步一步思考,\n1." else: example += '\n答案:' else: if cot: example += "\n答案是什么?让我们一步一步思考,\n1." else: example += '\n答案:' return example def generate_mixtral_few_shot_prompt(self, subject, dev_df, cot=False): prompt = f"以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n" k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): prompt += self.format_example( dev_df.iloc[i, :], include_answer=True, cot=cot ) return prompt def generate_mixtral_inst_few_shot_prompt(self, subject, dev_df, cot=False): prompt = f"以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n" prompt_template = ( "[INST] {instruction} [/INST]好的,我会结合{subject}相关知识回答" ) prompt = prompt_template.format_map({'instruction':prompt, 'subject':subject}) k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): line = dev_df.iloc[i, :] q=line['question'] for choice in self.choices: q += f'\n{choice}. {line[f"{choice}"]}' a = line['answer'] prompt += "[INST] "+q+"\n答案: [/INST]"+a+"\n" return prompt def extract_answer(self, line, gen_ans): m = re.findall(r'所以答案是(.+?)。', gen_ans, re.M) if len(m) > 0 and m[-1] in self.choices: return m[-1], True answer_patterns = [ r'([ABCD])是正确的', r'选项([ABCD])正确', r'答案为([ABCD])', r'答案是([ABCD])', r'答案([ABCD])', r'选择([ABCD])', r'答案:([ABCD])', r'选择答案([ABCD])' ] # RE extraction for answer_pattern in answer_patterns: m = re.search(answer_pattern, gen_ans, re.M) if m: answer = m.group(1) return answer, False # only containing one choice-character m = re.findall(r'[ABCD]', gen_ans, re.M) if len(m) >= 1: answer = m[0] return answer, False # only containing one choice-context choices_dict = {} pattern = "" for c in self.choices: choices_dict[str(line[f'{c}'])] = c pattern += re.escape(str(line[f'{c}']))+"|" pattern = pattern[:-1] m = re.findall(pattern, gen_ans, re.M) print("w/ escape:",repr(pattern),gen_ans,(len(m)>=1)) if len(m) >= 1: answer = choices_dict[m[0]] return answer, False return random.sample('ABCD', 1)[0], False
ymcui/Chinese-Mixtral
609
Chinese Mixtral mixture-of-experts large models (Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
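When constrained decoding is off, the evaluator above falls back to regular expressions over the generated text, trying progressively looser patterns (the "所以答案是X。" conclusion, common "答案是X" phrasings, a bare choice letter, then the full option text) before resorting to a random guess. A trimmed illustration of the first two stages:

import re

def extract_choice(gen_ans: str):
    # Stage 1: chain-of-thought conclusion of the form "所以答案是X。"
    m = re.findall(r"所以答案是(.+?)。", gen_ans, re.M)
    if m and m[-1] in "ABCD":
        return m[-1]
    # Stage 2: a few common phrasings (a subset of the patterns in the script above)
    for pattern in (r"答案为([ABCD])", r"答案是([ABCD])", r"答案:([ABCD])"):
        m = re.search(pattern, gen_ans, re.M)
        if m:
            return m.group(1)
    return None  # the real evaluator keeps falling back and finally guesses at random

print(extract_choice("让我们一步一步思考,所以答案是C。"))  # C
print(extract_choice("答案是B,因为其余选项均不正确。"))    # B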
scripts/cmmlu/categories.py
Python
# This code is modified from CMMLU Project: https://github.com/haonan-li/CMMLU name_en2zh = { "agronomy": "农学", "anatomy": "解剖学", "ancient_chinese": "古汉语", "arts": "艺术学", "astronomy": "天文学", "business_ethics": "商业伦理", "chinese_civil_service_exam": "中国公务员考试", "chinese_driving_rule": "中国驾驶规则", "chinese_food_culture": "中国饮食文化", "chinese_foreign_policy": "中国外交政策", "chinese_history":"中国历史", "chinese_literature": "中国文学", "chinese_teacher_qualification": "中国教师资格", "clinical_knowledge": "临床知识", "college_actuarial_science":"大学精算学", "college_education":"大学教育学", "college_engineering_hydrology": "大学工程水文学", "college_law": "大学法律", "college_mathematics": "大学数学", "college_medical_statistics":"大学医学统计", "college_medicine": "大学医学", "computer_science": "计算机科学", "computer_security": "计算机安全", "conceptual_physics": "概念物理学", "construction_project_management": "建设工程管理", "economics": "经济学", "education": "教育学", "electrical_engineering": "电气工程", "elementary_chinese":"小学语文", "elementary_commonsense":"小学常识", "elementary_information_and_technology": "小学信息技术", "elementary_mathematics": "初等数学", "ethnology": "民族学", "food_science": "食品科学", "genetics": "遗传学", "global_facts": "全球事实", "high_school_biology": "高中生物", "high_school_chemistry": "高中化学", "high_school_geography": "高中地理", "high_school_mathematics": "高中数学", "high_school_physics": "高中物理学", "high_school_politics": "高中政治", "human_sexuality": "人类性行为", "international_law": "国际法学", "journalism": "新闻学", "jurisprudence": "法理学", "legal_and_moral_basis": "法律与道德基础", "logical": "逻辑学", "machine_learning": "机器学习", "management": "管理学", "marketing": "市场营销", "marxist_theory": "马克思主义理论", "modern_chinese": "现代汉语", "nutrition": "营养学", "philosophy": "哲学", "professional_accounting": "专业会计", "professional_law": "专业法学", "professional_medicine": "专业医学", "professional_psychology": "专业心理学", "public_relations": "公共关系", "security_study":"安全研究", "sociology": "社会学", "sports_science": "体育学", "traditional_chinese_medicine": "中医中药", "virology": "病毒学", "world_history":"世界历史", "world_religions": "世界宗教", } subcategories = { "agronomy": ['other'], "anatomy": ['biology'], "ancient_chinese": ['linguistics','china specific'], "arts": ['arts'], "astronomy": ['physics'], "business_ethics": ['business'], "chinese_civil_service_exam": ['politics','china specific'], "chinese_driving_rule": ['other','china specific'], "chinese_food_culture": ['culture','china specific'], "chinese_foreign_policy": ['politics','china specific'], "chinese_history":['history','china specific'], "chinese_literature": ['literature','china specific'], "chinese_teacher_qualification": ['education','china specific'], "college_actuarial_science":['math'], "college_education":['education'], "college_engineering_hydrology": ['engineering'], "college_law": ['law'], "college_mathematics": ['math'], "college_medical_statistics":['statistics'], "clinical_knowledge": ['other'], "college_medicine": ['other'], "computer_science": ['computer science'], "computer_security": ['other'], "conceptual_physics": ['physics'], "construction_project_management": ['other','china specific'], "economics": ['economics'], "education": ['education'], "elementary_chinese":['linguistics','china specific'], "elementary_commonsense":['other','china specific'], "elementary_information_and_technology": ['other'], "electrical_engineering": ['engineering'], "elementary_mathematics": ['math'], "ethnology": ['culture','china specific'], "food_science": ['other'], "genetics": ['biology'], "global_facts": ['global'], "high_school_biology": ['biology'], "high_school_chemistry": 
['chemistry'], "high_school_geography": ['geography'], "high_school_mathematics": ['math'], "high_school_physics": ['physics'], "high_school_politics": ['politics','china specific'], "human_sexuality": ['other'], "international_law": ['law'], "journalism": ['sociology'], "jurisprudence": ['law'], "legal_and_moral_basis": ['other'], "logical": ['philosophy'], "machine_learning": ['computer science'], "management": ['business'], "marketing": ['business'], "marxist_theory": ['philosophy'], "modern_chinese": ['linguistics','china specific'], "nutrition": ['other'], "philosophy": ['philosophy'], "professional_accounting": ['business'], "professional_law": ['law'], "professional_medicine": ['other'], "professional_psychology": ['psychology'], "public_relations": ['politics'], "security_study": ['politics'], "sociology": ['culture'], "sports_science": ['other'], "traditional_chinese_medicine": ['other','china specific'], "virology": ['biology'], "world_history":['history'], "world_religions": ['global'], } categories = { "STEM": ["physics", "chemistry", "biology", "computer science", "math", "engineering", "statistics"], "Humanities": ["history", "philosophy", "law", "arts", "literature", "global"], "Social Science": ['linguistics',"business", "politics", "culture", "economics", "geography", "psychology", "education", "sociology"], "Other":["other"], "China specific": ["china specific"], }
ymcui/Chinese-Mixtral
609
Chinese Mixtral mixture-of-experts large models (Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
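The CMMLU evaluation script that follows inverts these tables to decide which coarse group (STEM, Humanities, Social Science, Other, China specific) each subject belongs to, matching the subject's subcategories against the category lists. A condensed sketch of that inversion with just a few subjects:

from collections import defaultdict

subcategories = {
    "astronomy": ["physics"],
    "college_law": ["law"],
    "chinese_history": ["history", "china specific"],
}
categories = {
    "STEM": ["physics"],
    "Humanities": ["law", "history"],
    "China specific": ["china specific"],
}

category2subject = defaultdict(list)
for cat, subcats in categories.items():
    for subject, subject_subcats in subcategories.items():
        if any(sc in subcats for sc in subject_subcats):
            category2subject[cat].append(subject)

print(dict(category2subject))
# {'STEM': ['astronomy'], 'Humanities': ['college_law', 'chinese_history'],
#  'China specific': ['chinese_history']}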
scripts/cmmlu/eval.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval
import os
import argparse
import pandas as pd
import torch
import json
from mixtral_evaluator import Mixtral_Evaluator
from glob import glob
import time
from collections import defaultdict
from categories import name_en2zh, subcategories, categories

choices = ["A", "B", "C", "D"]

category2subject = defaultdict(list)
for k, v in categories.items():
    for subject, subcat in subcategories.items():
        for c in subcat:
            if c in v:
                category2subject[k].append(subject)
category2subject_list = defaultdict(list)
for key, value in category2subject.items():
    for val in value:
        category2subject_list[val] = [val, name_en2zh[val], key]
category2subject = category2subject_list

choices = ["A", "B", "C", "D"]


def main(args, evaluator, take):
    subject_mapping = category2subject  # json.load(f)
    filenames = [s.split('/')[-1] for s in glob(args.input_dir + "/test/*csv")]
    subject_list = [val_file.replace(".csv", "") for val_file in filenames]
    accuracy, summary = {}, {}

    run_date = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))
    output_dir = args.output_dir
    save_result_dir = os.path.join(output_dir, f"take{take}")
    if not os.path.exists(save_result_dir):
        os.makedirs(save_result_dir, exist_ok=True)

    all_answers = {}
    for index, subject_name in enumerate(subject_list):
        print(f"{index/len(subject_list)} Inference starts at {run_date} on {args.model_path} with subject of {subject_name}!")
        val_file_path = os.path.join(args.input_dir + '/test', f'{subject_name}.csv')
        dev_file_path = os.path.join(args.input_dir + '/dev', f'{subject_name}.csv')

        val_df = pd.read_csv(val_file_path)
        dev_df = pd.read_csv(dev_file_path) if args.few_shot else None

        correct_ratio, answers = evaluator.eval_subject(subject_name, val_df, dev_df,
                                                        save_result_dir=save_result_dir if args.do_save_csv else None,
                                                        few_shot=args.few_shot,
                                                        cot=args.cot,
                                                        with_prompt=args.with_prompt,
                                                        constrained_decoding=args.constrained_decoding,
                                                        do_test=False)
        print(f"Subject: {subject_name}")
        print(f"Acc: {correct_ratio}")
        accuracy[subject_name] = correct_ratio
        summary[subject_name] = {"score": correct_ratio,
                                 "num": len(val_df),
                                 "correct": correct_ratio * len(val_df) / 100}
        all_answers[subject_name] = answers

    json.dump(all_answers, open(save_result_dir + '/submission.json', 'w'), ensure_ascii=False, indent=4)
    print("\n\nModel:", args.model_path)
    print("Accuracy:")
    for k, v in accuracy.items():
        print(k, ": ", v)

    total_num = 0
    total_correct = 0
    summary['grouped'] = {
        "China specific": {"correct": 0.0, "num": 0},
        "STEM": {"correct": 0.0, "num": 0},
        "Social Science": {"correct": 0.0, "num": 0},
        "Humanities": {"correct": 0.0, "num": 0},
        "Other": {"correct": 0.0, "num": 0}
    }
    for subj, info in subject_mapping.items():
        group = info[2]
        summary['grouped'][group]["num"] += summary[subj]['num']
        summary['grouped'][group]["correct"] += summary[subj]['correct']
    for group, info in summary['grouped'].items():
        info['score'] = info["correct"] / info["num"]
        total_num += info["num"]
        total_correct += info["correct"]
    summary['All'] = {"score": total_correct / total_num, "num": total_num, "correct": total_correct}

    json.dump(summary, open(save_result_dir + '/summary.json', 'w'), ensure_ascii=False, indent=2)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ntrain", "-k", type=int, default=5)
    parser.add_argument("--model_path", type=str)
    parser.add_argument("--cot", choices=["False", "True"], default="False")
    parser.add_argument("--few_shot", choices=["False", "True"], default="True")
    parser.add_argument("--with_prompt", choices=["False", "True"], default="False")
    parser.add_argument("--constrained_decoding", choices=["False", "True"], default="False")
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--n_times", default=1, type=int)
    parser.add_argument("--do_save_csv", choices=["False", "True"], default="False")
    parser.add_argument("--output_dir", type=str)
    parser.add_argument("--input_dir", type=str)
    parser.add_argument("--verbose", action="store_true", help="Print detailed information of each example.")
    parser.add_argument("--load_in_4bit", action="store_true", help="The model was loaded by 4-bit quantization")
    parser.add_argument("--use_flash_attention_2", action="store_true", help="Use flash_attention2 to replace the mixtral attention")
    args = parser.parse_args()

    args.cot = args.cot == "True"
    args.few_shot = args.few_shot == "True"
    args.with_prompt = args.with_prompt == "True"
    args.do_save_csv = args.do_save_csv == "True"
    args.constrained_decoding = args.constrained_decoding == "True"
    if args.constrained_decoding is True:
        args.n_times = max(args.n_times, 1)
    print(args)

    device = torch.device(0)
    print(device)
    evaluator = Mixtral_Evaluator(
        choices=choices,
        k=args.ntrain,
        model_path=args.model_path,
        device=device,
        temperature=args.temperature,
        load_in_4bit=args.load_in_4bit,
        use_flash_attention_2=args.use_flash_attention_2,
        verbose=args.verbose
    )
    for i in range(args.n_times):
        main(args, evaluator=evaluator, take=i)
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/cmmlu/evaluator.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval
import string


class Evaluator:
    def __init__(self, choices, model_path, k=-1):
        self.choices = choices
        self.model_path = model_path
        self.k = k
        self.puncs = list(string.punctuation)

    def format_example(self, line, include_answer=True):
        example = line['question']
        # print(example)
        for choice in self.choices:
            example += f'\n{choice}. {line[f"{choice}"]}'
        example += '\n答案:'
        if include_answer:
            example += f'{line["answer"]}\n\n'
        return example

    def generate_few_shot_prompt(self, subject, dev_df):
        prompt = f"以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n"
        k = self.k
        if self.k == -1:
            k = dev_df.shape[0]
        for i in range(k):
            prompt += self.format_example(dev_df.iloc[i, :])
        return prompt

    def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, save_result_dir=None):
        pass

    def normalize_answer(self, s):

        def white_space_fix(text):
            return ' '.join(text.split())

        def remove_punc(text):
            exclude = set(self.puncs)
            return ''.join(ch for ch in text if ch not in exclude)

        def lower(text):
            return text.lower()

        return white_space_fix(remove_punc(lower(s)))

    def exact_match(self, pred, target):
        return self.normalize_answer(pred) == self.normalize_answer(target)
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/cmmlu/mixtral_evaluator.py
Python
# This code is modified from C-Eval Project: https://github.com/SJTU-LIT/ceval import os import re from tqdm import tqdm import random import numpy as np import torch from transformers import AutoModelForCausalLM, LlamaTokenizer, BitsAndBytesConfig from transformers import GenerationConfig from evaluator import Evaluator class Mixtral_Evaluator(Evaluator): def __init__(self, choices, k, model_path, device, temperature=0.2, load_in_4bit=False, use_flash_attention_2=False, verbose=False): super(Mixtral_Evaluator, self).__init__(choices, model_path, k) load_type = torch.float16 self.model_path = model_path self.device = device self.verbose = verbose self.load_in_4bit = load_in_4bit self.use_flash_attention_2 = use_flash_attention_2 self.tokenizer = LlamaTokenizer.from_pretrained(model_path, legacy=True) quantization_config = BitsAndBytesConfig( load_in_4bit=True, load_in_8bit=False, bnb_4bit_compute_dtype=load_type, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4" ) self.model = AutoModelForCausalLM.from_pretrained( model_path, quantization_config=quantization_config if self.load_in_4bit else None, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto', attn_implementation="flash_attention_2" if self.use_flash_attention_2 else "sdpa" ) self.generation_config = GenerationConfig( temperature=temperature, top_k=40, top_p=0.9, do_sample=True, num_beams=1, repetition_penalty=1.1, max_new_tokens=20 ) self.sA_id = self.tokenizer.encode("A", add_special_tokens=False)[0] self.sB_id = self.tokenizer.encode("B", add_special_tokens=False)[0] self.sC_id = self.tokenizer.encode("C", add_special_tokens=False)[0] self.sD_id = self.tokenizer.encode("D", add_special_tokens=False)[0] self.A_id = self.tokenizer.encode(":A")[-1] self.B_id = self.tokenizer.encode(":B")[-1] self.C_id = self.tokenizer.encode(":C")[-1] self.D_id = self.tokenizer.encode(":D")[-1] def eval_subject(self, subject_name, test_df, dev_df=None, few_shot=False, cot=False, save_result_dir=None, with_prompt=False, constrained_decoding=False, do_test=False): all_answers = {} if constrained_decoding is True: self.generation_config.output_scores = True self.generation_config.return_dict_in_generate = True self.generation_config.max_new_tokens = 1 self.generation_config.top_p = 1.0 self.generation_config.top_k = 0 correct_num = 0 if save_result_dir: result = [] score = [] if few_shot: if with_prompt: history = self.generate_few_shot_prompt(subject_name, dev_df, cot=cot) else: history = self.generate_few_shot_noprompt(subject_name, dev_df, cot=cot) else: history = '' answers = ['NA'] * len(test_df) if do_test is True else list(test_df['Answer']) for row_index, row in tqdm(test_df.iterrows(), total=len(test_df)): question = self.format_example(row, include_answer=False, cot=cot,with_prompt=with_prompt) instruction = question if with_prompt: prompt_template = ( "[INST] {instruction} [/INST]" ) instruction = prompt_template.format_map({'instruction': instruction}) instruction=history+instruction inputs = self.tokenizer(instruction, return_tensors="pt") generation_output = self.model.generate( input_ids = inputs["input_ids"].to(self.device), attention_mask = inputs['attention_mask'].to(self.device), eos_token_id=self.tokenizer.eos_token_id, pad_token_id=self.tokenizer.eos_token_id, generation_config = self.generation_config ) _, length = inputs.input_ids.shape if constrained_decoding is True: logits = generation_output.scores[0][0] logits = logits.float().cpu().detach() choices1_logits = 
logits[[self.sA_id,self.sB_id,self.sC_id,self.sD_id]] choices2_logits = logits[[self.A_id,self.B_id,self.C_id,self.D_id]] choicesAll_logits = (choices1_logits + choices2_logits).numpy() assert not (np.any(np.isinf(choicesAll_logits)) or np.any(np.isnan(choicesAll_logits))) ans = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(choicesAll_logits)] response = self.tokenizer.decode([logits.argmax(-1).item()]) else: response = self.tokenizer.decode(generation_output[0, length:], skip_special_tokens=True) ans, _ = self.extract_answer(row, response) if ans == answers[row_index]: correct_num += 1 correct = 1 else: correct = 0 if self.verbose is True: print(f"\n======={str(row_index)}=======") print(f"question: {question}\n") print(f"response: {response}\n") print(f"extracted answer: {ans}") print(f"ground truth: {answers[row_index]} \n") if save_result_dir: result.append(response) score.append(correct) all_answers[str(row_index)] = ans correct_ratio = 100*correct_num/len(answers) if save_result_dir: test_df['model_output'] = result test_df['correctness'] = score test_df.to_csv(os.path.join(save_result_dir, f'{subject_name}_test.csv')) return correct_ratio, all_answers def format_example(self, line, include_answer=True, cot=False, with_prompt=False): example = line['Question'] suffix = "" for choice in self.choices: example += f'\n{choice}. {line[f"{choice}"]}' if include_answer: if cot: example += "\n答案:让我们一步一步思考,\n" + \ line["explanation"] + f"\n所以答案是{line['Answer']}。\n\n" else: example += '\n答案:' + suffix + line["Answer"] + '\n\n' else: if with_prompt is False: if cot: example += "\n答案:让我们一步一步思考,\n1." else: example += '\n答案:' + suffix else: if cot: example += "\n答案是什么?让我们一步一步思考,\n1." else: example += '\n答案:' return example def generate_few_shot_noprompt(self, subject, dev_df, cot=False): prompt = f"以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n" k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): prompt += self.format_example( dev_df.iloc[i, :], include_answer=True, cot=cot ) return prompt def generate_few_shot_prompt(self, subject, dev_df, cot=False): prompt = f"以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n" prompt_template = ( "[INST] {instruction} [/INST]好的,我会结合{subject}相关知识回答" ) prompt = prompt_template.format_map({'instruction':prompt, "subject":subject}) k = self.k if self.k == -1: k = dev_df.shape[0] for i in range(k): line=dev_df.iloc[i, :] q=line['Question'] for choice in self.choices: q += f'\n{choice}. {line[f"{choice}"]}' a=line['Answer'] prompt+="[INST] "+q+"\n答案: [/INST]"+a+"\n" return prompt def extract_answer(self, line, gen_ans): m = re.findall(r'所以答案是(.+?)。', gen_ans, re.M) if len(m) > 0 and m[-1] in self.choices: return m[-1], True answer_patterns = [ r'([ABCD])是正确的', r'选项([ABCD])正确', r'答案为([ABCD])', r'答案是([ABCD])', r'答案([ABCD])', r'选择([ABCD])', r'答案:([ABCD])', r'选择答案([ABCD])' ] # RE extraction for answer_pattern in answer_patterns: m = re.search(answer_pattern, gen_ans, re.M) if m: answer = m.group(1) return answer, False # only containing one choice-character m = re.findall(r'[ABCD]', gen_ans, re.M) if len(m) >= 1: answer = m[0] return answer, False choices_dict = {} pattern = "" for c in self.choices: choices_dict[str(line[f'{c}'])] = c pattern += re.escape(str(line[f'{c}']))+"|" pattern = pattern[:-1] m = re.findall(pattern, gen_ans, re.M) print("w/ escape:",repr(pattern),gen_ans,(len(m)>=1)) if len(m) >= 1: answer = choices_dict[m[0]] return answer, False return random.sample('ABCD', 1)[0], False
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/inference/inference_hf.py
Python
import argparse import json, os TEMPLATE = ( "[INST] {instruction} [/INST]" ) parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, type=str, required=True) parser.add_argument('--tokenizer_path', default=None, type=str) parser.add_argument('--data_file', default=None, type=str, help="A file that contains instructions (one instruction per line)") parser.add_argument('--with_prompt', action='store_true', help="wrap the input with the prompt automatically") parser.add_argument('--interactive', action='store_true', help="run in the instruction mode (single-turn)") parser.add_argument('--predictions_file', default='./predictions.json', type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--only_cpu', action='store_true', help='only use CPU for inference') parser.add_argument('--load_in_8bit', action='store_true', help="Load the LLM in the 8bit mode") parser.add_argument('--load_in_4bit', action='store_true', help="Load the LLM in the 4bit mode") parser.add_argument("--use_vllm", action='store_true', help="Use vLLM as back-end LLM service.") parser.add_argument('--use_flash_attention_2', action='store_true', help="Use flash attention to replace the Mixtral attention") args = parser.parse_args() if args.use_vllm: if args.load_in_8bit or args.load_in_4bit: raise ValueError("vLLM currently does not support quantization, please use fp16 (default) or unuse --use_vllm.") if args.only_cpu: raise ValueError("vLLM requires GPUs with compute capability not less than 7.0. If you want to run only on CPU, please unuse --use_vllm.") if args.load_in_8bit and args.load_in_4bit: raise ValueError("Only one quantization method can be chosen for inference. Please check your arguments") if args.only_cpu is True: args.gpus = "" if args.load_in_8bit or args.load_in_4bit: raise ValueError("Quantization is unavailable on CPU.") os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus import torch from transformers import AutoModelForCausalLM, LlamaTokenizer from transformers import GenerationConfig from transformers import BitsAndBytesConfig if args.use_vllm: from vllm import LLM, SamplingParams import sys parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_dir) if args.use_vllm: generation_config = dict( temperature=0.2, top_k=40, top_p=0.9, max_tokens=400, presence_penalty=1.0, ) else: generation_config = GenerationConfig( temperature=0.2, top_k=40, top_p=0.9, do_sample=True, num_beams=1, repetition_penalty=1.1, max_new_tokens=400 ) sample_data = ["为什么要减少污染,保护环境?"] def generate_prompt(instruction): return TEMPLATE.format_map({'instruction': instruction}) if __name__ == '__main__': load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device('cpu') if args.tokenizer_path is None: args.tokenizer_path = args.base_model if args.use_vllm: model = LLM(model=args.base_model, tokenizer=args.tokenizer_path, tokenizer_mode='slow', tensor_parallel_size=len(args.gpus.split(',')) ) tokenizer = LlamaTokenizer.from_pretrained(args.tokenizer_path, legacy=True) else: tokenizer = LlamaTokenizer.from_pretrained(args.tokenizer_path, legacy=True) if args.load_in_4bit or args.load_in_8bit: quantization_config = BitsAndBytesConfig( load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, bnb_4bit_compute_dtype=load_type, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4" ) model = AutoModelForCausalLM.from_pretrained( args.base_model, torch_dtype=load_type, low_cpu_mem_usage=True, 
device_map='auto', load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, quantization_config=quantization_config if (args.load_in_4bit or args.load_in_8bit) else None, attn_implementation="flash_attention_2" if args.use_flash_attention_2 else "sdpa" ) if device==torch.device('cpu'): model.float() model.eval() # test data if args.data_file is None: examples = sample_data else: with open(args.data_file,'r') as f: examples = [line.strip() for line in f.readlines()] print("first 10 examples:") for example in examples[:10]: print(example) with torch.no_grad(): if args.interactive: print("Start inference with instruction mode.") print('='*85) print("+ 该模式下仅支持单轮问答,无多轮对话能力。\n" "+ 如要进行多轮对话,请使用llama.cpp") print('-'*85) print("+ This mode only supports single-turn QA.\n" "+ If you want to experience multi-turn dialogue, please use llama.cpp") print('='*85) while True: raw_input_text = input("Input:") if len(raw_input_text.strip())==0: break if args.with_prompt: input_text = generate_prompt(instruction=raw_input_text) else: input_text = raw_input_text if args.use_vllm: output = model.generate([input_text], SamplingParams(**generation_config), use_tqdm=False) response = output[0].outputs[0].text else: inputs = tokenizer(input_text,return_tensors="pt") #add_special_tokens=False ? generation_output = model.generate( input_ids = inputs["input_ids"].to(device), attention_mask = inputs['attention_mask'].to(device), eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id, generation_config = generation_config ) s = generation_output[0] output = tokenizer.decode(s,skip_special_tokens=True) if args.with_prompt: response = output.split("[/INST]")[-1].strip() else: response = output print("Response: ",response) print("\n") else: print("Start inference.") results = [] if args.use_vllm: if args.with_prompt is True: inputs = [generate_prompt(example) for example in examples] else: inputs = examples outputs = model.generate(inputs, SamplingParams(**generation_config)) for index, (example, output) in enumerate(zip(examples, outputs)): response = output.outputs[0].text print(f"======={index}=======") print(f"Input: {example}\n") print(f"Output: {response}\n") results.append({"Input":example,"Output":response}) else: for index, example in enumerate(examples): if args.with_prompt: input_text = generate_prompt(instruction=example) else: input_text = example inputs = tokenizer(input_text,return_tensors="pt") #add_special_tokens=False ? generation_output = model.generate( input_ids = inputs["input_ids"].to(device), attention_mask = inputs['attention_mask'].to(device), eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id, generation_config = generation_config ) s = generation_output[0] output = tokenizer.decode(s,skip_special_tokens=True) if args.with_prompt: response = output.split("[/INST]")[1].strip() else: response = output print(f"======={index}=======") print(f"Input: {example}\n") print(f"Output: {response}\n") results.append({"Input":input_text,"Output":response}) dirname = os.path.dirname(args.predictions_file) os.makedirs(dirname,exist_ok=True) with open(args.predictions_file,'w') as f: json.dump(results,f,ensure_ascii=False,indent=2) if args.use_vllm: with open(dirname+'/generation_config.json','w') as f: json.dump(generation_config,f,ensure_ascii=False,indent=2) else: generation_config.save_pretrained('./')
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/llamacpp/chat.sh
Shell
#!/bin/bash

# script to chat with Chinese-Mixtral-Instruct model
# usage: ./chat.sh chinese-mixtral-instruct-gguf-model-path
# WARNING: the hyperparameters are not optimal, please tune them yourself

./main -m $1 --color --interactive-first \
 -c 4096 -t 6 --temp 0.2 --repeat_penalty 1.1 -ngl 999 \
 --in-prefix ' [INST] ' --in-suffix ' [/INST]'
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/longbench/eval.py
Python
# The script is from https://github.com/THUDM/LongBench
import os
import json
import argparse
import numpy as np

from metrics import (
    qa_f1_score,
    rouge_zh_score,
    qa_f1_zh_score,
    rouge_score,
    classification_score,
    retrieval_score,
    retrieval_zh_score,
    count_score,
    code_sim_score,
)

dataset2metric = {
    "narrativeqa": qa_f1_score,
    "qasper": qa_f1_score,
    "multifieldqa_en": qa_f1_score,
    "multifieldqa_zh": qa_f1_zh_score,
    "hotpotqa": qa_f1_score,
    "2wikimqa": qa_f1_score,
    "musique": qa_f1_score,
    "dureader": rouge_zh_score,
    "gov_report": rouge_score,
    "qmsum": rouge_score,
    "multi_news": rouge_score,
    "vcsum": rouge_zh_score,
    "trec": classification_score,
    "triviaqa": qa_f1_score,
    "samsum": rouge_score,
    "lsht": classification_score,
    "passage_retrieval_en": retrieval_score,
    "passage_count": count_score,
    "passage_retrieval_zh": retrieval_zh_score,
    "lcc": code_sim_score,
    "repobench-p": code_sim_score,
}


def parse_args(args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_dir')
    parser.add_argument('--e', action='store_true', help="Evaluate on LongBench-E")
    return parser.parse_args(args)


def scorer_e(dataset, predictions, answers, lengths, all_classes):
    scores = {"0-4k": [], "4-8k": [], "8k+": []}
    for (prediction, ground_truths, length) in zip(predictions, answers, lengths):
        score = 0.
        if dataset in ["trec", "triviaqa", "samsum", "lsht"]:
            prediction = prediction.lstrip('\n').split('\n')[0]
        for ground_truth in ground_truths:
            score = max(score, dataset2metric[dataset](prediction, ground_truth, all_classes=all_classes))
        if length < 4000:
            scores["0-4k"].append(score)
        elif length < 8000:
            scores["4-8k"].append(score)
        else:
            scores["8k+"].append(score)
    for key in scores.keys():
        scores[key] = round(100 * np.mean(scores[key]), 2)
    return scores


def scorer(dataset, predictions, answers, all_classes):
    total_score = 0.
    for (prediction, ground_truths) in zip(predictions, answers):
        score = 0.
        if dataset in ["trec", "triviaqa", "samsum", "lsht"]:
            prediction = prediction.lstrip('\n').split('\n')[0]
        for ground_truth in ground_truths:
            score = max(score, dataset2metric[dataset](prediction, ground_truth, all_classes=all_classes))
        total_score += score
    return round(100 * total_score / len(predictions), 2)


if __name__ == '__main__':
    args = parse_args()
    scores = dict()
    if args.e:
        path = f"{args.output_dir}/pred_e/"
    else:
        path = f"{args.output_dir}/pred/"
    all_files = os.listdir(path)
    print("Evaluating on:", all_files)
    for filename in all_files:
        if not filename.endswith("jsonl"):
            continue
        predictions, answers, lengths = [], [], []
        dataset = filename.split('.')[0]
        with open(f"{path}{filename}", "r", encoding="utf-8") as f:
            print(filename)
            for line in f:
                data = json.loads(line)
                predictions.append(data["pred"])
                answers.append(data["answers"])
                all_classes = data["all_classes"]
                if "length" in data:
                    lengths.append(data["length"])
        if args.e:
            score = scorer_e(dataset, predictions, answers, lengths, all_classes)
        else:
            score = scorer(dataset, predictions, answers, all_classes)
        scores[dataset] = score
    if args.e:
        out_path = f"{args.output_dir}/pred_e/result.json"
    else:
        out_path = f"{args.output_dir}/pred/result.json"
    with open(out_path, "w") as f:
        json.dump(scores, f, ensure_ascii=False, indent=4)
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/longbench/metrics.py
Python
# The script is from https://github.com/THUDM/LongBench
import re
import string

import jieba
from fuzzywuzzy import fuzz
import difflib

from collections import Counter
from rouge import Rouge


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def normalize_zh_answer(s):
    """Lower text and remove punctuation, extra whitespace."""

    def white_space_fix(text):
        return "".join(text.split())

    def remove_punc(text):
        cn_punctuation = "!?。。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
        all_punctuation = set(string.punctuation + cn_punctuation)
        return "".join(ch for ch in text if ch not in all_punctuation)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_punc(lower(s)))


def count_score(prediction, ground_truth, **kwargs):
    numbers = re.findall(r"\d+", prediction)
    right_num = 0
    for number in numbers:
        if str(number) == str(ground_truth):
            right_num += 1
    final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers)
    return float(final_score)


def retrieval_score(prediction, ground_truth, **kwargs):
    pattern = r'Paragraph (\d+)'
    matches = re.findall(pattern, ground_truth)
    ground_truth_id = matches[0]
    numbers = re.findall(r"\d+", prediction)
    right_num = 0
    for number in numbers:
        if str(number) == str(ground_truth_id):
            right_num += 1
    final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers)
    return float(final_score)


def retrieval_zh_score(prediction, ground_truth, **kwargs):
    pattern = r'段落(\d+)'
    matches = re.findall(pattern, ground_truth)
    ground_truth_id = matches[0]
    numbers = re.findall(r"\d+", prediction)
    right_num = 0
    for number in numbers:
        if str(number) == str(ground_truth_id):
            right_num += 1
    final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers)
    return float(final_score)


def code_sim_score(prediction, ground_truth, **kwargs):
    all_lines = prediction.lstrip('\n').split('\n')
    prediction = ""
    for line in all_lines:
        if ('`' not in line) and ('#' not in line) and ('//' not in line):
            prediction = line
            break
    return (fuzz.ratio(prediction, ground_truth) / 100)


def classification_score(prediction, ground_truth, **kwargs):
    em_match_list = []
    all_classes = kwargs["all_classes"]
    for class_name in all_classes:
        if class_name in prediction:
            em_match_list.append(class_name)
    for match_term in em_match_list:
        if match_term in ground_truth and match_term != ground_truth:
            em_match_list.remove(match_term)
    if em_match_list != 0:
        if ground_truth in em_match_list:
            score = (1.0 / len(em_match_list))
        else:
            score = 0.0
    else:
        best_match = None
        highest_similarity = 0
        for string in all_classes:
            similarity = difflib.SequenceMatcher(None, string, prediction).ratio()
            if similarity > highest_similarity:
                highest_similarity = similarity
                best_match = string
        score = float(best_match == ground_truth)
    return score


def rouge_score(prediction, ground_truth, **kwargs):
    rouge = Rouge()
    try:
        scores = rouge.get_scores([prediction], [ground_truth], avg=True)
    except Exception:
        return 0.0
    return scores["rouge-l"]["f"]


def rouge_zh_score(prediction, ground_truth, **kwargs):
    prediction = " ".join(list(jieba.cut(prediction, cut_all=False)))
    ground_truth = " ".join(list(jieba.cut(ground_truth, cut_all=False)))
    score = rouge_score(prediction, ground_truth)
    return score


def f1_score(prediction, ground_truth, **kwargs):
    common = Counter(prediction) & Counter(ground_truth)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction)
    recall = 1.0 * num_same / len(ground_truth)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def qa_f1_score(prediction, ground_truth, **kwargs):
    normalized_prediction = normalize_answer(prediction)
    normalized_ground_truth = normalize_answer(ground_truth)

    prediction_tokens = normalized_prediction.split()
    ground_truth_tokens = normalized_ground_truth.split()
    return f1_score(prediction_tokens, ground_truth_tokens)


def qa_f1_zh_score(prediction, ground_truth, **kwargs):
    prediction_tokens = list(jieba.cut(prediction, cut_all=False))
    ground_truth_tokens = list(jieba.cut(ground_truth, cut_all=False))
    prediction_tokens = [normalize_zh_answer(token) for token in prediction_tokens]
    ground_truth_tokens = [normalize_zh_answer(token) for token in ground_truth_tokens]
    prediction_tokens = [token for token in prediction_tokens if len(token) > 0]
    ground_truth_tokens = [token for token in ground_truth_tokens if len(token) > 0]
    return f1_score(prediction_tokens, ground_truth_tokens)
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/longbench/pred_mixtral.py
Python
# The script is modified from https://github.com/THUDM/LongBench/blob/main/pred.py from datasets import load_dataset import torch import random import numpy as np import json from transformers import LlamaTokenizer, AutoModelForCausalLM from transformers import BitsAndBytesConfig from tqdm import tqdm import os import argparse import sys parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_dir) dir_path = os.path.dirname(os.path.realpath(__file__)) parser = argparse.ArgumentParser() parser.add_argument('--model_path', type=str) parser.add_argument('--load_in_4bit',action='store_true') parser.add_argument('--load_in_8bit',action='store_true') parser.add_argument('--predict_on',type=str, default='zh') parser.add_argument('--output_dir',type=str, default='pred') parser.add_argument('--gpus',type=str, default=None) parser.add_argument('--max_length',type=int, default=4096-512) parser.add_argument('--e', action='store_true', help="Evaluate on LongBench-E") parser.add_argument('--use_flash_attention_2', action='store_true', help="Use flash attention to replace the mixtral attention") args = parser.parse_args() model_path = args.model_path load_in_4bit = args.load_in_4bit load_in_8bit = args.load_in_8bit predict_on = args.predict_on output_dir = args.output_dir gpus=args.gpus max_length = args.max_length DO_SAMPLE =True TEMPERATURE = 0.2 REPETITION_PENALTY = 1.1 TOP_P = 0.95 TOP_K = 40 if gpus is not None: os.environ["CUDA_VISIBLE_DEVICES"] = gpus def get_pred(model, tokenizer, data, max_length, max_gen, prompt_format, dataset, device): preds = [] for json_obj in tqdm(data): prompt = prompt_format.format(**json_obj) # truncate to fit max_length (we suggest truncate in the middle, since the left and right side may contain crucial instructions) tokenized_prompt = tokenizer(prompt, truncation=False, return_tensors="pt").input_ids[0] if len(tokenized_prompt) > max_length: half = int(max_length/2) prompt = tokenizer.decode(tokenized_prompt[:half], skip_special_tokens=True)+tokenizer.decode(tokenized_prompt[-half:], skip_special_tokens=True) input_data = tokenizer(prompt, truncation=False, return_tensors="pt").to(device) context_length = input_data.input_ids.shape[-1] if dataset == "samsum": # prevent illegal output on samsum (model endlessly repeat "\nDialogue"), might be a prompting issue output = model.generate( **input_data, max_new_tokens=max_gen, num_beams=1, do_sample=DO_SAMPLE, repetition_penalty = REPETITION_PENALTY, top_p = TOP_P, top_k = TOP_K, temperature=TEMPERATURE, min_length=context_length+1, eos_token_id=[tokenizer.eos_token_id, tokenizer.encode("\n", add_special_tokens=False)[-1]], pad_token_id=tokenizer.eos_token_id )[0] else: output = model.generate( **input_data, max_new_tokens=max_gen, num_beams=1, do_sample=DO_SAMPLE, repetition_penalty = REPETITION_PENALTY, top_p = TOP_P, top_k = TOP_K, temperature=TEMPERATURE, pad_token_id=tokenizer.eos_token_id )[0] pred = tokenizer.decode(output[context_length:], skip_special_tokens=True) #print(pred) preds.append({"pred": pred, "answers": json_obj["answers"], "all_classes": json_obj["all_classes"], "length": json_obj["length"]}) return preds def seed_everything(seed): torch.manual_seed(seed) torch.cuda.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True torch.cuda.manual_seed_all(seed) if __name__ == '__main__': seed_everything(42) load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) 
else: device = torch.device('cpu') if args.e: en_datasets = [ "hotpotqa","2wikimqa", "qasper", "multifieldqa_en", "gov_report", "trec", "samsum", "triviaqa", "passage_count", "passage_retrieval_en", "multi_news"] zh_datasets = [] code_datasets = [ "lcc", "repobench-p" ] if not os.path.exists(f"{output_dir}/pred_e"): os.makedirs(f"{output_dir}/pred_e") else: en_datasets = [ "hotpotqa","2wikimqa", "musique", "narrativeqa", "qasper", "multifieldqa_en", "gov_report", "qmsum", "trec", "samsum", "triviaqa", "passage_count", "passage_retrieval_en", "multi_news"] zh_datasets = [ "dureader", "multifieldqa_zh", "vcsum","lsht", "passage_retrieval_zh"] code_datasets = [ "lcc", "repobench-p" ] if not os.path.exists(f"{output_dir}/pred"): os.makedirs(f"{output_dir}/pred") datasets = [] for data_type in predict_on.split(','): if data_type == 'zh': datasets += zh_datasets elif data_type == 'en': datasets += en_datasets elif data_type == 'code': datasets += code_datasets print(datasets) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') tokenizer = LlamaTokenizer.from_pretrained(model_path, legacy=True) model = None if args.load_in_4bit or args.load_in_8bit: quantization_config = BitsAndBytesConfig( load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, bnb_4bit_compute_dtype=load_type, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4" ) model = AutoModelForCausalLM.from_pretrained( model_path, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto', quantization_config=quantization_config if (args.load_in_4bit or args.load_in_8bit) else None, attn_implementation="flash_attention_2" if args.use_flash_attention_2 else "sdpa" ) model = model.eval() model_vocab_size = model.get_input_embeddings().weight.size(0) print(f"Vocab of the base model: {model_vocab_size}") tokenizer_vocab_size = len(tokenizer) print(f"Vocab of the tokenizer: {tokenizer_vocab_size}") # we design specific prompt format and max generation length for each task, feel free to modify them to optimize model output dataset2prompt = json.load(open(dir_path + "/config/dataset2prompt.json", "r")) dataset2maxlen = json.load(open(dir_path + "/config/dataset2maxlen.json", "r")) # predict on each dataset for dataset in datasets: print(f"Loading dataset {dataset}") if args.e: data = load_dataset('THUDM/LongBench', dataset+'_e', split='test') output_path = f"{output_dir}/pred_e/{dataset}.jsonl" else: data = load_dataset('THUDM/LongBench', dataset, split='test') output_path = f"{output_dir}/pred/{dataset}.jsonl" prompt_format = dataset2prompt[dataset] max_gen = dataset2maxlen[dataset] preds = get_pred(model, tokenizer, data, max_length, max_gen, prompt_format, dataset, device) with open(output_path, "w", encoding="utf-8") as f: for pred in preds: json.dump(pred, f, ensure_ascii=False) f.write('\n')
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/merge_mixtral_with_chinese_lora_low_mem.py
Python
""" Usage: python merge_mixtral_with_chinese_lora_low_mem.py \ --base_model path/to/Mixtral-8x7B-v0.1 \ --lora_model path/to/chinese-Mixtral-8x7B-v0.1-lora \ --output_dir path/to/output-dir """ import argparse import json import os import gc import torch import peft from transformers import LlamaTokenizer from transformers.modeling_utils import dtype_byte_size from huggingface_hub import snapshot_download import re import safetensors from safetensors.torch import load_file as safe_load_file parser = argparse.ArgumentParser(description='Script to merge Mixtral-8x7B-v0.1 with Chinese-Mixtral-LoRA weights') parser.add_argument('--base_model', default=None, required=True, type=str, help="Base model path (basically Mixtral-8x7B-v0.1)") parser.add_argument('--lora_model', default=None, required=True, type=str, help="LoRA model path (Chinese-Mixtral-LoRA, Chinese-Mixtral-Instruct-LoRA)") parser.add_argument('--output_dir', default='./merged_model', type=str, help="Output path for the merged model") parser.add_argument('--verbose', default=False, action='store_true', help="Show detailed debugging messages") WEIGHTS_NAME = "adapter_model.bin" SAFETENSORS_WEIGHTS_NAME = "adapter_model.safetensors" def transpose(weight, fan_in_fan_out): return weight.T if fan_in_fan_out else weight def jsonload(filename): with open(filename, "r") as file: d = json.load(file) return d if __name__=='__main__': args = parser.parse_args() base_model_path = args.base_model lora_model_path = args.lora_model output_dir = args.output_dir os.makedirs(output_dir, exist_ok=True) print(f"="*80) print(f"Base model: {base_model_path}") print(f"LoRA model: {lora_model_path}") tokenizers_and_loras = [] print(f"Loading {lora_model_path}") if not os.path.exists(lora_model_path): print("Cannot find lora model on the disk. Downloading lora model from hub...") lora_model_path = snapshot_download(repo_id=lora_model_path) tokenizer = LlamaTokenizer.from_pretrained(lora_model_path, legacy=True) lora_config = peft.LoraConfig.from_pretrained(lora_model_path) if os.path.exists(os.path.join(lora_model_path, SAFETENSORS_WEIGHTS_NAME)): lora_filename = os.path.join(lora_model_path, SAFETENSORS_WEIGHTS_NAME) use_safetensors = True elif os.path.exists(os.path.join(lora_model_path, WEIGHTS_NAME)): lora_filename = os.path.join(lora_model_path, WEIGHTS_NAME) use_safetensors = False else: raise ValueError( f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {lora_model_path}." ) if use_safetensors: lora_state_dict = safe_load_file(lora_filename, device="cpu") else: lora_state_dict = torch.load(lora_filename, map_location='cpu') if 'base_model.model.model.embed_tokens.weight' in lora_state_dict: lora_vocab_size = lora_state_dict['base_model.model.model.embed_tokens.weight'].shape[0] assert lora_vocab_size == len(tokenizer), \ (f"The vocab size of the tokenizer {len(tokenizer)} does not match the vocab size of the LoRA weight {lora_vocab_size}!\n") tokenizers_and_loras.append( { "tokenizer" :tokenizer, "state_dict" :lora_state_dict, "config": lora_config, "scaling": lora_config.lora_alpha / lora_config.r, "fan_in_fan_out" : lora_config.fan_in_fan_out, }) if not os.path.exists(base_model_path): print("Cannot find lora model on the disk. 
Downloading lora model from hub...") base_model_path = snapshot_download(repo_id=base_model_path) ckpt_filenames = sorted([f for f in os.listdir(base_model_path) if re.match(r'model-(\d+)-of-(\d+).safetensors',f)]) if len(ckpt_filenames) == 0: raise FileNotFoundError(f"Cannot find base model checkpoints in ${base_model_path}. Please make sure the checkpoints are saved in the HF format.") total_size = 0 for index, filename in enumerate(ckpt_filenames): print(f"Loading ckpt {filename}") if re.match('(.*).safetensors', filename): state_dict = safe_load_file(os.path.join(base_model_path,filename), device="cpu") else: state_dict = torch.load(os.path.join(base_model_path,filename), map_location='cpu') print("Merging...") for k in state_dict: for tl_idx, t_and_l in enumerate(tokenizers_and_loras): saved_key = 'base_model.model.'+k lora_key_a = saved_key.replace('.weight','.lora_A.weight') if saved_key in t_and_l['state_dict']: if args.verbose: print(f"copying {saved_key} from {tl_idx}-th LoRA weight to {k}") state_dict[k] = t_and_l['state_dict'][saved_key].half().clone() # do we need half()? if lora_key_a in t_and_l['state_dict']: lora_key_b = lora_key_a.replace('lora_A.weight','lora_B.weight') if args.verbose: print(f"merging {lora_key_a} and lora_B.weight form {tl_idx}-th LoRA weight to {k}") state_dict[k] += ( transpose( t_and_l['state_dict'][lora_key_b].float() @ t_and_l['state_dict'][lora_key_a].float(), t_and_l['fan_in_fan_out']) * t_and_l['scaling'] ) weight_size = state_dict[k].numel() * dtype_byte_size(state_dict[k].dtype) total_size += weight_size print(f"Saving ckpt {filename} to {output_dir} in HF format...") if use_safetensors: safetensors.torch.save_file( state_dict, os.path.join(output_dir, filename), metadata={"format": "pt"} ) else: torch.save(state_dict, os.path.join(output_dir, filename)) del state_dict gc.collect() # Effectively enforce garbage collection print(f"Saving tokenizer") tokenizers_and_loras[-1]['tokenizer'].save_pretrained(output_dir) configs = ('config.json', 'generation_config.json', "model.safetensors.index.json") for config in configs: if os.path.exists(os.path.join(lora_model_path, config)): print(f"Saving {config} from {lora_model_path}") with open(os.path.join(lora_model_path, config),'r') as f: obj = json.load(f) else: if os.path.exists(os.path.join(base_model_path, config)): print(f"Saving {config} from {base_model_path}") with open(os.path.join(base_model_path, config),'r') as f: obj = json.load(f) if config == 'config.json': obj['vocab_size'] = len(tokenizers_and_loras[-1]['tokenizer']) if config == "model.safetensors.index.json": obj['metadata']['total_size'] = total_size if os.path.exists(os.path.join(base_model_path, config)): with open(os.path.join(output_dir, config), 'w') as f: json.dump(obj, f, indent=2) print("Done.") print(f"Check output dir: {output_dir}")
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/mmlu/categories.py
Python
subcategories = {
    "abstract_algebra": ["math"],
    "anatomy": ["health"],
    "astronomy": ["physics"],
    "business_ethics": ["business"],
    "clinical_knowledge": ["health"],
    "college_biology": ["biology"],
    "college_chemistry": ["chemistry"],
    "college_computer_science": ["computer science"],
    "college_mathematics": ["math"],
    "college_medicine": ["health"],
    "college_physics": ["physics"],
    "computer_security": ["computer science"],
    "conceptual_physics": ["physics"],
    "econometrics": ["economics"],
    "electrical_engineering": ["engineering"],
    "elementary_mathematics": ["math"],
    "formal_logic": ["philosophy"],
    "global_facts": ["other"],
    "high_school_biology": ["biology"],
    "high_school_chemistry": ["chemistry"],
    "high_school_computer_science": ["computer science"],
    "high_school_european_history": ["history"],
    "high_school_geography": ["geography"],
    "high_school_government_and_politics": ["politics"],
    "high_school_macroeconomics": ["economics"],
    "high_school_mathematics": ["math"],
    "high_school_microeconomics": ["economics"],
    "high_school_physics": ["physics"],
    "high_school_psychology": ["psychology"],
    "high_school_statistics": ["math"],
    "high_school_us_history": ["history"],
    "high_school_world_history": ["history"],
    "human_aging": ["health"],
    "human_sexuality": ["culture"],
    "international_law": ["law"],
    "jurisprudence": ["law"],
    "logical_fallacies": ["philosophy"],
    "machine_learning": ["computer science"],
    "management": ["business"],
    "marketing": ["business"],
    "medical_genetics": ["health"],
    "miscellaneous": ["other"],
    "moral_disputes": ["philosophy"],
    "moral_scenarios": ["philosophy"],
    "nutrition": ["health"],
    "philosophy": ["philosophy"],
    "prehistory": ["history"],
    "professional_accounting": ["other"],
    "professional_law": ["law"],
    "professional_medicine": ["health"],
    "professional_psychology": ["psychology"],
    "public_relations": ["politics"],
    "security_studies": ["politics"],
    "sociology": ["culture"],
    "us_foreign_policy": ["politics"],
    "virology": ["health"],
    "world_religions": ["philosophy"],
}

categories = {
    "STEM": ["physics", "chemistry", "biology", "computer science", "math", "engineering"],
    "humanities": ["history", "philosophy", "law"],
    "social sciences": ["politics", "culture", "economics", "geography", "psychology"],
    "other (business, health, misc.)": ["other", "business", "health"],
}
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/mmlu/eval.py
Python
# modified from https://github.com/baichuan-inc/Baichuan-7B/blob/main/evaluation/evaluate_mmlu.py
import argparse
import os
import torch
import numpy as np
import pandas as pd
from categories import subcategories, categories
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

choices = ["A", "B", "C", "D"]


def format_subject(subject):
    line = subject.split("_")
    s = ""
    for entry in line:
        s += " " + entry
    return s


def format_example(df, idx, include_answer=True):
    prompt = df.iloc[idx, 0]
    k = df.shape[1] - 2
    for j in range(k):
        prompt += "\n{}. {}".format(choices[j], df.iloc[idx, j + 1])
    prompt += "\nAnswer:"
    if include_answer:
        prompt += " {}\n\n".format(df.iloc[idx, k + 1])
    return prompt


def gen_prompt(train_df, subject, k=-1):
    prompt = "The following are multiple choice questions (with answers) about {}.\n\n".format(
        format_subject(subject)
    )
    if k == -1:
        k = train_df.shape[0]
    for i in range(k):
        prompt += format_example(train_df, i)
    return prompt


@torch.no_grad()
def mmlu_eval(args, subject, model, tokenizer, dev_df, test_df):
    cors = []
    all_probs = []

    for i in range(test_df.shape[0]):
        # get prompt and make sure it fits
        k = args.ntrain
        prompt_end = format_example(test_df, i, include_answer=False)
        train_prompt = gen_prompt(dev_df, subject, k)
        prompt = train_prompt + prompt_end

        input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
        label = test_df.iloc[i, test_df.shape[1] - 1]

        logits = model(
            input_ids=input_ids,
        ).logits[:, -1].flatten()

        probs = (
            torch.nn.functional.softmax(
                torch.tensor(
                    [
                        logits[tokenizer("A").input_ids[-1]],
                        logits[tokenizer("B").input_ids[-1]],
                        logits[tokenizer("C").input_ids[-1]],
                        logits[tokenizer("D").input_ids[-1]],
                    ]
                ),
                dim=0,
            )
            .detach()
            .cpu()
            .to(torch.float32)
            .numpy()
        )
        pred = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(probs)]

        cor = pred == label
        cors.append(cor)
        all_probs.append(probs)

    acc = np.mean(cors)
    cors = np.array(cors)
    all_probs = np.array(all_probs)
    print("Average accuracy {:.3f} - {}".format(acc, subject))

    return cors, acc, all_probs


def main(args):
    tokenizer = AutoTokenizer.from_pretrained(args.model_path, use_fast=False)
    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        load_in_8bit=False,
        bnb_4bit_compute_dtype=torch.float16,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4"
    )
    model = AutoModelForCausalLM.from_pretrained(
        args.model_path,
        quantization_config=quantization_config if args.load_in_4bit else None,
        torch_dtype=torch.float16,
        low_cpu_mem_usage=True,
        device_map='auto',
        attn_implementation="flash_attention_2" if args.use_flash_attention_2 else "sdpa"
    ).eval()
    subjects = sorted(
        [
            f.split("_test.csv")[0]
            for f in os.listdir(os.path.join(args.data_dir, "test"))
            if "_test.csv" in f
        ]
    )

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    if not os.path.exists(os.path.join(args.save_dir, "results")):
        os.makedirs(os.path.join(args.save_dir, "results"))

    all_cors = []
    subcat_cors = {
        subcat: [] for subcat_lists in subcategories.values() for subcat in subcat_lists
    }
    cat_cors = {cat: [] for cat in categories}

    for subject in subjects:
        dev_df = pd.read_csv(
            os.path.join(args.data_dir, "dev", subject + "_dev.csv"), header=None
        )[: args.ntrain]
        if args.do_test:
            test_df = pd.read_csv(
                os.path.join(args.data_dir, "test", subject + "_test.csv"), header=None
            )
        else:
            test_df = pd.read_csv(
                os.path.join(args.data_dir, "val", subject + "_val.csv"), header=None
            )

        cors, _, probs = mmlu_eval(args, subject, model, tokenizer, dev_df, test_df)
        subcats = subcategories[subject]
        for subcat in subcats:
            subcat_cors[subcat].append(cors)
            for key in categories.keys():
                if subcat in categories[key]:
                    cat_cors[key].append(cors)
        all_cors.append(cors)

        test_df["correct"] = cors
        for j in range(probs.shape[1]):
            choice = choices[j]
            test_df["choice{}_probs".format(choice)] = probs[:, j]
        test_df.to_csv(
            os.path.join(
                args.save_dir, "results", f"{subject}.csv"
            ),
            index=None,
        )

    for subcat in subcat_cors:
        subcat_acc = np.mean(np.concatenate(subcat_cors[subcat]))
        print("Average accuracy {:.3f} - {}".format(subcat_acc, subcat))

    for cat in cat_cors:
        cat_acc = np.mean(np.concatenate(cat_cors[cat]))
        print("Average accuracy {:.3f} - {}".format(cat_acc, cat))
    weighted_acc = np.mean(np.concatenate(all_cors))
    print("Average accuracy: {:.3f}".format(weighted_acc))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ntrain", "-k", type=int, default=5)
    parser.add_argument("--ngpu", "-g", type=int, default=8)
    parser.add_argument("--data_dir", "-d", type=str, default="data")
    parser.add_argument("--save_dir", "-s", type=str, default="results")
    parser.add_argument(
        "--model_path",
        "-m",
        type=str,
    )
    parser.add_argument(
        "--do_test", action="store_true"
    )
    parser.add_argument(
        "--load_in_4bit", action="store_true"
    )
    parser.add_argument(
        "--use_flash_attention_2", action="store_true"
    )
    args = parser.parse_args()
    main(args)
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/openai_server_demo/openai_api_protocol.py
Python
from typing import Optional, List, Dict, Any, Union, Literal
import time

import shortuuid
from pydantic import BaseModel, Field


class ChatCompletionRequest(BaseModel):
    model: str = "chinese-mixtral"
    messages: Union[str, List[Dict[str, str]]]
    temperature: Optional[float] = 0.2
    top_p: Optional[float] = 0.9
    top_k: Optional[int] = 40
    n: Optional[int] = 1
    max_tokens: Optional[int] = 512
    num_beams: Optional[int] = 1
    stop: Optional[Union[str, List[str]]] = None
    stream: Optional[bool] = False
    repetition_penalty: Optional[float] = 1.1
    user: Optional[str] = None
    do_sample: Optional[bool] = True


class ChatMessage(BaseModel):
    role: str
    content: str


class DeltaMessage(BaseModel):
    role: Optional[Literal["user", "assistant", "system"]] = None
    content: Optional[str] = None


class ChatCompletionResponseChoice(BaseModel):
    index: int
    message: ChatMessage


class ChatCompletionResponseStreamChoice(BaseModel):
    index: int
    delta: DeltaMessage
    finish_reason: Optional[Literal["stop", "length"]]


class ChatCompletionResponse(BaseModel):
    id: str = Field(default_factory=lambda: f"chatcmpl-{shortuuid.random()}")
    object: str = "chat.completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str = "chinese-mixtral"
    choices: List[
        Union[ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice]
    ]


class EmbeddingsRequest(BaseModel):
    input: Union[str, List[Any]]
    user: Optional[str] = None


class EmbeddingsResponse(BaseModel):
    object: str = "list"
    data: List[Dict[str, Any]]
    model: str = "chinese-mixtral"


class CompletionRequest(BaseModel):
    prompt: Union[str, List[Any]]
    temperature: Optional[float] = 0.2
    n: Optional[int] = 1
    max_tokens: Optional[int] = 512
    stop: Optional[Union[str, List[str]]] = None
    stream: Optional[bool] = False
    top_p: Optional[float] = 0.9
    top_k: Optional[int] = 40
    num_beams: Optional[int] = 1
    logprobs: Optional[int] = None
    echo: Optional[bool] = False
    repetition_penalty: Optional[float] = 1.1
    user: Optional[str] = None
    do_sample: Optional[bool] = True


class CompletionResponseChoice(BaseModel):
    index: int
    text: str


class CompletionResponse(BaseModel):
    id: Optional[str] = Field(default_factory=lambda: f"cmpl-{shortuuid.random()}")
    object: Optional[str] = "text_completion"
    created: Optional[int] = Field(default_factory=lambda: int(time.time()))
    model: Optional[str] = "chinese-mixtral"
    choices: List[CompletionResponseChoice]
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/openai_server_demo/openai_api_server.py
Python
import argparse import os from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware import uvicorn from threading import Thread from sse_starlette.sse import EventSourceResponse parser = argparse.ArgumentParser() parser.add_argument('--base_model', default=None, type=str, required=True) parser.add_argument('--lora_model', default=None, type=str,help="If None, perform inference on the base model") parser.add_argument('--tokenizer_path',default=None,type=str) parser.add_argument('--gpus', default="0", type=str) parser.add_argument('--load_in_8bit',action='store_true', help='Load the model in 8bit mode') parser.add_argument('--load_in_4bit',action='store_true', help='Load the model in 4bit mode') parser.add_argument('--only_cpu',action='store_true',help='Only use CPU for inference') parser.add_argument('--use_flash_attention_2', action='store_true', help="Use flash-attention2 to accelerate inference") args = parser.parse_args() if args.only_cpu is True: args.gpus = "" if args.load_in_8bit or args.load_in_4bit: raise ValueError("Quantization is unavailable on CPU.") if args.load_in_8bit and args.load_in_4bit: raise ValueError("Only one quantization method can be chosen for inference. Please check your arguments") os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus import torch import torch.nn.functional as F from transformers import ( AutoModelForCausalLM, LlamaTokenizer, GenerationConfig, TextIteratorStreamer, BitsAndBytesConfig ) from peft import PeftModel import sys parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_dir) from openai_api_protocol import ( ChatCompletionRequest, ChatCompletionResponse, ChatMessage, ChatCompletionResponseChoice, CompletionRequest, CompletionResponse, CompletionResponseChoice, EmbeddingsRequest, EmbeddingsResponse, ChatCompletionResponseStreamChoice, DeltaMessage, ) load_type = torch.float16 if torch.cuda.is_available(): device = torch.device(0) else: device = torch.device("cpu") if args.tokenizer_path is None: args.tokenizer_path = args.lora_model if args.lora_model is None: args.tokenizer_path = args.base_model tokenizer = LlamaTokenizer.from_pretrained(args.tokenizer_path, legacy=True) if args.load_in_4bit or args.load_in_8bit: quantization_config = BitsAndBytesConfig( load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, bnb_4bit_compute_dtype=load_type, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4" ) base_model = AutoModelForCausalLM.from_pretrained( args.base_model, torch_dtype=load_type, low_cpu_mem_usage=True, device_map='auto' if not args.only_cpu else None, #load_in_4bit=args.load_in_4bit, #load_in_8bit=args.load_in_8bit, quantization_config=quantization_config if (args.load_in_4bit or args.load_in_8bit) else None, attn_implementation="flash_attention_2" if args.use_flash_attention_2 else "sdpa", trust_remote_code=True ) model_vocab_size = base_model.get_input_embeddings().weight.size(0) tokenizer_vocab_size = len(tokenizer) print(f"Vocab of the base model: {model_vocab_size}") print(f"Vocab of the tokenizer: {tokenizer_vocab_size}") if model_vocab_size != tokenizer_vocab_size: print("Resize model embeddings to fit tokenizer") base_model.resize_token_embeddings(tokenizer_vocab_size) if args.lora_model is not None: print("loading peft model") model = PeftModel.from_pretrained( base_model, args.lora_model, torch_dtype=load_type, device_map="auto", ) else: model = base_model if device == torch.device("cpu"): model.float() model.eval() DEFAULT_SYSTEM_PROMPT = "" # NOTE: 
this is an arbitrary template, as the original # one does not contain the system prompt. # You may need to adjust this template to fit your needs. TEMPLATE_WITH_SYSTEM_PROMPT = ( "[INST] <sys> {system_prompt} </sys>\n" "{instruction} [/INST]" ) TEMPLATE_WITHOUT_SYSTEM_PROMPT = "[INST] {instruction} [/INST]" def generate_prompt( instruction, response="", with_system_prompt=False, system_prompt=None ): if with_system_prompt is True and system_prompt is not None: prompt = TEMPLATE_WITH_SYSTEM_PROMPT.format_map( {"instruction": instruction, "system_prompt": system_prompt} ) else: prompt = TEMPLATE_WITHOUT_SYSTEM_PROMPT.format_map({"instruction": instruction}) if len(response) > 0: prompt += " " + response return prompt def generate_completion_prompt(instruction: str): """Generate prompt for completion""" return generate_prompt(instruction, response="", with_system_prompt=False) def generate_chat_prompt(messages: list): """Generate prompt for chat completion""" system_msg = None for msg in messages: if msg.role == "system": system_msg = msg.content prompt = "" is_first_user_content = True for msg in messages: if msg.role == "system": continue if msg.role == "user": if is_first_user_content is True: prompt += generate_prompt( msg.content, with_system_prompt=False, system_prompt=system_msg ) is_first_user_content = False else: prompt += "<s>" + generate_prompt(msg.content, with_system_prompt=False) if msg.role == "assistant": prompt += f" {msg.content}" + "</s>" return prompt def predict( input, max_new_tokens=512, top_p=0.9, temperature=0.2, top_k=40, num_beams=1, repetition_penalty=1.1, do_sample=True, **kwargs, ): """ Main inference method type(input) == str -> /v1/completions type(input) == list -> /v1/chat/completions """ if isinstance(input, str): prompt = generate_completion_prompt(input) else: prompt = generate_chat_prompt(input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(device) attention_mask = inputs['attention_mask'].to(device) generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, do_sample=do_sample, **kwargs, ) generation_config.return_dict_in_generate = True generation_config.output_scores = False generation_config.max_new_tokens = max_new_tokens generation_config.repetition_penalty = float(repetition_penalty) # For the reason why pad_token_id = eos_token_id, see: # https://github.com/meta-llama/llama-recipes/blob/f7aa02af9f2c427ebb70853191b72636130b9df5/src/llama_recipes/finetuning.py#L141 with torch.no_grad(): generation_output = model.generate( input_ids=input_ids, attention_mask=attention_mask, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id, generation_config=generation_config, ) s = generation_output.sequences[0] output = tokenizer.decode(s, skip_special_tokens=True) output = output.split("[/INST]")[-1].strip() return output def stream_predict( input, max_new_tokens=512, top_p=0.9, temperature=0.2, top_k=40, num_beams=4, repetition_penalty=1.1, do_sample=True, model_id="chinese-mixtral", **kwargs, ): choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(role="assistant"), finish_reason=None ) chunk = ChatCompletionResponse( model=model_id, choices=[choice_data], object="chat.completion.chunk", ) yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False)) if isinstance(input, str): prompt = generate_completion_prompt(input) else: prompt = generate_chat_prompt(input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = 
inputs["input_ids"].to(device) generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, do_sample=do_sample, **kwargs, ) streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) generation_kwargs = dict( streamer=streamer, input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=False, max_new_tokens=max_new_tokens, repetition_penalty=float(repetition_penalty), ) Thread(target=model.generate, kwargs=generation_kwargs).start() for new_text in streamer: choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(content=new_text), finish_reason=None ) chunk = ChatCompletionResponse( model=model_id, choices=[choice_data], object="chat.completion.chunk" ) yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False)) choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(), finish_reason="stop" ) chunk = ChatCompletionResponse( model=model_id, choices=[choice_data], object="chat.completion.chunk" ) yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False)) yield "[DONE]" def get_embedding(input): """Get embedding main function""" with torch.no_grad(): encoding = tokenizer(input, padding=True, return_tensors="pt") input_ids = encoding["input_ids"].to(device) attention_mask = encoding["attention_mask"].to(device) model_output = model(input_ids, attention_mask, output_hidden_states=True) data = model_output.hidden_states[-1] mask = attention_mask.unsqueeze(-1).expand(data.size()).float() masked_embeddings = data * mask sum_embeddings = torch.sum(masked_embeddings, dim=1) seq_length = torch.sum(mask, dim=1) embedding = sum_embeddings / seq_length normalized_embeddings = F.normalize(embedding, p=2, dim=1) ret = normalized_embeddings.squeeze(0).tolist() return ret app = FastAPI() app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) @app.post("/v1/chat/completions") async def create_chat_completion(request: ChatCompletionRequest): """Creates a completion for the chat message""" msgs = request.messages if isinstance(msgs, str): msgs = [ChatMessage(role="user", content=msgs)] else: msgs = [ChatMessage(role=x["role"], content=x["content"]) for x in msgs] if request.stream: generate = stream_predict( input=msgs, max_new_tokens=request.max_tokens, top_p=request.top_p, top_k=request.top_k, temperature=request.temperature, num_beams=request.num_beams, repetition_penalty=request.repetition_penalty, do_sample=request.do_sample, ) return EventSourceResponse(generate, media_type="text/event-stream") output = predict( input=msgs, max_new_tokens=request.max_tokens, top_p=request.top_p, top_k=request.top_k, temperature=request.temperature, num_beams=request.num_beams, repetition_penalty=request.repetition_penalty, do_sample=request.do_sample, ) choices = [ ChatCompletionResponseChoice(index=i, message=msg) for i, msg in enumerate(msgs) ] choices += [ ChatCompletionResponseChoice( index=len(choices), message=ChatMessage(role="assistant", content=output) ) ] return ChatCompletionResponse(choices=choices) @app.post("/v1/completions") async def create_completion(request: CompletionRequest): """Creates a completion""" output = predict( input=request.prompt, max_new_tokens=request.max_tokens, top_p=request.top_p, top_k=request.top_k, temperature=request.temperature, num_beams=request.num_beams, repetition_penalty=request.repetition_penalty, 
do_sample=request.do_sample, ) choices = [CompletionResponseChoice(index=0, text=output)] return CompletionResponse(choices=choices) @app.post("/v1/embeddings") async def create_embeddings(request: EmbeddingsRequest): """Creates text embedding""" embedding = get_embedding(request.input) data = [{"object": "embedding", "embedding": embedding, "index": 0}] return EmbeddingsResponse(data=data) if __name__ == "__main__": log_config = uvicorn.config.LOGGING_CONFIG log_config["formatters"]["access"][ "fmt" ] = "%(asctime)s - %(levelname)s - %(message)s" log_config["formatters"]["default"][ "fmt" ] = "%(asctime)s - %(levelname)s - %(message)s" uvicorn.run(app, host="0.0.0.0", port=19327, workers=1, log_config=log_config)
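# --- Illustrative client sketch (editor addition, not part of the original script) ---
# A minimal example of calling the endpoints defined above, assuming the server is
# running with the defaults used in __main__ (host 0.0.0.0, port 19327) and that the
# response models serialize under the usual OpenAI-style keys. The "requests" client
# code and the sample prompt are placeholders, not taken from the original repository.
#
#   import requests
#
#   resp = requests.post(
#       "http://127.0.0.1:19327/v1/chat/completions",
#       json={
#           "messages": [{"role": "user", "content": "Briefly introduce Mixture-of-Experts models."}],
#           "max_tokens": 256,
#       },
#   )
#   # create_chat_completion() appends the assistant reply as the last choice.
#   print(resp.json()["choices"][-1]["message"]["content"])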
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/training/build_dataset.py
Python
import logging import os from typing import Union, List import datasets import torch from datasets import load_dataset, concatenate_datasets import transformers IGNORE_INDEX = -100 logger = logging.getLogger('__name__') PROMPT_TEMPLATE = ( "[INST] {instruction} [/INST]" ) def build_instruction_dataset(data_path: Union[List[str],str], tokenizer: transformers.PreTrainedTokenizer, max_seq_length: int, data_cache_dir = None, preprocessing_num_workers = None, ): def tokenization(examples): sources = [] targets = [] prompt = PROMPT_TEMPLATE for instruction, input_text, output in zip(examples['instruction'],examples['input'],examples['output']): if input_text is not None and input_text !="": instruction = instruction+'\n' + input_text source = prompt.format_map({'instruction':instruction}) target = f"{output}{tokenizer.eos_token}" sources.append(source) targets.append(target) tokenized_sources = tokenizer(sources,return_attention_mask=False) tokenized_targets = tokenizer(targets,return_attention_mask=False,add_special_tokens=False) all_input_ids = [] all_labels = [] for s,t in zip(tokenized_sources['input_ids'],tokenized_targets['input_ids']): if len(s) >= max_seq_length: continue input_ids = torch.LongTensor(s + t)[:max_seq_length] labels = torch.LongTensor([IGNORE_INDEX] * len(s) + t)[:max_seq_length] all_input_ids.append(input_ids) all_labels.append(labels) results = {'input_ids':all_input_ids, 'labels': all_labels} return results logging.warning("building dataset...") all_datasets = [] if not isinstance(data_path,(list,tuple)): data_path = [data_path] for file in data_path: if data_cache_dir is None: data_cache_dir = str(os.path.dirname(file)) cache_path = os.path.join(data_cache_dir,os.path.basename(file).split('.')[0]+f"_{max_seq_length}") os.makedirs(cache_path, exist_ok=True) try: processed_dataset = datasets.load_from_disk(cache_path) logger.info(f'training datasets-{file} has been loaded from disk') except Exception: raw_dataset = load_dataset("json", data_files=file, cache_dir=cache_path) tokenization_func = tokenization tokenized_dataset = raw_dataset.map( tokenization_func, batched=True, num_proc=preprocessing_num_workers, remove_columns=["instruction","input","output"], keep_in_memory=False, desc="preprocessing on dataset", ) processed_dataset = tokenized_dataset processed_dataset.save_to_disk(cache_path) processed_dataset.set_format('torch') all_datasets.append(processed_dataset['train']) all_datasets = concatenate_datasets(all_datasets) return all_datasets
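# --- Illustrative usage sketch (editor addition, not part of the original file) ---
# For a record like {"instruction": "...", "input": "", "output": "..."} the
# tokenization() closure builds
#   source = "[INST] <instruction> [/INST]"   -> labels filled with IGNORE_INDEX (-100)
#   target = "<output>" + tokenizer.eos_token -> labels equal to the token ids
# so the loss is only computed on the response tokens. A hypothetical call follows;
# the model path and data file are placeholders, not from the original repository:
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("path/to/chinese-mixtral")
#   ds = build_instruction_dataset(
#       data_path=["data/sample_sft.json"],
#       tokenizer=tok,
#       max_seq_length=1024,
#       preprocessing_num_workers=4,
#   )
#   print(ds[0]["input_ids"][:8], ds[0]["labels"][:8])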
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/training/run_clm_pt_with_peft.py
Python
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import logging import numpy as np import math import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, List, Dict, Any, Mapping from pathlib import Path import datasets import torch from datasets import load_dataset, concatenate_datasets import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoModelForCausalLM, MixtralForCausalLM, LlamaTokenizer, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed, BitsAndBytesConfig ) from transformers.testing_utils import CaptureLogger from transformers.trainer_utils import get_last_checkpoint from transformers.utils import send_example_telemetry from transformers.utils.versions import require_version from peft import LoraConfig, TaskType, get_peft_model, PeftModel, prepare_model_for_kbit_training def fault_tolerance_data_collator(features: List) -> Dict[str, Any]: if not isinstance(features[0], Mapping): features = [vars(f) for f in features] first = features[0] batch = {} # Special handling for labels. # Ensure that tensor is created with the correct type # (it should be automatically the case, but let's make sure of it.) if "label" in first and first["label"] is not None: label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"] dtype = torch.long if isinstance(label, int) else torch.float batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype) elif "label_ids" in first and first["label_ids"] is not None: if isinstance(first["label_ids"], torch.Tensor): batch["labels"] = torch.stack([f["label_ids"] for f in features]) else: dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype) # Handling of all other possible keys. # Again, we will use the first element to figure out which key/values are not None for this model. 
try: for k, v in first.items(): if k not in ("label", "label_ids") and v is not None and not isinstance(v, str): if isinstance(v, torch.Tensor): batch[k] = torch.stack([f[k] for f in features]) elif isinstance(v, np.ndarray): batch[k] = torch.tensor(np.stack([f[k] for f in features])) else: batch[k] = torch.tensor([f[k] for f in features]) except ValueError: # quick fix by simply take the first example for k, v in first.items(): if k not in ("label", "label_ids") and v is not None and not isinstance(v, str): if isinstance(v, torch.Tensor): batch[k] = torch.stack([features[0][k]] * len(features)) elif isinstance(v, np.ndarray): batch[k] = torch.tensor(np.stack([features[0][k]] * len(features))) else: batch[k] = torch.tensor([features[0][k]] * len(features)) return batch MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The tokenizer for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=False, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_dir: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) block_size: Optional[int] = field( default=None, metadata={ "help": ( "Optional input sequence length after tokenization. " "The training dataset will be truncated in block of this size for training. " "Default to the model max input length for single sentence inputs (take into account special tokens)." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[float] = field( default=0.05, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) data_cache_dir: Optional[str] = field(default="./", metadata={"help": "The datasets processed stored"}) def __post_init__(self): if self.streaming: require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`") @dataclass class MyTrainingArguments(TrainingArguments): trainable : Optional[str] = field(default="q_proj,v_proj") lora_rank : Optional[int] = field(default=8) lora_dropout : Optional[float] = field(default=0.1) lora_alpha : Optional[float] = field(default=32.) modules_to_save : Optional[str] = field(default=None) debug_mode : Optional[bool] = field(default=False) peft_path : Optional[str] = field(default=None) use_flash_attention_2 : Optional[bool] = field(default=False) double_quant: Optional[bool] = field(default=True) quant_type: Optional[str] = field(default="nf4") load_in_kbits: Optional[int] = field(default=16) output_router_logits: Optional[bool] = field(default=False) logger = logging.getLogger(__name__) def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN, handlers=[logging.StreamHandler(sys.stdout)],) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # transformers.tokenization_utils.logging.set_verbosity_warning() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, "output_router_logits": True if training_args.output_router_logits else False } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.tokenizer_name_or_path: tokenizer = LlamaTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) tokenizer.add_eos_token = True # Preprocessing the datasets. # First we tokenize all the texts. 
# since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer(examples["text"]) # clm input could be much much longer than block_size if "Token indices sequence length is longer than the" in cl.out: tok_logger.warning( "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" " before being passed to the model." ) return output if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) block_size = 1024 else: if data_args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(data_args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. 
result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result with training_args.main_process_first(desc="dataset map tokenization and grouping"): lm_datasets = [] path = Path(data_args.dataset_dir) files = [file.name for file in path.glob("*.txt")] if training_args.debug_mode is True: files = [files[0]] for idx, file in enumerate(files): data_file = os.path.join(path, file) filename = ''.join(file.split(".")[:-1]) cache_path = os.path.join(data_args.data_cache_dir, filename+f"_{block_size}") os.makedirs(cache_path, exist_ok=True) try: processed_dataset = datasets.load_from_disk(cache_path, keep_in_memory=False) logger.info(f'training datasets-{filename} has been loaded from disk') except Exception: cache_dir = os.path.join(data_args.data_cache_dir, filename+f"_text_{block_size}") os.makedirs(cache_dir, exist_ok=True) raw_dataset = load_dataset("text", data_files=data_file, cache_dir=cache_dir, keep_in_memory=False) logger.info(f"{file} has been loaded") tokenized_dataset = raw_dataset.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns="text", load_from_cache_file=True, keep_in_memory=False, cache_file_names = {k: os.path.join(cache_dir, 'tokenized.arrow') for k in raw_dataset}, desc="Running tokenizer on dataset", ) grouped_datasets = tokenized_dataset.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=True, keep_in_memory=False, cache_file_names = {k: os.path.join(cache_dir, 'grouped.arrow') for k in tokenized_dataset}, desc=f"Grouping texts in chunks of {block_size}", ) processed_dataset = grouped_datasets processed_dataset.save_to_disk(cache_path) if idx == 0: lm_datasets = processed_dataset['train'] else: lm_datasets = concatenate_datasets([lm_datasets, processed_dataset["train"]]) lm_datasets = lm_datasets.train_test_split(test_size = data_args.validation_split_percentage) if training_args.do_train: train_dataset = lm_datasets['train'] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) logger.info(f"Num train_samples {len(train_dataset)}") logger.info("Training example:") logger.info(tokenizer.decode(train_dataset[0]['input_ids'])) if training_args.do_eval: eval_dataset = lm_datasets["test"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) logger.info(f"Num eval_samples {len(eval_dataset)}") logger.info("Evaluation example:") logger.info(tokenizer.decode(eval_dataset[0]['input_ids'])) compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)) if training_args.load_in_kbits in [4, 8]: if training_args.modules_to_save is not None: load_in_8bit_skip_modules = training_args.modules_to_save.split(',') else: load_in_8bit_skip_modules = None quantization_config = BitsAndBytesConfig( load_in_4bit=training_args.load_in_kbits == 4, load_in_8bit=training_args.load_in_kbits == 8, llm_int8_threshold=6.0, load_in_8bit_skip_modules=load_in_8bit_skip_modules, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=training_args.double_quant, bnb_4bit_quant_type=training_args.quant_type # {'fp4', 'nf4'} ) else: quantization_config = None if quantization_config is not 
None: logger.info(f"quantization_config:{quantization_config.to_dict()}") if model_args.model_name_or_path: torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) device_map = {"":int(os.environ.get("LOCAL_RANK") or 0)} model = MixtralForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, low_cpu_mem_usage=True, device_map=device_map, quantization_config=quantization_config, attn_implementation="flash_attention_2" if training_args.use_flash_attention_2 else "sdpa" ) else: model = AutoModelForCausalLM.from_config(config) n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") if training_args.load_in_kbits in [4, 8]: model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing) model.config.use_cache = False model_vocab_size = model.get_output_embeddings().weight.size(0) tokenizer_vocab_size = len(tokenizer) logger.info(f"Model vocab size: {model_vocab_size}") logger.info(f"Tokenizer vocab size: {tokenizer_vocab_size}") if model_vocab_size != tokenizer_vocab_size: logger.info(f"Resize model vocab size to {tokenizer_vocab_size}") model.resize_token_embeddings(tokenizer_vocab_size) if training_args.peft_path is not None: logger.info("Peft from pre-trained model") model = PeftModel.from_pretrained(model, training_args.peft_path, device_map=device_map, is_trainable=True) else: logger.info("Init new peft model") target_modules = training_args.trainable.split(',') modules_to_save = training_args.modules_to_save if modules_to_save is not None: modules_to_save = modules_to_save.split(',') lora_rank = training_args.lora_rank lora_dropout = training_args.lora_dropout lora_alpha = training_args.lora_alpha logger.info(f"target_modules: {target_modules}") logger.info(f"lora_rank: {lora_rank}") peft_config = LoraConfig( task_type=TaskType.CAUSAL_LM, target_modules=target_modules, inference_mode=False, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, modules_to_save=modules_to_save) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=fault_tolerance_data_collator ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else 
len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if __name__ == "__main__": main()
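# --- Illustrative note (editor addition, not part of the original script) ---
# group_texts() concatenates every tokenized example and re-chunks the stream into
# fixed blocks of block_size, dropping the trailing remainder. For example, with
# block_size=4:
#   {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8, 9]]}
# becomes
#   {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]],
#    "labels":    [[1, 2, 3, 4], [5, 6, 7, 8]]}
# (token 9 is dropped; labels are a copy of input_ids, and the causal shift is
# applied later inside the model's loss computation).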
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/training/run_clm_sft_with_peft.py
Python
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from pathlib import Path import datasets import torch from build_dataset import build_instruction_dataset import transformers from transformers import ( CONFIG_MAPPING, AutoConfig, BitsAndBytesConfig, MixtralForCausalLM, LlamaTokenizer, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed, DataCollatorForSeq2Seq ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import send_example_telemetry from transformers.utils.versions import require_version from peft import LoraConfig, TaskType, get_peft_model, PeftModel, prepare_model_for_kbit_training require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The tokenizer for weights initialization.Don't set if you want to train a model from scratch." ) }, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=False, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_dir: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[float] = field( default=0.05, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) data_cache_dir: Optional[str] = field(default=None, metadata={"help": "The datasets processed stored"}) max_seq_length: Optional[int] = field(default=1024) @dataclass class MyTrainingArguments(TrainingArguments): trainable : Optional[str] = field(default="q_proj,v_proj") lora_rank : Optional[int] = field(default=8) lora_dropout : Optional[float] = field(default=0.1) lora_alpha : Optional[float] = field(default=32.) modules_to_save : Optional[str] = field(default=None) peft_path : Optional[str] = field(default=None) use_flash_attention_2 : Optional[bool] = field(default=False) double_quant: Optional[bool] = field(default=True) quant_type: Optional[str] = field(default="nf4") load_in_kbits: Optional[int] = field(default=16) output_router_logits: Optional[bool] = field(default=False) logger = logging.getLogger(__name__) def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, # if training_args.local_rank in [-1, 0] else logging.WARN, handlers=[logging.StreamHandler(sys.stdout)],) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # transformers.tokenization_utils.logging.set_verbosity_warning() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16 or training_args.bf16}" ) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, "output_router_logits": True if training_args.output_router_logits else False } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.tokenizer_name_or_path: tokenizer = LlamaTokenizer.from_pretrained(model_args.tokenizer_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." 
) if tokenizer.pad_token_id is None: tokenizer.pad_token = tokenizer.eos_token data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer) eval_dataset=None train_dataset = None if training_args.do_train: with training_args.main_process_first(desc="loading and tokenization"): path = Path(data_args.dataset_dir) files = [os.path.join(path,file.name) for file in path.glob("*.json")] logger.info(f"Training files: {' '.join(files)}") train_dataset = build_instruction_dataset( data_path=files, tokenizer=tokenizer, max_seq_length=data_args.max_seq_length, data_cache_dir = None, preprocessing_num_workers = data_args.preprocessing_num_workers) logger.info(f"Num train_samples {len(train_dataset)}") logger.info("Training example:") logger.info(tokenizer.decode(train_dataset[0]['input_ids'])) if training_args.do_eval: with training_args.main_process_first(desc="loading and tokenization"): files = [data_args.validation_file] logger.info(f"Evaluation files: {' '.join(files)}") eval_dataset = build_instruction_dataset( data_path=files, tokenizer=tokenizer, max_seq_length=data_args.max_seq_length, data_cache_dir=None, preprocessing_num_workers = data_args.preprocessing_num_workers) logger.info(f"Num eval_samples {len(eval_dataset)}") logger.info("Evaluation example:") logger.info(tokenizer.decode(eval_dataset[0]['input_ids'])) torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32)) if training_args.load_in_kbits in [4, 8]: if training_args.modules_to_save is not None: load_in_8bit_skip_modules = training_args.modules_to_save.split(',') else: load_in_8bit_skip_modules = None quantization_config = BitsAndBytesConfig( load_in_4bit=training_args.load_in_kbits == 4, load_in_8bit=training_args.load_in_kbits == 8, llm_int8_threshold=6.0, load_in_8bit_skip_modules=load_in_8bit_skip_modules, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=training_args.double_quant, bnb_4bit_quant_type=training_args.quant_type # {'fp4', 'nf4'} ) else: quantization_config = None if quantization_config is not None: logger.info(f"quantization_config:{quantization_config.to_dict()}") device_map = {"":int(os.environ.get("LOCAL_RANK") or 0)} model = MixtralForCausalLM.from_pretrained( model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, torch_dtype=torch_dtype, low_cpu_mem_usage=True, device_map=device_map, quantization_config=quantization_config, attn_implementation="flash_attention_2" if training_args.use_flash_attention_2 else "sdpa" ) if training_args.load_in_kbits in [4, 8]: model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing) model.config.use_cache = False model_vocab_size = model.get_input_embeddings().weight.shape[0] logger.info(f"Model vocab size: {model_vocab_size}") logger.info(f"len(tokenizer):{len(tokenizer)}") if model_vocab_size != len(tokenizer): logger.info(f"Resize model vocab size to {len(tokenizer)}") model.resize_token_embeddings(len(tokenizer)) if training_args.peft_path is not None: logger.info("Peft from pre-trained model") model = PeftModel.from_pretrained(model, training_args.peft_path, device_map=device_map, is_trainable=True) else: logger.info("Init new peft model") target_modules = training_args.trainable.split(',') 
modules_to_save = training_args.modules_to_save if modules_to_save is not None: modules_to_save = modules_to_save.split(',') lora_rank = training_args.lora_rank lora_dropout = training_args.lora_dropout lora_alpha = training_args.lora_alpha logger.info(f"target_modules: {target_modules}") logger.info(f"lora_rank: {lora_rank}") peft_config = LoraConfig( task_type=TaskType.CAUSAL_LM, target_modules=target_modules, inference_mode=False, r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, modules_to_save=modules_to_save) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() metrics = train_result.metrics metrics["train_samples"] = len(train_dataset) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() metrics["eval_samples"] =len(eval_dataset) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if __name__ == "__main__": main()
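# --- Illustrative note (editor addition, not part of the original script) ---
# How the SFT pipeline fits together: build_instruction_dataset() produces per-example
# input_ids and labels with the prompt portion already masked to -100, and
# DataCollatorForSeq2Seq pads each batch to its longest sequence, using the tokenizer's
# pad token for input_ids and -100 for labels (its default label_pad_token_id), so
# padded positions are also ignored by the loss. For two examples of lengths 10 and 7:
#   input_ids[1] -> [... 7 real ids ..., pad,  pad,  pad ]
#   labels[1]    -> [... 7 labels ...,   -100, -100, -100]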
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/training/run_pt.sh
Shell
## 运行脚本前请仔细阅读wiki(https://github.com/ymcui/Chinese-Mixtral/wiki/pt_scripts_zh) ## Read the wiki(https://github.com/ymcui/Chinese-Mixtral/wiki/pt_scripts_en) carefully before running the script lr=1e-4 lora_rank=64 lora_alpha=128 lora_trainable="q_proj,v_proj,k_proj,o_proj,gate,w1,w2,w3" modules_to_save="embed_tokens,lm_head" lora_dropout=0.05 pretrained_model=path/to/hf/mixtral/dir dataset_dir=path/to/pt/data/dir data_cache=temp_data_cache_dir per_device_train_batch_size=1 gradient_accumulation_steps=8 block_size=1024 output_dir=output_dir deepspeed_config_file=ds_zero2_no_offload.json torchrun --nnodes 1 --nproc_per_node 1 run_clm_pt_with_peft.py \ --deepspeed ${deepspeed_config_file} \ --model_name_or_path ${pretrained_model} \ --tokenizer_name_or_path ${pretrained_model} \ --dataset_dir ${dataset_dir} \ --data_cache_dir ${data_cache} \ --validation_split_percentage 0.001 \ --per_device_train_batch_size ${per_device_train_batch_size} \ --do_train \ --seed $RANDOM \ --fp16 \ --num_train_epochs 1 \ --lr_scheduler_type cosine \ --learning_rate ${lr} \ --warmup_ratio 0.05 \ --weight_decay 0.1 \ --logging_strategy steps \ --logging_steps 10 \ --save_strategy steps \ --save_total_limit 3 \ --save_steps 200 \ --gradient_accumulation_steps ${gradient_accumulation_steps} \ --preprocessing_num_workers 8 \ --block_size ${block_size} \ --output_dir ${output_dir} \ --overwrite_output_dir \ --ddp_timeout 30000 \ --logging_first_step True \ --lora_rank ${lora_rank} \ --lora_alpha ${lora_alpha} \ --trainable ${lora_trainable} \ --lora_dropout ${lora_dropout} \ --modules_to_save ${modules_to_save} \ --torch_dtype float16 \ --load_in_kbits 4 \ --gradient_checkpointing \ --ddp_find_unused_parameters False \ --output_router_logits
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
scripts/training/run_sft.sh
Shell
## 运行脚本前请仔细阅读wiki(https://github.com/ymcui/Chinese-Mixtral/wiki/sft_scripts_zh) ## Read the wiki(https://github.com/ymcui/Chinese-Mixtral/wiki/sft_scripts_en) carefully before running the script lr=1e-4 lora_rank=64 lora_alpha=128 lora_trainable="q_proj,v_proj,k_proj,o_proj,gate,w1,w2,w3" modules_to_save="embed_tokens,lm_head" lora_dropout=0.05 pretrained_model=path/to/hf/chinese-mixtral/dir/or/model_id dataset_dir=path/to/sft/data/dir per_device_train_batch_size=1 per_device_eval_batch_size=1 gradient_accumulation_steps=8 max_seq_length=1024 output_dir=output_dir validation_file=validation_file_name deepspeed_config_file=ds_zero2_no_offload.json torchrun --nnodes 1 --nproc_per_node 1 run_clm_sft_with_peft.py \ --deepspeed ${deepspeed_config_file} \ --model_name_or_path ${pretrained_model} \ --tokenizer_name_or_path ${pretrained_model} \ --dataset_dir ${dataset_dir} \ --per_device_train_batch_size ${per_device_train_batch_size} \ --per_device_eval_batch_size ${per_device_eval_batch_size} \ --do_train \ --do_eval \ --seed $RANDOM \ --fp16 \ --num_train_epochs 3 \ --lr_scheduler_type cosine \ --learning_rate ${lr} \ --warmup_ratio 0.05 \ --weight_decay 0.1 \ --logging_strategy steps \ --logging_steps 10 \ --save_strategy steps \ --save_total_limit 3 \ --evaluation_strategy steps \ --eval_steps 100 \ --save_steps 200 \ --gradient_accumulation_steps ${gradient_accumulation_steps} \ --preprocessing_num_workers 8 \ --max_seq_length ${max_seq_length} \ --output_dir ${output_dir} \ --overwrite_output_dir \ --ddp_timeout 30000 \ --logging_first_step True \ --lora_rank ${lora_rank} \ --lora_alpha ${lora_alpha} \ --trainable ${lora_trainable} \ --lora_dropout ${lora_dropout} \ --modules_to_save ${modules_to_save} \ --torch_dtype float16 \ --validation_file ${validation_file} \ --load_in_kbits 4 \ --gradient_checkpointing \ --ddp_find_unused_parameters False \ --output_router_logits
ymcui/Chinese-Mixtral
609
中文Mixtral混合专家大模型(Chinese Mixtral MoE LLMs)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/classifier_utils.py
Python
from absl import flags import re import numpy as np import tensorflow as tf from data_utils import SEP_ID, CLS_ID FLAGS = flags.FLAGS SEG_ID_A = 0 SEG_ID_B = 1 SEG_ID_CLS = 2 SEG_ID_SEP = 3 SEG_ID_PAD = 4 class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. """ class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def convert_single_example(ex_index, example, label_list, max_seq_length, tokenize_fn): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[1] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) if label_list is not None: label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenize_fn(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenize_fn(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for two [SEP] & one [CLS] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for one [SEP] & one [CLS] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:max_seq_length - 2] tokens = [] segment_ids = [] for token in tokens_a: tokens.append(token) segment_ids.append(SEG_ID_A) tokens.append(SEP_ID) segment_ids.append(SEG_ID_A) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(SEG_ID_B) tokens.append(SEP_ID) segment_ids.append(SEG_ID_B) tokens.append(CLS_ID) segment_ids.append(SEG_ID_CLS) input_ids = tokens # The mask has 0 for real tokens and 1 for padding tokens. Only real # tokens are attended to. input_mask = [0] * len(input_ids) # Zero-pad up to the sequence length. 
if len(input_ids) < max_seq_length: delta_len = max_seq_length - len(input_ids) input_ids = [0] * delta_len + input_ids input_mask = [1] * delta_len + input_mask segment_ids = [SEG_ID_PAD] * delta_len + segment_ids assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if label_list is not None: label_id = label_map[example.label] else: label_id = example.label if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("label: {} (id = {})".format(example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id) return feature
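# --- Illustrative note (editor addition, not part of the original file) ---
# Note the XLNet conventions used above, which differ from BERT-style pipelines:
# the SEP/CLS ids come last, padding is prepended, and input_mask is inverted
# (0 = real token, 1 = padding). For max_seq_length=6 and tokens_a = [t1, t2]:
#   input_ids   = [0, 0, t1, t2, SEP_ID, CLS_ID]
#   input_mask  = [1, 1, 0,  0,  0,      0     ]
#   segment_ids = [SEG_ID_PAD, SEG_ID_PAD, SEG_ID_A, SEG_ID_A, SEG_ID_A, SEG_ID_CLS]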
ymcui/Chinese-XLNet
1650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/cmrc2018_evaluate_drcd.py
Python
# -*- coding: utf-8 -*- ''' Evaluation script for CMRC 2018 version: v5 Note: v5 formatted output, add usage description v4 fixed segmentation issues ''' from __future__ import print_function from collections import Counter, OrderedDict import string import re import argparse import json import sys reload(sys) sys.setdefaultencoding('utf8') import nltk import pdb # split Chinese with English def mixed_segmentation(in_str, rm_punc=False): in_str = str(in_str).decode('utf-8').lower().strip() segs_out = [] temp_str = "" sp_char = ['-',':','_','*','^','/','\\','~','`','+','=', ',','。',':','?','!','“','”',';','’','《','》','……','·','、', '「','」','(',')','-','~','『','』'] for char in in_str: if rm_punc and char in sp_char: continue if re.search(ur'[\u4e00-\u9fa5]', char) or char in sp_char: if temp_str != "": ss = nltk.word_tokenize(temp_str) segs_out.extend(ss) temp_str = "" segs_out.append(char) else: temp_str += char #handling last part if temp_str != "": ss = nltk.word_tokenize(temp_str) segs_out.extend(ss) return segs_out # remove punctuation def remove_punctuation(in_str): in_str = str(in_str).decode('utf-8').lower().strip() sp_char = ['-',':','_','*','^','/','\\','~','`','+','=', ',','。',':','?','!','“','”',';','’','《','》','……','·','、', '「','」','(',')','-','~','『','』'] out_segs = [] for char in in_str: if char in sp_char: continue else: out_segs.append(char) return ''.join(out_segs) # find longest common string def find_lcs(s1, s2): m = [[0 for i in range(len(s2)+1)] for j in range(len(s1)+1)] mmax = 0 p = 0 for i in range(len(s1)): for j in range(len(s2)): if s1[i] == s2[j]: m[i+1][j+1] = m[i][j]+1 if m[i+1][j+1] > mmax: mmax=m[i+1][j+1] p=i+1 return s1[p-mmax:p], mmax # def evaluate(ground_truth_file, prediction_file): f1 = 0 em = 0 total_count = 0 skip_count = 0 for instance in ground_truth_file["data"]: #context_id = instance['context_id'].strip() #context_text = instance['context_text'].strip() for para in instance["paragraphs"]: for qas in para['qas']: total_count += 1 query_id = qas['id'].strip() query_text = qas['question'].strip() answers = [x["text"] for x in qas['answers']] if query_id not in prediction_file: sys.stderr.write('Unanswered question: {}\n'.format(query_id)) skip_count += 1 continue prediction = str(prediction_file[query_id]).decode('utf-8') f1 += calc_f1_score(answers, prediction) em += calc_em_score(answers, prediction) f1_score = 100.0 * f1 / total_count em_score = 100.0 * em / total_count return f1_score, em_score, total_count, skip_count def calc_f1_score(answers, prediction): f1_scores = [] for ans in answers: ans_segs = mixed_segmentation(ans, rm_punc=True) prediction_segs = mixed_segmentation(prediction, rm_punc=True) lcs, lcs_len = find_lcs(ans_segs, prediction_segs) if lcs_len == 0: f1_scores.append(0) continue precision = 1.0*lcs_len/len(prediction_segs) recall = 1.0*lcs_len/len(ans_segs) f1 = (2*precision*recall)/(precision+recall) f1_scores.append(f1) return max(f1_scores) def calc_em_score(answers, prediction): em = 0 for ans in answers: ans_ = remove_punctuation(ans) prediction_ = remove_punctuation(prediction) if ans_ == prediction_: em = 1 break return em if __name__ == '__main__': parser = argparse.ArgumentParser(description='Evaluation Script for CMRC 2018') parser.add_argument('dataset_file', help='Official dataset file') parser.add_argument('prediction_file', help='Your prediction File') args = parser.parse_args() ground_truth_file = json.load(open(args.dataset_file, 'rb')) prediction_file = json.load(open(args.prediction_file, 'rb')) F1, EM, 
TOTAL, SKIP = evaluate(ground_truth_file, prediction_file) AVG = (EM+F1)*0.5 output_result = OrderedDict() output_result['AVERAGE'] = '%.3f' % AVG output_result['F1'] = '%.3f' % F1 output_result['EM'] = '%.3f' % EM output_result['TOTAL'] = TOTAL output_result['SKIP'] = SKIP output_result['FILE'] = args.prediction_file print(json.dumps(output_result))
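# --- Illustrative note (editor addition, not part of the original script) ---
# F1 here is based on the longest common substring of the mixed segmentation
# (Chinese split per character, other text tokenized by nltk), not on token overlap.
# A worked example with a hypothetical prediction:
#   answer     = "北京大学"     -> ['北', '京', '大', '学']
#   prediction = "北京师范大学" -> ['北', '京', '师', '范', '大', '学']
#   find_lcs() length = 2 ("北京" or "大学"),
#   precision = 2/6, recall = 2/4, F1 = 2*P*R/(P+R) = 0.4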
ymcui/Chinese-XLNet
1650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/data_utils.py
Python
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import random from absl import flags import absl.logging as _logging # pylint: disable=unused-import import numpy as np import tensorflow as tf from prepro_utils import preprocess_text, encode_ids import sentencepiece as spm special_symbols = { "<unk>" : 0, "<s>" : 1, "</s>" : 2, "<cls>" : 3, "<sep>" : 4, "<pad>" : 5, "<mask>" : 6, "<eod>" : 7, "<eop>" : 8, } VOCAB_SIZE = 32000 UNK_ID = special_symbols["<unk>"] CLS_ID = special_symbols["<cls>"] SEP_ID = special_symbols["<sep>"] MASK_ID = special_symbols["<mask>"] EOD_ID = special_symbols["<eod>"] def _int64_feature(values): return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def _float_feature(values): return tf.train.Feature(float_list=tf.train.FloatList(value=values)) def format_filename(prefix, bsz_per_host, seq_len, bi_data, suffix, mask_alpha=5, mask_beta=1, reuse_len=None, uncased=False, fixed_num_predict=None): """docs.""" if reuse_len is None: reuse_len_str = "" else: reuse_len_str = "reuse-{}.".format(reuse_len) if not uncased: uncased_str = "" else: uncased_str = "uncased." if bi_data: bi_data_str = "bi" else: bi_data_str = "uni" if fixed_num_predict is not None: fnp_str = "fnp-{}.".format(fixed_num_predict) else: fnp_str = "" file_name = "{}.bsz-{}.seqlen-{}.{}{}{}.alpha-{}.beta-{}.{}{}".format( prefix, bsz_per_host, seq_len, reuse_len_str, uncased_str, bi_data_str, mask_alpha, mask_beta, fnp_str, suffix) return file_name def _create_data(idx, input_paths): # Load sentence-piece model sp = spm.SentencePieceProcessor() sp.Load(FLAGS.sp_path) input_shards = [] total_line_cnt = 0 for input_path in input_paths: input_data, sent_ids = [], [] sent_id, line_cnt = True, 0 tf.logging.info("Processing %s", input_path) for line in tf.gfile.Open(input_path): if line_cnt % 100000 == 0: tf.logging.info("Loading line %d", line_cnt) line_cnt += 1 if not line.strip(): if FLAGS.use_eod: sent_id = not sent_id cur_sent = [EOD_ID] else: continue else: if FLAGS.from_raw_text: cur_sent = preprocess_text(line.strip(), lower=FLAGS.uncased) cur_sent = encode_ids(sp, cur_sent) else: cur_sent = list(map(int, line.strip().split())) input_data.extend(cur_sent) sent_ids.extend([sent_id] * len(cur_sent)) sent_id = not sent_id tf.logging.info("Finish with line %d", line_cnt) if line_cnt == 0: continue input_data = np.array(input_data, dtype=np.int64) sent_ids = np.array(sent_ids, dtype=np.bool) total_line_cnt += line_cnt input_shards.append((input_data, sent_ids)) tf.logging.info("[Task %d] Total number line: %d", idx, total_line_cnt) tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords") filenames, num_batch = [], 0 # Randomly shuffle input shards (with a fixed but distinct random seed) np.random.seed(100 * FLAGS.task + FLAGS.pass_id) perm_indices = np.random.permutation(len(input_shards)) tf.logging.info("Using perm indices %s for pass %d", perm_indices.tolist(), FLAGS.pass_id) input_data_list, sent_ids_list = [], [] prev_sent_id = None for perm_idx in perm_indices: input_data, sent_ids = input_shards[perm_idx] # make sure the `send_ids[0] == not prev_sent_id` if prev_sent_id is not None and sent_ids[0] == prev_sent_id: sent_ids = np.logical_not(sent_ids) # append to temporary list input_data_list.append(input_data) sent_ids_list.append(sent_ids) # update `prev_sent_id` prev_sent_id = sent_ids[-1] input_data = np.concatenate(input_data_list) sent_ids = np.concatenate(sent_ids_list) 
file_name, cur_num_batch = create_tfrecords( save_dir=tfrecord_dir, basename="{}-{}-{}".format(FLAGS.split, idx, FLAGS.pass_id), data=[input_data, sent_ids], bsz_per_host=FLAGS.bsz_per_host, seq_len=FLAGS.seq_len, bi_data=FLAGS.bi_data, sp=sp, ) filenames.append(file_name) num_batch += cur_num_batch record_info = { "filenames": filenames, "num_batch": num_batch } return record_info def create_data(_): # Validate FLAGS assert FLAGS.bsz_per_host % FLAGS.num_core_per_host == 0 if not FLAGS.use_tpu: FLAGS.num_core_per_host = 1 # forced to be one # Make workdirs if not tf.gfile.Exists(FLAGS.save_dir): tf.gfile.MakeDirs(FLAGS.save_dir) tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords") if not tf.gfile.Exists(tfrecord_dir): tf.gfile.MakeDirs(tfrecord_dir) # Create and dump corpus_info from task 0 if FLAGS.task == 0: corpus_info = { "vocab_size": VOCAB_SIZE, "bsz_per_host": FLAGS.bsz_per_host, "num_core_per_host": FLAGS.num_core_per_host, "seq_len": FLAGS.seq_len, "reuse_len": FLAGS.reuse_len, "uncased": FLAGS.uncased, "bi_data": FLAGS.bi_data, "mask_alpha": FLAGS.mask_alpha, "mask_beta": FLAGS.mask_beta, "num_predict": FLAGS.num_predict, "use_eod": FLAGS.use_eod, "sp_path": FLAGS.sp_path, "input_glob": FLAGS.input_glob, } corpus_info_path = os.path.join(FLAGS.save_dir, "corpus_info.json") with tf.gfile.Open(corpus_info_path, "w") as fp: json.dump(corpus_info, fp) # Interleavely split the work into FLAGS.num_task splits file_paths = sorted(tf.gfile.Glob(FLAGS.input_glob)) tf.logging.info("Use glob: %s", FLAGS.input_glob) tf.logging.info("Find %d files: %s", len(file_paths), file_paths) task_file_paths = file_paths[FLAGS.task::FLAGS.num_task] if not task_file_paths: tf.logging.info("Exit: task %d has no file to process.", FLAGS.task) return tf.logging.info("Task %d process %d files: %s", FLAGS.task, len(task_file_paths), task_file_paths) record_info = _create_data(FLAGS.task, task_file_paths) record_prefix = "record_info-{}-{}-{}".format( FLAGS.split, FLAGS.task, FLAGS.pass_id) record_name = format_filename( prefix=record_prefix, bsz_per_host=FLAGS.bsz_per_host, seq_len=FLAGS.seq_len, mask_alpha=FLAGS.mask_alpha, mask_beta=FLAGS.mask_beta, reuse_len=FLAGS.reuse_len, bi_data=FLAGS.bi_data, suffix="json", uncased=FLAGS.uncased, fixed_num_predict=FLAGS.num_predict) record_info_path = os.path.join(tfrecord_dir, record_name) with tf.gfile.Open(record_info_path, "w") as fp: json.dump(record_info, fp) def batchify(data, bsz_per_host, sent_ids=None): num_step = len(data) // bsz_per_host data = data[:bsz_per_host * num_step] data = data.reshape(bsz_per_host, num_step) if sent_ids is not None: sent_ids = sent_ids[:bsz_per_host * num_step] sent_ids = sent_ids.reshape(bsz_per_host, num_step) if sent_ids is not None: return data, sent_ids return data def _split_a_and_b(data, sent_ids, begin_idx, tot_len, extend_target=False): """Split two segments from `data` starting from the index `begin_idx`.""" data_len = data.shape[0] if begin_idx + tot_len >= data_len: tf.logging.info("[_split_a_and_b] returns None: " "begin_idx %d + tot_len %d >= data_len %d", begin_idx, tot_len, data_len) return None end_idx = begin_idx + 1 cut_points = [] while end_idx < data_len: if sent_ids[end_idx] != sent_ids[end_idx - 1]: if end_idx - begin_idx >= tot_len: break cut_points.append(end_idx) end_idx += 1 a_begin = begin_idx if len(cut_points) == 0 or random.random() < 0.5: label = 0 if len(cut_points) == 0: a_end = end_idx else: a_end = random.choice(cut_points) b_len = max(1, tot_len - (a_end - a_begin)) # (zihang): `data_len 
- 1` to account for extend_target b_begin = random.randint(0, data_len - 1 - b_len) b_end = b_begin + b_len while b_begin > 0 and sent_ids[b_begin - 1] == sent_ids[b_begin]: b_begin -= 1 # (zihang): `data_len - 1` to account for extend_target while b_end < data_len - 1 and sent_ids[b_end - 1] == sent_ids[b_end]: b_end += 1 new_begin = a_end else: label = 1 a_end = random.choice(cut_points) b_begin = a_end b_end = end_idx new_begin = b_end while a_end - a_begin + b_end - b_begin > tot_len: if a_end - a_begin > b_end - b_begin: # delete the right side only for the LM objective a_end -= 1 else: b_end -= 1 ret = [data[a_begin: a_end], data[b_begin: b_end], label, new_begin] if extend_target: if a_end >= data_len or b_end >= data_len: tf.logging.info("[_split_a_and_b] returns None: " "a_end %d or b_end %d >= data_len %d", a_end, b_end, data_len) return None a_target = data[a_begin + 1: a_end + 1] b_target = data[b_begin: b_end + 1] ret.extend([a_target, b_target]) return ret def _is_start_piece(piece): special_pieces = set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~')) if (piece.startswith("▁") or piece.startswith("<") or piece in special_pieces): return True else: return False def _sample_mask(sp, seg, reverse=False, max_gram=5, goal_num_predict=None): """Sample `goal_num_predict` tokens for partial prediction. About `mask_beta` tokens are chosen in a context of `mask_alpha` tokens.""" seg_len = len(seg) mask = np.array([False] * seg_len, dtype=np.bool) num_predict = 0 ngrams = np.arange(1, max_gram + 1, dtype=np.int64) pvals = 1. / np.arange(1, max_gram + 1) pvals /= pvals.sum(keepdims=True) if reverse: seg = np.flip(seg, 0) cur_len = 0 while cur_len < seg_len: if goal_num_predict is not None and num_predict >= goal_num_predict: break n = np.random.choice(ngrams, p=pvals) if goal_num_predict is not None: n = min(n, goal_num_predict - num_predict) ctx_size = (n * FLAGS.mask_alpha) // FLAGS.mask_beta l_ctx = np.random.choice(ctx_size) r_ctx = ctx_size - l_ctx # Find the start position of a complete token beg = cur_len + l_ctx while beg < seg_len and not _is_start_piece(sp.IdToPiece(seg[beg].item())): beg += 1 if beg >= seg_len: break # Find the end position of the n-gram (start pos of the n+1-th gram) end = beg + 1 cnt_ngram = 1 while end < seg_len: if _is_start_piece(sp.IdToPiece(seg[beg].item())): cnt_ngram += 1 if cnt_ngram > n: break end += 1 if end >= seg_len: break # Update mask[beg:end] = True num_predict += end - beg cur_len = end + r_ctx while goal_num_predict is not None and num_predict < goal_num_predict: i = np.random.randint(seg_len) if not mask[i]: mask[i] = True num_predict += 1 if reverse: mask = np.flip(mask, 0) return mask def create_tfrecords(save_dir, basename, data, bsz_per_host, seq_len, bi_data, sp): data, sent_ids = data[0], data[1] num_core = FLAGS.num_core_per_host bsz_per_core = bsz_per_host // num_core if bi_data: assert bsz_per_host % (2 * FLAGS.num_core_per_host) == 0 fwd_data, fwd_sent_ids = batchify(data, bsz_per_host // 2, sent_ids) fwd_data = fwd_data.reshape(num_core, 1, bsz_per_core // 2, -1) fwd_sent_ids = fwd_sent_ids.reshape(num_core, 1, bsz_per_core // 2, -1) bwd_data = fwd_data[:, :, :, ::-1] bwd_sent_ids = fwd_sent_ids[:, :, :, ::-1] data = np.concatenate( [fwd_data, bwd_data], 1).reshape(bsz_per_host, -1) sent_ids = np.concatenate( [fwd_sent_ids, bwd_sent_ids], 1).reshape(bsz_per_host, -1) else: data, sent_ids = batchify(data, bsz_per_host, sent_ids) tf.logging.info("Raw data shape %s.", data.shape) file_name = format_filename( prefix=basename, 
bsz_per_host=bsz_per_host, seq_len=seq_len, bi_data=bi_data, suffix="tfrecords", mask_alpha=FLAGS.mask_alpha, mask_beta=FLAGS.mask_beta, reuse_len=FLAGS.reuse_len, uncased=FLAGS.uncased, fixed_num_predict=FLAGS.num_predict ) save_path = os.path.join(save_dir, file_name) record_writer = tf.python_io.TFRecordWriter(save_path) tf.logging.info("Start writing %s.", save_path) num_batch = 0 reuse_len = FLAGS.reuse_len # [sep] x 2 + [cls] assert reuse_len < seq_len - 3 data_len = data.shape[1] sep_array = np.array([SEP_ID], dtype=np.int64) cls_array = np.array([CLS_ID], dtype=np.int64) i = 0 while i + seq_len <= data_len: if num_batch % 500 == 0: tf.logging.info("Processing batch %d", num_batch) all_ok = True features = [] for idx in range(bsz_per_host): inp = data[idx, i: i + reuse_len] tgt = data[idx, i + 1: i + reuse_len + 1] results = _split_a_and_b( data[idx], sent_ids[idx], begin_idx=i + reuse_len, tot_len=seq_len - reuse_len - 3, extend_target=True) if results is None: tf.logging.info("Break out with seq idx %d", i) all_ok = False break # unpack the results (a_data, b_data, label, _, a_target, b_target) = tuple(results) # sample ngram spans to predict reverse = bi_data and (idx // (bsz_per_core // 2)) % 2 == 1 if FLAGS.num_predict is None: num_predict_0 = num_predict_1 = None else: num_predict_1 = FLAGS.num_predict // 2 num_predict_0 = FLAGS.num_predict - num_predict_1 mask_0 = _sample_mask(sp, inp, reverse=reverse, goal_num_predict=num_predict_0) mask_1 = _sample_mask(sp, np.concatenate([a_data, sep_array, b_data, sep_array, cls_array]), reverse=reverse, goal_num_predict=num_predict_1) # concatenate data cat_data = np.concatenate([inp, a_data, sep_array, b_data, sep_array, cls_array]) seg_id = ([0] * (reuse_len + a_data.shape[0]) + [0] + [1] * b_data.shape[0] + [1] + [2]) assert cat_data.shape[0] == seq_len assert mask_0.shape[0] == seq_len // 2 assert mask_1.shape[0] == seq_len // 2 # the last two CLS's are not used, just for padding purposes tgt = np.concatenate([tgt, a_target, b_target, cls_array, cls_array]) assert tgt.shape[0] == seq_len is_masked = np.concatenate([mask_0, mask_1], 0) if FLAGS.num_predict is not None: assert np.sum(is_masked) == FLAGS.num_predict feature = { "input": _int64_feature(cat_data), "is_masked": _int64_feature(is_masked), "target": _int64_feature(tgt), "seg_id": _int64_feature(seg_id), "label": _int64_feature([label]), } features.append(feature) if all_ok: assert len(features) == bsz_per_host for feature in features: example = tf.train.Example(features=tf.train.Features(feature=feature)) record_writer.write(example.SerializeToString()) num_batch += 1 else: break i += reuse_len record_writer.close() tf.logging.info("Done writing %s. 
Num of batches: %d", save_path, num_batch) return save_path, num_batch ################ # get_input_fn # ################ def _convert_example(example, use_bfloat16): """Cast int64 into int32 and float32 to bfloat16 if use_bfloat16.""" for key in list(example.keys()): val = example[key] if tf.keras.backend.is_sparse(val): val = tf.sparse.to_dense(val) if val.dtype == tf.int64: val = tf.cast(val, tf.int32) if use_bfloat16 and val.dtype == tf.float32: val = tf.cast(val, tf.bfloat16) example[key] = val def parse_files_to_dataset(parser, file_names, split, num_batch, num_hosts, host_id, num_core_per_host, bsz_per_core): # list of file pathes num_files = len(file_names) num_files_per_host = num_files // num_hosts my_start_file_id = host_id * num_files_per_host my_end_file_id = (host_id + 1) * num_files_per_host if host_id == num_hosts - 1: my_end_file_id = num_files file_paths = file_names[my_start_file_id: my_end_file_id] tf.logging.info("Host %d handles %d files", host_id, len(file_paths)) assert split == "train" dataset = tf.data.Dataset.from_tensor_slices(file_paths) # file-level shuffle if len(file_paths) > 1: dataset = dataset.shuffle(len(file_paths)) # Note: we cannot perform sample-level shuffle here because this will violate # the consecutive requirement of data stream. dataset = tf.data.TFRecordDataset(dataset) # (zihang): since we are doing online preprocessing, the parsed result of # the same input at each time will be different. Thus, cache processed data # is not helpful. It will use a lot of memory and lead to contrainer OOM. # So, change to cache non-parsed raw data instead. dataset = dataset.cache().map(parser).repeat() dataset = dataset.batch(bsz_per_core, drop_remainder=True) dataset = dataset.prefetch(num_core_per_host * bsz_per_core) return dataset def _local_perm(inputs, targets, is_masked, perm_size, seq_len): """ Sample a permutation of the factorization order, and create an attention mask accordingly. Args: inputs: int64 Tensor in shape [seq_len], input ids. targets: int64 Tensor in shape [seq_len], target ids. is_masked: bool Tensor in shape [seq_len]. True means being selected for partial prediction. perm_size: the length of longest permutation. Could be set to be reuse_len. Should not be larger than reuse_len or there will be data leaks. seq_len: int, sequence length. 
""" # Generate permutation indices index = tf.range(seq_len, dtype=tf.int64) index = tf.transpose(tf.reshape(index, [-1, perm_size])) index = tf.random_shuffle(index) index = tf.reshape(tf.transpose(index), [-1]) # `perm_mask` and `target_mask` # non-functional tokens non_func_tokens = tf.logical_not(tf.logical_or( tf.equal(inputs, SEP_ID), tf.equal(inputs, CLS_ID))) non_mask_tokens = tf.logical_and(tf.logical_not(is_masked), non_func_tokens) masked_or_func_tokens = tf.logical_not(non_mask_tokens) # Set the permutation indices of non-masked (& non-funcional) tokens to the # smallest index (-1): # (1) they can be seen by all other positions # (2) they cannot see masked positions, so there won"t be information leak smallest_index = -tf.ones([seq_len], dtype=tf.int64) rev_index = tf.where(non_mask_tokens, smallest_index, index) # Create `target_mask`: non-funcional and maksed tokens # 1: use mask as input and have loss # 0: use token (or [SEP], [CLS]) as input and do not have loss target_tokens = tf.logical_and(masked_or_func_tokens, non_func_tokens) target_mask = tf.cast(target_tokens, tf.float32) # Create `perm_mask` # `target_tokens` cannot see themselves self_rev_index = tf.where(target_tokens, rev_index, rev_index + 1) # 1: cannot attend if i <= j and j is not non-masked (masked_or_func_tokens) # 0: can attend if i > j or j is non-masked perm_mask = tf.logical_and( self_rev_index[:, None] <= rev_index[None, :], masked_or_func_tokens) perm_mask = tf.cast(perm_mask, tf.float32) # new target: [next token] for LM and [curr token] (self) for PLM new_targets = tf.concat([inputs[0: 1], targets[: -1]], axis=0) # construct inputs_k inputs_k = inputs # construct inputs_q inputs_q = target_mask return perm_mask, new_targets, target_mask, inputs_k, inputs_q def get_dataset(params, num_hosts, num_core_per_host, split, file_names, num_batch, seq_len, reuse_len, perm_size, mask_alpha, mask_beta, use_bfloat16=False, num_predict=None): bsz_per_core = params["batch_size"] if num_hosts > 1: host_id = params["context"].current_host else: host_id = 0 #### Function used to parse tfrecord def parser(record): """function used to parse tfrecord.""" record_spec = { "input": tf.FixedLenFeature([seq_len], tf.int64), "target": tf.FixedLenFeature([seq_len], tf.int64), "seg_id": tf.FixedLenFeature([seq_len], tf.int64), "label": tf.FixedLenFeature([1], tf.int64), "is_masked": tf.FixedLenFeature([seq_len], tf.int64), } # retrieve serialized example example = tf.parse_single_example( serialized=record, features=record_spec) inputs = example.pop("input") target = example.pop("target") is_masked = tf.cast(example.pop("is_masked"), tf.bool) non_reuse_len = seq_len - reuse_len assert perm_size <= reuse_len and perm_size <= non_reuse_len perm_mask_0, target_0, target_mask_0, input_k_0, input_q_0 = _local_perm( inputs[:reuse_len], target[:reuse_len], is_masked[:reuse_len], perm_size, reuse_len) perm_mask_1, target_1, target_mask_1, input_k_1, input_q_1 = _local_perm( inputs[reuse_len:], target[reuse_len:], is_masked[reuse_len:], perm_size, non_reuse_len) perm_mask_0 = tf.concat([perm_mask_0, tf.ones([reuse_len, non_reuse_len])], axis=1) perm_mask_1 = tf.concat([tf.zeros([non_reuse_len, reuse_len]), perm_mask_1], axis=1) perm_mask = tf.concat([perm_mask_0, perm_mask_1], axis=0) target = tf.concat([target_0, target_1], axis=0) target_mask = tf.concat([target_mask_0, target_mask_1], axis=0) input_k = tf.concat([input_k_0, input_k_1], axis=0) input_q = tf.concat([input_q_0, input_q_1], axis=0) if num_predict is not None: indices = 
tf.range(seq_len, dtype=tf.int64) bool_target_mask = tf.cast(target_mask, tf.bool) indices = tf.boolean_mask(indices, bool_target_mask) ##### extra padding due to CLS/SEP introduced after prepro actual_num_predict = tf.shape(indices)[0] pad_len = num_predict - actual_num_predict ##### target_mapping target_mapping = tf.one_hot(indices, seq_len, dtype=tf.float32) paddings = tf.zeros([pad_len, seq_len], dtype=target_mapping.dtype) target_mapping = tf.concat([target_mapping, paddings], axis=0) example["target_mapping"] = tf.reshape(target_mapping, [num_predict, seq_len]) ##### target target = tf.boolean_mask(target, bool_target_mask) paddings = tf.zeros([pad_len], dtype=target.dtype) target = tf.concat([target, paddings], axis=0) example["target"] = tf.reshape(target, [num_predict]) ##### target mask target_mask = tf.concat( [tf.ones([actual_num_predict], dtype=tf.float32), tf.zeros([pad_len], dtype=tf.float32)], axis=0) example["target_mask"] = tf.reshape(target_mask, [num_predict]) else: example["target"] = tf.reshape(target, [seq_len]) example["target_mask"] = tf.reshape(target_mask, [seq_len]) # reshape back to fixed shape example["perm_mask"] = tf.reshape(perm_mask, [seq_len, seq_len]) example["input_k"] = tf.reshape(input_k, [seq_len]) example["input_q"] = tf.reshape(input_q, [seq_len]) _convert_example(example, use_bfloat16) for k, v in example.items(): tf.logging.info("%s: %s", k, v) return example # Get dataset dataset = parse_files_to_dataset( parser=parser, file_names=file_names, split=split, num_batch=num_batch, num_hosts=num_hosts, host_id=host_id, num_core_per_host=num_core_per_host, bsz_per_core=bsz_per_core) return dataset def get_input_fn( tfrecord_dir, split, bsz_per_host, seq_len, reuse_len, bi_data, num_hosts=1, num_core_per_host=1, perm_size=None, mask_alpha=None, mask_beta=None, uncased=False, num_passes=None, use_bfloat16=False, num_predict=None): # Merge all record infos into a single one record_glob_base = format_filename( prefix="record_info-{}-*".format(split), bsz_per_host=bsz_per_host, seq_len=seq_len, bi_data=bi_data, suffix="json", mask_alpha=mask_alpha, mask_beta=mask_beta, reuse_len=reuse_len, uncased=uncased, fixed_num_predict=num_predict) record_info = {"num_batch": 0, "filenames": []} tfrecord_dirs = tfrecord_dir.split(",") tf.logging.info("Use the following tfrecord dirs: %s", tfrecord_dirs) for idx, record_dir in enumerate(tfrecord_dirs): record_glob = os.path.join(record_dir, record_glob_base) tf.logging.info("[%d] Record glob: %s", idx, record_glob) record_paths = sorted(tf.gfile.Glob(record_glob)) tf.logging.info("[%d] Num of record info path: %d", idx, len(record_paths)) cur_record_info = {"num_batch": 0, "filenames": []} for record_info_path in record_paths: if num_passes is not None: record_info_name = os.path.basename(record_info_path) fields = record_info_name.split(".")[0].split("-") pass_id = int(fields[-1]) if len(fields) == 5 and pass_id >= num_passes: tf.logging.info("Skip pass %d: %s", pass_id, record_info_name) continue with tf.gfile.Open(record_info_path, "r") as fp: info = json.load(fp) if num_passes is not None: eff_num_passes = min(num_passes, len(info["filenames"])) ratio = eff_num_passes / len(info["filenames"]) cur_record_info["num_batch"] += int(info["num_batch"] * ratio) cur_record_info["filenames"] += info["filenames"][:eff_num_passes] else: cur_record_info["num_batch"] += info["num_batch"] cur_record_info["filenames"] += info["filenames"] # overwrite directory for `cur_record_info` new_filenames = [] for filename in 
cur_record_info["filenames"]: basename = os.path.basename(filename) new_filename = os.path.join(record_dir, basename) new_filenames.append(new_filename) cur_record_info["filenames"] = new_filenames tf.logging.info("[Dir %d] Number of chosen batches: %s", idx, cur_record_info["num_batch"]) tf.logging.info("[Dir %d] Number of chosen files: %s", idx, len(cur_record_info["filenames"])) tf.logging.info(cur_record_info["filenames"]) # add `cur_record_info` to global `record_info` record_info["num_batch"] += cur_record_info["num_batch"] record_info["filenames"] += cur_record_info["filenames"] tf.logging.info("Total number of batches: %d", record_info["num_batch"]) tf.logging.info("Total number of files: %d", len(record_info["filenames"])) tf.logging.info(record_info["filenames"]) def input_fn(params): """docs.""" assert params["batch_size"] * num_core_per_host == bsz_per_host dataset = get_dataset( params=params, num_hosts=num_hosts, num_core_per_host=num_core_per_host, split=split, file_names=record_info["filenames"], num_batch=record_info["num_batch"], seq_len=seq_len, reuse_len=reuse_len, perm_size=perm_size, mask_alpha=mask_alpha, mask_beta=mask_beta, use_bfloat16=use_bfloat16, num_predict=num_predict) return dataset return input_fn, record_info if __name__ == "__main__": FLAGS = flags.FLAGS flags.DEFINE_bool("use_tpu", True, help="whether to use TPUs") flags.DEFINE_integer("bsz_per_host", 32, help="batch size per host.") flags.DEFINE_integer("num_core_per_host", 8, help="num TPU cores per host.") flags.DEFINE_integer("seq_len", 512, help="Sequence length.") flags.DEFINE_integer("reuse_len", 256, help="Number of token that can be reused as memory. " "Could be half of `seq_len`.") flags.DEFINE_bool("uncased", True, help="Use uncased inputs or not.") flags.DEFINE_bool("bi_data", True, help="whether to create bidirectional data") flags.DEFINE_integer("mask_alpha", default=6, help="How many tokens to form a group.") flags.DEFINE_integer("mask_beta", default=1, help="How many tokens to mask within each group.") flags.DEFINE_bool("use_eod", True, help="whether to append EOD at the end of a doc.") flags.DEFINE_bool("from_raw_text", True, help="Whether the input is raw text or encoded ids.") flags.DEFINE_integer("num_predict", default=85, help="Num of tokens to predict.") flags.DEFINE_string("input_glob", "data/example/*.txt", help="Input file glob.") flags.DEFINE_string("sp_path", "", help="Path to the sentence piece model.") flags.DEFINE_string("save_dir", "proc_data/example", help="Directory for saving the processed data.") flags.DEFINE_enum("split", "train", ["train", "dev", "test"], help="Save the data as which split.") flags.DEFINE_integer("pass_id", 0, help="ID of the current pass." "Different passes sample different negative segment.") flags.DEFINE_integer("num_task", 1, help="Number of total tasks.") flags.DEFINE_integer("task", 0, help="The Task ID. This value is used when " "using multiple workers to identify each worker.") tf.logging.set_verbosity(tf.logging.INFO) tf.app.run(create_data)
ymcui/Chinese-XLNet
1,650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/function_builder.py
Python
"""doc.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import tensorflow as tf import modeling import xlnet def construct_scalar_host_call( monitor_dict, model_dir, prefix="", reduce_fn=None): """ Construct host calls to monitor training progress on TPUs. """ metric_names = list(monitor_dict.keys()) def host_call_fn(global_step, *args): """actual host call function.""" step = global_step[0] with tf.contrib.summary.create_file_writer( logdir=model_dir, filename_suffix=".host_call").as_default(): with tf.contrib.summary.always_record_summaries(): for i, name in enumerate(metric_names): if reduce_fn is None: scalar = args[i][0] else: scalar = reduce_fn(args[i]) with tf.contrib.summary.record_summaries_every_n_global_steps( 100, global_step=step): tf.contrib.summary.scalar(prefix + name, scalar, step=step) return tf.contrib.summary.all_summary_ops() global_step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1]) other_tensors = [tf.reshape(monitor_dict[key], [1]) for key in metric_names] return host_call_fn, [global_step_tensor] + other_tensors def two_stream_loss(FLAGS, features, labels, mems, is_training): """Pretraining loss with two-stream attention Transformer-XL.""" #### Unpack input mem_name = "mems" mems = mems.get(mem_name, None) inp_k = tf.transpose(features["input_k"], [1, 0]) inp_q = tf.transpose(features["input_q"], [1, 0]) seg_id = tf.transpose(features["seg_id"], [1, 0]) inp_mask = None perm_mask = tf.transpose(features["perm_mask"], [1, 2, 0]) if FLAGS.num_predict is not None: # [num_predict x tgt_len x bsz] target_mapping = tf.transpose(features["target_mapping"], [1, 2, 0]) else: target_mapping = None # target for LM loss tgt = tf.transpose(features["target"], [1, 0]) # target mask for LM loss tgt_mask = tf.transpose(features["target_mask"], [1, 0]) # construct xlnet config and save to model_dir xlnet_config = xlnet.XLNetConfig(FLAGS=FLAGS) xlnet_config.to_json(os.path.join(FLAGS.model_dir, "config.json")) # construct run config from FLAGS run_config = xlnet.create_run_config(is_training, False, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp_k, seg_ids=seg_id, input_mask=inp_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, inp_q=inp_q) output = xlnet_model.get_sequence_output() new_mems = {mem_name: xlnet_model.get_new_memory()} lookup_table = xlnet_model.get_embedding_table() initializer = xlnet_model.get_initializer() with tf.variable_scope("model", reuse=tf.AUTO_REUSE): # LM loss lm_loss = modeling.lm_loss( hidden=output, target=tgt, n_token=xlnet_config.n_token, d_model=xlnet_config.d_model, initializer=initializer, lookup_table=lookup_table, tie_weight=True, bi_data=run_config.bi_data, use_tpu=run_config.use_tpu) #### Quantity to monitor monitor_dict = {} if FLAGS.use_bfloat16: tgt_mask = tf.cast(tgt_mask, tf.float32) lm_loss = tf.cast(lm_loss, tf.float32) total_loss = tf.reduce_sum(lm_loss * tgt_mask) / tf.reduce_sum(tgt_mask) monitor_dict["total_loss"] = total_loss return total_loss, new_mems, monitor_dict def get_loss(FLAGS, features, labels, mems, is_training): """Pretraining loss with two-stream attention Transformer-XL.""" if FLAGS.use_bfloat16: with tf.tpu.bfloat16_scope(): return two_stream_loss(FLAGS, features, labels, mems, is_training) else: return two_stream_loss(FLAGS, features, labels, mems, is_training) def get_classification_loss( FLAGS, features, n_class, is_training): """Loss 
for downstream classification tasks.""" bsz_per_core = tf.shape(features["input_ids"])[0] inp = tf.transpose(features["input_ids"], [1, 0]) seg_id = tf.transpose(features["segment_ids"], [1, 0]) inp_mask = tf.transpose(features["input_mask"], [1, 0]) label = tf.reshape(features["label_ids"], [bsz_per_core]) xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp, seg_ids=seg_id, input_mask=inp_mask) summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) with tf.variable_scope("model", reuse=tf.AUTO_REUSE): if FLAGS.cls_scope is not None and FLAGS.cls_scope: cls_scope = "classification_{}".format(FLAGS.cls_scope) else: cls_scope = "classification_{}".format(FLAGS.task_name.lower()) per_example_loss, logits = modeling.classification_loss( hidden=summary, labels=label, n_class=n_class, initializer=xlnet_model.get_initializer(), scope=cls_scope, return_logits=True) total_loss = tf.reduce_mean(per_example_loss) return total_loss, per_example_loss, logits def get_regression_loss( FLAGS, features, is_training): """Loss for downstream regression tasks.""" bsz_per_core = tf.shape(features["input_ids"])[0] inp = tf.transpose(features["input_ids"], [1, 0]) seg_id = tf.transpose(features["segment_ids"], [1, 0]) inp_mask = tf.transpose(features["input_mask"], [1, 0]) label = tf.reshape(features["label_ids"], [bsz_per_core]) xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp, seg_ids=seg_id, input_mask=inp_mask) summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) with tf.variable_scope("model", reuse=tf.AUTO_REUSE): per_example_loss, logits = modeling.regression_loss( hidden=summary, labels=label, initializer=xlnet_model.get_initializer(), scope="regression_{}".format(FLAGS.task_name.lower()), return_logits=True) total_loss = tf.reduce_mean(per_example_loss) return total_loss, per_example_loss, logits def get_qa_outputs(FLAGS, features, is_training): """Loss for downstream span-extraction QA tasks such as SQuAD.""" inp = tf.transpose(features["input_ids"], [1, 0]) seg_id = tf.transpose(features["segment_ids"], [1, 0]) inp_mask = tf.transpose(features["input_mask"], [1, 0]) seq_len = tf.shape(inp)[0] xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp, seg_ids=seg_id, input_mask=inp_mask) output = xlnet_model.get_sequence_output() initializer = xlnet_model.get_initializer() return_dict = {} # invalid position mask such as query and special symbols (PAD, SEP, CLS) p_mask = features["p_mask"] # logit of the start position with tf.variable_scope("start_logits"): start_logits = tf.layers.dense( output, 1, kernel_initializer=initializer) start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0]) start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask start_log_probs = tf.nn.log_softmax(start_logits_masked, -1) # logit of the end position with tf.variable_scope("end_logits"): if is_training: # during training, compute the end logits based on the # ground truth of the start position start_positions = 
tf.reshape(features["start_positions"], [-1]) start_index = tf.one_hot(start_positions, depth=seq_len, axis=-1, dtype=tf.float32) start_features = tf.einsum("lbh,bl->bh", output, start_index) start_features = tf.tile(start_features[None], [seq_len, 1, 1]) end_logits = tf.layers.dense( tf.concat([output, start_features], axis=-1), xlnet_config.d_model, kernel_initializer=initializer, activation=tf.tanh, name="dense_0") end_logits = tf.contrib.layers.layer_norm( end_logits, begin_norm_axis=-1) end_logits = tf.layers.dense( end_logits, 1, kernel_initializer=initializer, name="dense_1") end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0]) end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) else: # during inference, compute the end logits based on beam search start_top_log_probs, start_top_index = tf.nn.top_k( start_log_probs, k=FLAGS.start_n_top) start_index = tf.one_hot(start_top_index, depth=seq_len, axis=-1, dtype=tf.float32) start_features = tf.einsum("lbh,bkl->bkh", output, start_index) end_input = tf.tile(output[:, :, None], [1, 1, FLAGS.start_n_top, 1]) start_features = tf.tile(start_features[None], [seq_len, 1, 1, 1]) end_input = tf.concat([end_input, start_features], axis=-1) end_logits = tf.layers.dense( end_input, xlnet_config.d_model, kernel_initializer=initializer, activation=tf.tanh, name="dense_0") end_logits = tf.contrib.layers.layer_norm(end_logits, begin_norm_axis=-1) end_logits = tf.layers.dense( end_logits, 1, kernel_initializer=initializer, name="dense_1") end_logits = tf.reshape(end_logits, [seq_len, -1, FLAGS.start_n_top]) end_logits = tf.transpose(end_logits, [1, 2, 0]) end_logits_masked = end_logits * ( 1 - p_mask[:, None]) - 1e30 * p_mask[:, None] end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) end_top_log_probs, end_top_index = tf.nn.top_k( end_log_probs, k=FLAGS.end_n_top) end_top_log_probs = tf.reshape( end_top_log_probs, [-1, FLAGS.start_n_top * FLAGS.end_n_top]) end_top_index = tf.reshape( end_top_index, [-1, FLAGS.start_n_top * FLAGS.end_n_top]) if is_training: return_dict["start_log_probs"] = start_log_probs return_dict["end_log_probs"] = end_log_probs else: return_dict["start_top_log_probs"] = start_top_log_probs return_dict["start_top_index"] = start_top_index return_dict["end_top_log_probs"] = end_top_log_probs return_dict["end_top_index"] = end_top_index return return_dict def get_race_loss(FLAGS, features, is_training): """Loss for downstream multi-choice QA tasks such as RACE.""" bsz_per_core = tf.shape(features["input_ids"])[0] def _transform_features(feature): out = tf.reshape(feature, [bsz_per_core, 4, -1]) out = tf.transpose(out, [2, 0, 1]) out = tf.reshape(out, [-1, bsz_per_core * 4]) return out inp = _transform_features(features["input_ids"]) seg_id = _transform_features(features["segment_ids"]) inp_mask = _transform_features(features["input_mask"]) label = tf.reshape(features["label_ids"], [bsz_per_core]) xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp, seg_ids=seg_id, input_mask=inp_mask) summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) with tf.variable_scope("logits"): logits = tf.layers.dense(summary, 1, kernel_initializer=xlnet_model.get_initializer()) logits = tf.reshape(logits, [bsz_per_core, 4]) one_hot_target = tf.one_hot(label, 4) per_example_loss = 
-tf.reduce_sum( tf.nn.log_softmax(logits) * one_hot_target, -1) total_loss = tf.reduce_mean(per_example_loss) return total_loss, per_example_loss, logits
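Editor's note: the numpy sketch below is an added illustration, not part of the original file. It reproduces, on a toy batch, the reshaping that `_transform_features` inside `get_race_loss` applies so the four RACE answer choices line up along the batch axis; the batch size and sequence length are arbitrary assumptions.
import numpy as np

bsz, seq_len = 2, 3
# "input_ids" arrives as [bsz, 4 * seq_len]: four answer choices per example.
flat = np.arange(bsz * 4 * seq_len).reshape(bsz, 4 * seq_len)

out = flat.reshape(bsz, 4, -1)   # [bsz, 4, seq_len]
out = out.transpose(2, 0, 1)     # [seq_len, bsz, 4]
out = out.reshape(-1, bsz * 4)   # [seq_len, bsz * 4], the layout the XLNet body consumes
print(out.shape)                 # (3, 8)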
ymcui/Chinese-XLNet
1,650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/gpu_utils.py
Python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tensorflow as tf


def assign_to_gpu(gpu=0, ps_dev="/device:CPU:0"):
  def _assign(op):
    node_def = op if isinstance(op, tf.NodeDef) else op.node_def
    if node_def.op == "Variable":
      return ps_dev
    else:
      return "/gpu:%d" % gpu
  return _assign


def average_grads_and_vars(tower_grads_and_vars):
  def average_dense(grad_and_vars):
    if len(grad_and_vars) == 1:
      return grad_and_vars[0][0]

    grad = grad_and_vars[0][0]
    for g, _ in grad_and_vars[1:]:
      grad += g
    return grad / len(grad_and_vars)

  def average_sparse(grad_and_vars):
    if len(grad_and_vars) == 1:
      return grad_and_vars[0][0]

    indices = []
    values = []
    for g, _ in grad_and_vars:
      indices += [g.indices]
      values += [g.values]
    indices = tf.concat(indices, 0)
    values = tf.concat(values, 0) / len(grad_and_vars)
    return tf.IndexedSlices(values, indices, grad_and_vars[0][0].dense_shape)

  average_grads_and_vars = []
  for grad_and_vars in zip(*tower_grads_and_vars):
    if grad_and_vars[0][0] is None:
      grad = None
    elif isinstance(grad_and_vars[0][0], tf.IndexedSlices):
      grad = average_sparse(grad_and_vars)
    else:
      grad = average_dense(grad_and_vars)

    # Keep in mind that the Variables are redundant because they are shared
    # across towers. So .. we will just return the first tower's pointer to
    # the Variable.
    v = grad_and_vars[0][1]
    grad_and_var = (grad, v)
    average_grads_and_vars.append(grad_and_var)

  return average_grads_and_vars


def load_from_checkpoint(saver, logdir):
  sess = tf.get_default_session()
  ckpt = tf.train.get_checkpoint_state(logdir)
  if ckpt and ckpt.model_checkpoint_path:
    if os.path.isabs(ckpt.model_checkpoint_path):
      # Restores from checkpoint with absolute path.
      saver.restore(sess, ckpt.model_checkpoint_path)
    else:
      # Restores from checkpoint with relative path.
      saver.restore(sess, os.path.join(logdir, ckpt.model_checkpoint_path))
    return True
  return False
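Editor's note: the snippet below is an added sketch, not part of the original file. It mimics with plain numpy the per-variable averaging that `average_grads_and_vars` performs across towers, to make the expected input structure explicit; the tower values and variable stand-ins are invented for illustration.
import numpy as np

# Each tower yields (grad, var) pairs in the same variable order.
v_w, v_b = "w", "b"  # stand-ins for the shared tf.Variable handles
tower0 = [(np.array([2.0, 4.0]), v_w), (np.array([1.0]), v_b)]
tower1 = [(np.array([4.0, 8.0]), v_w), (np.array([3.0]), v_b)]

averaged = []
for grad_and_vars in zip(tower0, tower1):
    grads = [g for g, _ in grad_and_vars]
    # Average the gradients; keep the first tower's pointer to the shared variable.
    averaged.append((sum(grads) / len(grads), grad_and_vars[0][1]))

print(averaged)  # [(array([3., 6.]), 'w'), (array([2.]), 'b')]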
ymcui/Chinese-XLNet
1,650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/model_utils.py
Python
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import re import numpy as np import six from os.path import join from six.moves import zip from absl import flags import tensorflow as tf def configure_tpu(FLAGS): if FLAGS.use_tpu: tpu_cluster = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) master = tpu_cluster.get_master() else: tpu_cluster = None master = FLAGS.master session_config = tf.ConfigProto(allow_soft_placement=True) # Uncomment the following line if you hope to monitor GPU RAM growth # session_config.gpu_options.allow_growth = True if FLAGS.use_tpu: strategy = None tf.logging.info('Use TPU without distribute strategy.') elif FLAGS.num_core_per_host == 1: strategy = None tf.logging.info('Single device mode.') else: strategy = tf.contrib.distribute.MirroredStrategy( num_gpus=FLAGS.num_core_per_host) tf.logging.info('Use MirroredStrategy with %d devices.', strategy.num_replicas_in_sync) per_host_input = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( master=master, model_dir=FLAGS.model_dir, session_config=session_config, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations, num_shards=FLAGS.num_hosts * FLAGS.num_core_per_host, per_host_input_for_training=per_host_input), keep_checkpoint_max=FLAGS.max_save, save_checkpoints_secs=None, save_checkpoints_steps=FLAGS.save_steps, train_distribute=strategy ) return run_config def init_from_checkpoint(FLAGS, global_vars=False): tvars = tf.global_variables() if global_vars else tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if FLAGS.init_checkpoint is not None: if FLAGS.init_checkpoint.endswith("latest"): ckpt_dir = os.path.dirname(FLAGS.init_checkpoint) init_checkpoint = tf.train.latest_checkpoint(ckpt_dir) else: init_checkpoint = FLAGS.init_checkpoint tf.logging.info("Initialize from the ckpt {}".format(init_checkpoint)) (assignment_map, initialized_variable_names ) = get_assignment_map_from_checkpoint(tvars, init_checkpoint) if FLAGS.use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) # Log customized initialization tf.logging.info("**** Global Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) return scaffold_fn def get_train_op(FLAGS, total_loss, grads_and_vars=None): global_step = tf.train.get_or_create_global_step() # increase the learning rate linearly if FLAGS.warmup_steps > 0: warmup_lr = (tf.cast(global_step, tf.float32) / tf.cast(FLAGS.warmup_steps, tf.float32) * FLAGS.learning_rate) else: warmup_lr = 0.0 # decay the learning rate if FLAGS.decay_method == "poly": decay_lr = tf.train.polynomial_decay( FLAGS.learning_rate, global_step=global_step - FLAGS.warmup_steps, decay_steps=FLAGS.train_steps - FLAGS.warmup_steps, end_learning_rate=FLAGS.learning_rate * FLAGS.min_lr_ratio) elif FLAGS.decay_method == "cos": decay_lr = tf.train.cosine_decay( FLAGS.learning_rate, global_step=global_step - FLAGS.warmup_steps, decay_steps=FLAGS.train_steps - FLAGS.warmup_steps, alpha=FLAGS.min_lr_ratio) else: raise ValueError(FLAGS.decay_method) learning_rate = tf.where(global_step < 
FLAGS.warmup_steps, warmup_lr, decay_lr) if (FLAGS.weight_decay > 0 and not FLAGS.use_tpu and FLAGS.num_core_per_host > 1): raise ValueError("Do not support `weight_decay > 0` with multi-gpu " "training so far.") if FLAGS.weight_decay == 0: optimizer = tf.train.AdamOptimizer( learning_rate=learning_rate, epsilon=FLAGS.adam_epsilon) else: optimizer = AdamWeightDecayOptimizer( learning_rate=learning_rate, epsilon=FLAGS.adam_epsilon, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], weight_decay_rate=FLAGS.weight_decay) if FLAGS.use_tpu: optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) if grads_and_vars is None: grads_and_vars = optimizer.compute_gradients(total_loss) gradients, variables = zip(*grads_and_vars) clipped, gnorm = tf.clip_by_global_norm(gradients, FLAGS.clip) if getattr(FLAGS, "lr_layer_decay_rate", 1.0) != 1.0: n_layer = 0 for i in range(len(clipped)): m = re.search(r"model/transformer/layer_(\d+?)/", variables[i].name) if not m: continue n_layer = max(n_layer, int(m.group(1)) + 1) for i in range(len(clipped)): for l in range(n_layer): if "model/transformer/layer_{}/".format(l) in variables[i].name: abs_rate = FLAGS.lr_layer_decay_rate ** (n_layer - 1 - l) clipped[i] *= abs_rate tf.logging.info("Apply mult {:.4f} to layer-{} grad of {}".format( abs_rate, l, variables[i].name)) break train_op = optimizer.apply_gradients( zip(clipped, variables), global_step=global_step) # Manually increment `global_step` for AdamWeightDecayOptimizer if FLAGS.weight_decay > 0: new_global_step = global_step + 1 train_op = tf.group(train_op, [global_step.assign(new_global_step)]) return train_op, learning_rate, gnorm def clean_ckpt(_): input_ckpt = FLAGS.clean_input_ckpt output_model_dir = FLAGS.clean_output_model_dir tf.reset_default_graph() var_list = tf.contrib.framework.list_variables(input_ckpt) var_values, var_dtypes = {}, {} for (name, shape) in var_list: if not name.startswith("global_step") and "adam" not in name.lower(): var_values[name] = None tf.logging.info("Include {}".format(name)) else: tf.logging.info("Exclude {}".format(name)) tf.logging.info("Loading from {}".format(input_ckpt)) reader = tf.contrib.framework.load_checkpoint(input_ckpt) for name in var_values: tensor = reader.get_tensor(name) var_dtypes[name] = tensor.dtype var_values[name] = tensor with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): tf_vars = [ tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v]) for v in var_values ] placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars] assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)] global_step = tf.Variable( 0, name="global_step", trainable=False, dtype=tf.int64) saver = tf.train.Saver(tf.all_variables()) if not tf.gfile.Exists(output_model_dir): tf.gfile.MakeDirs(output_model_dir) # Build a model consisting only of variables, set them to the average values. with tf.Session() as sess: sess.run(tf.initialize_all_variables()) for p, assign_op, (name, value) in zip(placeholders, assign_ops, six.iteritems(var_values)): sess.run(assign_op, {p: value}) # Use the built saver to save the averaged checkpoint. 
saver.save(sess, join(output_model_dir, "model.ckpt"), global_step=global_step) def avg_checkpoints(model_dir, output_model_dir, last_k): tf.reset_default_graph() checkpoint_state = tf.train.get_checkpoint_state(model_dir) checkpoints = checkpoint_state.all_model_checkpoint_paths[- last_k:] var_list = tf.contrib.framework.list_variables(checkpoints[0]) var_values, var_dtypes = {}, {} for (name, shape) in var_list: if not name.startswith("global_step"): var_values[name] = np.zeros(shape) for checkpoint in checkpoints: reader = tf.contrib.framework.load_checkpoint(checkpoint) for name in var_values: tensor = reader.get_tensor(name) var_dtypes[name] = tensor.dtype var_values[name] += tensor tf.logging.info("Read from checkpoint %s", checkpoint) for name in var_values: # Average. var_values[name] /= len(checkpoints) with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): tf_vars = [ tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v]) for v in var_values ] placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars] assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)] global_step = tf.Variable( 0, name="global_step", trainable=False, dtype=tf.int64) saver = tf.train.Saver(tf.all_variables()) # Build a model consisting only of variables, set them to the average values. with tf.Session() as sess: sess.run(tf.initialize_all_variables()) for p, assign_op, (name, value) in zip(placeholders, assign_ops, six.iteritems(var_values)): sess.run(assign_op, {p: value}) # Use the built saver to save the averaged checkpoint. saver.save(sess, join(output_model_dir, "model.ckpt"), global_step=global_step) def get_assignment_map_from_checkpoint(tvars, init_checkpoint): """Compute the union of the current variables and checkpoint variables.""" assignment_map = {} initialized_variable_names = {} name_to_variable = collections.OrderedDict() for var in tvars: name = var.name m = re.match("^(.*):\\d+$", name) if m is not None: name = m.group(1) name_to_variable[name] = var init_vars = tf.train.list_variables(init_checkpoint) assignment_map = collections.OrderedDict() for x in init_vars: (name, var) = (x[0], x[1]) # tf.logging.info('original name: %s', name) if name not in name_to_variable: continue # assignment_map[name] = name assignment_map[name] = name_to_variable[name] initialized_variable_names[name] = 1 initialized_variable_names[name + ":0"] = 1 return (assignment_map, initialized_variable_names) class AdamWeightDecayOptimizer(tf.train.Optimizer): """A basic Adam optimizer that includes "correct" L2 weight decay.""" def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-6, exclude_from_weight_decay=None, include_in_weight_decay=["r_s_bias", "r_r_bias", "r_w_bias"], name="AdamWeightDecayOptimizer"): """Constructs a AdamWeightDecayOptimizer.""" super(AdamWeightDecayOptimizer, self).__init__(False, name) self.learning_rate = learning_rate self.weight_decay_rate = weight_decay_rate self.beta_1 = beta_1 self.beta_2 = beta_2 self.epsilon = epsilon self.exclude_from_weight_decay = exclude_from_weight_decay self.include_in_weight_decay = include_in_weight_decay def apply_gradients(self, grads_and_vars, global_step=None, name=None): """See base class.""" assignments = [] for (grad, param) in grads_and_vars: if grad is None or param is None: continue param_name = self._get_variable_name(param.name) m = tf.get_variable( name=param_name + "/adam_m", shape=param.shape.as_list(), dtype=tf.float32, trainable=False, 
initializer=tf.zeros_initializer()) v = tf.get_variable( name=param_name + "/adam_v", shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer()) # Standard Adam update. next_m = ( tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)) next_v = ( tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))) update = next_m / (tf.sqrt(next_v) + self.epsilon) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want ot decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. if self._do_use_weight_decay(param_name): update += self.weight_decay_rate * param update_with_lr = self.learning_rate * update next_param = param - update_with_lr assignments.extend( [param.assign(next_param), m.assign(next_m), v.assign(next_v)]) return tf.group(*assignments, name=name) def _do_use_weight_decay(self, param_name): """Whether to use L2 weight decay for `param_name`.""" if not self.weight_decay_rate: return False for r in self.include_in_weight_decay: if re.search(r, param_name) is not None: return True if self.exclude_from_weight_decay: for r in self.exclude_from_weight_decay: if re.search(r, param_name) is not None: tf.logging.info('Adam WD excludes {}'.format(param_name)) return False return True def _get_variable_name(self, param_name): """Get the variable name from the tensor name.""" m = re.match("^(.*):\\d+$", param_name) if m is not None: param_name = m.group(1) return param_name if __name__ == "__main__": flags.DEFINE_string("clean_input_ckpt", "", "input ckpt for cleaning") flags.DEFINE_string("clean_output_model_dir", "", "output dir for cleaned ckpt") FLAGS = flags.FLAGS tf.app.run(clean_ckpt)
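Editor's note: the snippet below is an added sketch, not part of the original file. It reproduces in plain Python the warmup-then-decay learning-rate schedule that `get_train_op` assembles for the "poly" decay branch; the learning rate, warmup steps, train steps, and min_lr_ratio below are illustrative assumptions, not repository defaults.
learning_rate, warmup_steps, train_steps, min_lr_ratio = 1e-4, 100, 1000, 0.0

def lr_at(step):
    # Linear warmup, as in `warmup_lr` above.
    warmup_lr = step / warmup_steps * learning_rate
    # tf.train.polynomial_decay with its default power of 1.0 decays linearly
    # from learning_rate down to learning_rate * min_lr_ratio over the remaining steps.
    frac = min(max(step - warmup_steps, 0) / (train_steps - warmup_steps), 1.0)
    decay_lr = (learning_rate - learning_rate * min_lr_ratio) * (1.0 - frac) \
               + learning_rate * min_lr_ratio
    return warmup_lr if step < warmup_steps else decay_lr

for step in (0, 50, 100, 550, 1000):
    print(step, lr_at(step))  # peaks at learning_rate right after warmup, then decays toward 0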
ymcui/Chinese-XLNet
1,650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/modeling.py
Python
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.tanh( (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf def embedding_lookup(x, n_token, d_embed, initializer, use_tpu=True, scope='embedding', reuse=None, dtype=tf.float32): """TPU and GPU embedding_lookup function.""" with tf.variable_scope(scope, reuse=reuse): lookup_table = tf.get_variable('lookup_table', [n_token, d_embed], dtype=dtype, initializer=initializer) if use_tpu: one_hot_idx = tf.one_hot(x, n_token, dtype=dtype) if one_hot_idx.shape.ndims == 2: return tf.einsum('in,nd->id', one_hot_idx, lookup_table), lookup_table else: return tf.einsum('ibn,nd->ibd', one_hot_idx, lookup_table), lookup_table else: return tf.nn.embedding_lookup(lookup_table, x), lookup_table def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = tf.einsum('i,d->id', pos_seq, inv_freq) pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = tf.tile(pos_emb, [1, bsz, 1]) return pos_emb def positionwise_ffn(inp, d_model, d_inner, dropout, kernel_initializer, activation_type='relu', scope='ff', is_training=True, reuse=None): """Position-wise Feed-forward Network.""" if activation_type == 'relu': activation = tf.nn.relu elif activation_type == 'gelu': activation = gelu else: raise ValueError('Unsupported activation type {}'.format(activation_type)) output = inp with tf.variable_scope(scope, reuse=reuse): output = tf.layers.dense(output, d_inner, activation=activation, kernel_initializer=kernel_initializer, name='layer_1') output = tf.layers.dropout(output, dropout, training=is_training, name='drop_1') output = tf.layers.dense(output, d_model, kernel_initializer=kernel_initializer, name='layer_2') output = tf.layers.dropout(output, dropout, training=is_training, name='drop_2') output = tf.contrib.layers.layer_norm(output + inp, begin_norm_axis=-1, scope='LayerNorm') return output def head_projection(h, d_model, n_head, d_head, kernel_initializer, name): """Project hidden states to a specific head with a 4D-shape.""" proj_weight = tf.get_variable('{}/kernel'.format(name), [d_model, n_head, d_head], dtype=h.dtype, initializer=kernel_initializer) head = tf.einsum('ibh,hnd->ibnd', h, proj_weight) return head def post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training, kernel_initializer, residual=True): """Post-attention processing.""" # post-attention projection (back to `d_model`) proj_o = tf.get_variable('o/kernel', [d_model, n_head, d_head], dtype=h.dtype, initializer=kernel_initializer) attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec, proj_o) attn_out = tf.layers.dropout(attn_out, dropout, training=is_training) if residual: output = tf.contrib.layers.layer_norm(attn_out + h, begin_norm_axis=-1, scope='LayerNorm') else: output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1, scope='LayerNorm') return output def abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, is_training, scale): """Core absolute positional attention operations.""" attn_score = tf.einsum('ibnd,jbnd->ijbn', q_head, k_head) attn_score *= scale if attn_mask is not None: attn_score = attn_score - 1e30 * attn_mask 
# attention probability attn_prob = tf.nn.softmax(attn_score, 1) attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training) # attention output attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head) return attn_vec def rel_attn_core(q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, r_r_bias, r_s_bias, attn_mask, dropatt, is_training, scale): """Core relative positional attention operations.""" # content based attention score ac = tf.einsum('ibnd,jbnd->ijbn', q_head + r_w_bias, k_head_h) # position based attention score bd = tf.einsum('ibnd,jbnd->ijbn', q_head + r_r_bias, k_head_r) bd = rel_shift(bd, klen=tf.shape(ac)[1]) # segment based attention score if seg_mat is None: ef = 0 else: ef = tf.einsum('ibnd,snd->ibns', q_head + r_s_bias, seg_embed) ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef) # merge attention scores and perform masking attn_score = (ac + bd + ef) * scale if attn_mask is not None: # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask attn_score = attn_score - 1e30 * attn_mask # attention probability attn_prob = tf.nn.softmax(attn_score, 1) attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training) # attention output attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h) return attn_vec def rel_shift(x, klen=-1): """perform relative shift to form the relative attention score.""" x_size = tf.shape(x) x = tf.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]]) x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1]) x = tf.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]]) x = tf.slice(x, [0, 0, 0, 0], [-1, klen, -1, -1]) return x def _create_mask(qlen, mlen, dtype=tf.float32, same_length=False): """create causal attention mask.""" attn_mask = tf.ones([qlen, qlen], dtype=dtype) mask_u = tf.matrix_band_part(attn_mask, 0, -1) mask_dia = tf.matrix_band_part(attn_mask, 0, 0) attn_mask_pad = tf.zeros([qlen, mlen], dtype=dtype) ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1) if same_length: mask_l = tf.matrix_band_part(attn_mask, -1, 0) ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1) return ret def _cache_mem(curr_out, prev_mem, mem_len, reuse_len=None): """cache hidden states into memory.""" if mem_len is None or mem_len == 0: return None else: if reuse_len is not None and reuse_len > 0: curr_out = curr_out[:reuse_len] if prev_mem is None: new_mem = curr_out[-mem_len:] else: new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:] return tf.stop_gradient(new_mem) def relative_positional_encoding(qlen, klen, d_model, clamp_len, attn_type, bi_data, bsz=None, dtype=None): """create relative positional encoding.""" freq_seq = tf.range(0, d_model, 2.0) if dtype is not None and dtype != tf.float32: freq_seq = tf.cast(freq_seq, dtype=dtype) inv_freq = 1 / (10000 ** (freq_seq / d_model)) if attn_type == 'bi': # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif attn_type == 'uni': # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError('Unknown `attn_type` {}.'.format(attn_type)) if bi_data: fwd_pos_seq = tf.range(beg, end, -1.0) bwd_pos_seq = tf.range(-beg, -end, 1.0) if dtype is not None and dtype != tf.float32: fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype) bwd_pos_seq = tf.cast(bwd_pos_seq, dtype=dtype) if clamp_len > 0: fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -clamp_len, clamp_len) bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -clamp_len, clamp_len) if bsz is not None: # With bi_data, the batch size should be divisible by 2. 
assert bsz%2 == 0 fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz//2) bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq, bsz//2) else: fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq) pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1) else: fwd_pos_seq = tf.range(beg, end, -1.0) if dtype is not None and dtype != tf.float32: fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype) if clamp_len > 0: fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -clamp_len, clamp_len) pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz) return pos_emb def multihead_attn(q, k, v, attn_mask, d_model, n_head, d_head, dropout, dropatt, is_training, kernel_initializer, residual=True, scope='abs_attn', reuse=None): """Standard multi-head attention with absolute positional embedding.""" scale = 1 / (d_head ** 0.5) with tf.variable_scope(scope, reuse=reuse): # attention heads q_head = head_projection( q, d_model, n_head, d_head, kernel_initializer, 'q') k_head = head_projection( k, d_model, n_head, d_head, kernel_initializer, 'k') v_head = head_projection( v, d_model, n_head, d_head, kernel_initializer, 'v') # attention vector attn_vec = abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, is_training, scale) # post processing output = post_attention(v, attn_vec, d_model, n_head, d_head, dropout, is_training, kernel_initializer, residual) return output def rel_multihead_attn(h, r, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed, attn_mask, mems, d_model, n_head, d_head, dropout, dropatt, is_training, kernel_initializer, scope='rel_attn', reuse=None): """Multi-head attention with relative positional encoding.""" scale = 1 / (d_head ** 0.5) with tf.variable_scope(scope, reuse=reuse): if mems is not None and mems.shape.ndims > 1: cat = tf.concat([mems, h], 0) else: cat = h # content heads q_head_h = head_projection( h, d_model, n_head, d_head, kernel_initializer, 'q') k_head_h = head_projection( cat, d_model, n_head, d_head, kernel_initializer, 'k') v_head_h = head_projection( cat, d_model, n_head, d_head, kernel_initializer, 'v') # positional heads k_head_r = head_projection( r, d_model, n_head, d_head, kernel_initializer, 'r') # core attention ops attn_vec = rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, r_r_bias, r_s_bias, attn_mask, dropatt, is_training, scale) # post processing output = post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training, kernel_initializer) return output def two_stream_rel_attn(h, g, r, mems, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed, attn_mask_h, attn_mask_g, target_mapping, d_model, n_head, d_head, dropout, dropatt, is_training, kernel_initializer, scope='rel_attn'): """Two-stream attention with relative positional encoding.""" scale = 1 / (d_head ** 0.5) with tf.variable_scope(scope, reuse=False): # content based attention score if mems is not None and mems.shape.ndims > 1: cat = tf.concat([mems, h], 0) else: cat = h # content-based key head k_head_h = head_projection( cat, d_model, n_head, d_head, kernel_initializer, 'k') # content-based value head v_head_h = head_projection( cat, d_model, n_head, d_head, kernel_initializer, 'v') # position-based key head k_head_r = head_projection( r, d_model, n_head, d_head, kernel_initializer, 'r') ##### h-stream # content-stream query head q_head_h = head_projection( h, d_model, n_head, d_head, kernel_initializer, 'q') # core attention ops attn_vec_h = rel_attn_core( q_head_h, k_head_h, v_head_h, 
k_head_r, seg_embed, seg_mat, r_w_bias, r_r_bias, r_s_bias, attn_mask_h, dropatt, is_training, scale) # post processing output_h = post_attention(h, attn_vec_h, d_model, n_head, d_head, dropout, is_training, kernel_initializer) with tf.variable_scope(scope, reuse=True): ##### g-stream # query-stream query head q_head_g = head_projection( g, d_model, n_head, d_head, kernel_initializer, 'q') # core attention ops if target_mapping is not None: q_head_g = tf.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping) attn_vec_g = rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, r_r_bias, r_s_bias, attn_mask_g, dropatt, is_training, scale) attn_vec_g = tf.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping) else: attn_vec_g = rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, r_r_bias, r_s_bias, attn_mask_g, dropatt, is_training, scale) # post processing output_g = post_attention(g, attn_vec_g, d_model, n_head, d_head, dropout, is_training, kernel_initializer) return output_h, output_g def transformer_xl(inp_k, n_token, n_layer, d_model, n_head, d_head, d_inner, dropout, dropatt, attn_type, bi_data, initializer, is_training, mem_len=None, inp_q=None, mems=None, same_length=False, clamp_len=-1, untie_r=False, use_tpu=True, input_mask=None, perm_mask=None, seg_id=None, reuse_len=None, ff_activation='relu', target_mapping=None, use_bfloat16=False, scope='transformer', **kwargs): """ Defines a Transformer-XL computation graph with additional support for XLNet. Args: inp_k: int32 Tensor in shape [len, bsz], the input token IDs. seg_id: int32 Tensor in shape [len, bsz], the input segment IDs. input_mask: float32 Tensor in shape [len, bsz], the input mask. 0 for real tokens and 1 for padding. mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory from previous batches. The length of the list equals n_layer. If None, no memory is used. perm_mask: float32 Tensor in shape [len, len, bsz]. If perm_mask[i, j, k] = 0, i attend to j in batch k; if perm_mask[i, j, k] = 1, i does not attend to j in batch k. If None, each position attends to all the others. target_mapping: float32 Tensor in shape [num_predict, len, bsz]. If target_mapping[i, j, k] = 1, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction. Set to None during finetuning. inp_q: float32 Tensor in shape [len, bsz]. 1 for tokens with losses and 0 for tokens without losses. Only used during pretraining for two-stream attention. Set to None during finetuning. n_layer: int, the number of layers. d_model: int, the hidden size. n_head: int, the number of attention heads. d_head: int, the dimension size of each attention head. d_inner: int, the hidden size in feed-forward layers. ff_activation: str, "relu" or "gelu". untie_r: bool, whether to untie the biases in attention. n_token: int, the vocab size. is_training: bool, whether in training mode. use_tpu: bool, whether TPUs are used. use_bfloat16: bool, use bfloat16 instead of float32. dropout: float, dropout rate. dropatt: float, dropout rate on attention probabilities. init: str, the initialization scheme, either "normal" or "uniform". init_range: float, initialize the parameters with a uniform distribution in [-init_range, init_range]. Only effective when init="uniform". init_std: float, initialize the parameters with a normal distribution with mean 0 and stddev init_std. Only effective when init="normal". mem_len: int, the number of tokens to cache. 
reuse_len: int, the number of tokens in the currect batch to be cached and reused in the future. bi_data: bool, whether to use bidirectional input pipeline. Usually set to True during pretraining and False during finetuning. clamp_len: int, clamp all relative distances larger than clamp_len. -1 means no clamping. same_length: bool, whether to use the same attention length for each token. summary_type: str, "last", "first", "mean", or "attn". The method to pool the input to get a vector representation. initializer: A tf initializer. scope: scope name for the computation graph. """ tf.logging.info('memory input {}'.format(mems)) tf_float = tf.bfloat16 if use_bfloat16 else tf.float32 tf.logging.info('Use float type {}'.format(tf_float)) new_mems = [] with tf.variable_scope(scope): if untie_r: r_w_bias = tf.get_variable('r_w_bias', [n_layer, n_head, d_head], dtype=tf_float, initializer=initializer) r_r_bias = tf.get_variable('r_r_bias', [n_layer, n_head, d_head], dtype=tf_float, initializer=initializer) else: r_w_bias = tf.get_variable('r_w_bias', [n_head, d_head], dtype=tf_float, initializer=initializer) r_r_bias = tf.get_variable('r_r_bias', [n_head, d_head], dtype=tf_float, initializer=initializer) bsz = tf.shape(inp_k)[1] qlen = tf.shape(inp_k)[0] mlen = tf.shape(mems[0])[0] if mems is not None else 0 klen = mlen + qlen ##### Attention mask # causal attention mask if attn_type == 'uni': attn_mask = _create_mask(qlen, mlen, tf_float, same_length) attn_mask = attn_mask[:, :, None, None] elif attn_type == 'bi': attn_mask = None else: raise ValueError('Unsupported attention type: {}'.format(attn_type)) # data mask: input mask & perm mask if input_mask is not None and perm_mask is not None: data_mask = input_mask[None] + perm_mask elif input_mask is not None and perm_mask is None: data_mask = input_mask[None] elif input_mask is None and perm_mask is not None: data_mask = perm_mask else: data_mask = None if data_mask is not None: # all mems can be attended to mems_mask = tf.zeros([tf.shape(data_mask)[0], mlen, bsz], dtype=tf_float) data_mask = tf.concat([mems_mask, data_mask], 1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = tf.cast(attn_mask > 0, dtype=tf_float) if attn_mask is not None: non_tgt_mask = -tf.eye(qlen, dtype=tf_float) non_tgt_mask = tf.concat([tf.zeros([qlen, mlen], dtype=tf_float), non_tgt_mask], axis=-1) non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0, dtype=tf_float) else: non_tgt_mask = None ##### Word embedding word_emb_k, lookup_table = embedding_lookup( x=inp_k, n_token=n_token, d_embed=d_model, initializer=initializer, use_tpu=use_tpu, dtype=tf_float, scope='word_embedding') if inp_q is not None: with tf.variable_scope('mask_emb'): mask_emb = tf.get_variable('mask_emb', [1, 1, d_model], dtype=tf_float) if target_mapping is not None: word_emb_q = tf.tile(mask_emb, [tf.shape(target_mapping)[0], bsz, 1]) else: inp_q_ext = inp_q[:, :, None] word_emb_q = inp_q_ext * mask_emb + (1 - inp_q_ext) * word_emb_k output_h = tf.layers.dropout(word_emb_k, dropout, training=is_training) if inp_q is not None: output_g = tf.layers.dropout(word_emb_q, dropout, training=is_training) ##### Segment embedding if seg_id is not None: if untie_r: r_s_bias = tf.get_variable('r_s_bias', [n_layer, n_head, d_head], dtype=tf_float, initializer=initializer) else: # default case (tie) r_s_bias = tf.get_variable('r_s_bias', [n_head, d_head], dtype=tf_float, initializer=initializer) 
seg_embed = tf.get_variable('seg_embed', [n_layer, 2, n_head, d_head], dtype=tf_float, initializer=initializer) # Convert `seg_id` to one-hot `seg_mat` mem_pad = tf.zeros([mlen, bsz], dtype=tf.int32) cat_ids = tf.concat([mem_pad, seg_id], 0) # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = tf.cast( tf.logical_not(tf.equal(seg_id[:, None], cat_ids[None, :])), tf.int32) seg_mat = tf.one_hot(seg_mat, 2, dtype=tf_float) else: seg_mat = None ##### Positional encoding pos_emb = relative_positional_encoding( qlen, klen, d_model, clamp_len, attn_type, bi_data, bsz=bsz, dtype=tf_float) pos_emb = tf.layers.dropout(pos_emb, dropout, training=is_training) ##### Attention layers if mems is None: mems = [None] * n_layer for i in range(n_layer): # cache new mems new_mems.append(_cache_mem(output_h, mems[i], mem_len, reuse_len)) # segment bias if seg_id is None: r_s_bias_i = None seg_embed_i = None else: r_s_bias_i = r_s_bias if not untie_r else r_s_bias[i] seg_embed_i = seg_embed[i] with tf.variable_scope('layer_{}'.format(i)): if inp_q is not None: output_h, output_g = two_stream_rel_attn( h=output_h, g=output_g, r=pos_emb, r_w_bias=r_w_bias if not untie_r else r_w_bias[i], r_r_bias=r_r_bias if not untie_r else r_r_bias[i], seg_mat=seg_mat, r_s_bias=r_s_bias_i, seg_embed=seg_embed_i, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask, mems=mems[i], target_mapping=target_mapping, d_model=d_model, n_head=n_head, d_head=d_head, dropout=dropout, dropatt=dropatt, is_training=is_training, kernel_initializer=initializer) reuse = True else: reuse = False output_h = rel_multihead_attn( h=output_h, r=pos_emb, r_w_bias=r_w_bias if not untie_r else r_w_bias[i], r_r_bias=r_r_bias if not untie_r else r_r_bias[i], seg_mat=seg_mat, r_s_bias=r_s_bias_i, seg_embed=seg_embed_i, attn_mask=non_tgt_mask, mems=mems[i], d_model=d_model, n_head=n_head, d_head=d_head, dropout=dropout, dropatt=dropatt, is_training=is_training, kernel_initializer=initializer, reuse=reuse) if inp_q is not None: output_g = positionwise_ffn( inp=output_g, d_model=d_model, d_inner=d_inner, dropout=dropout, kernel_initializer=initializer, activation_type=ff_activation, is_training=is_training) output_h = positionwise_ffn( inp=output_h, d_model=d_model, d_inner=d_inner, dropout=dropout, kernel_initializer=initializer, activation_type=ff_activation, is_training=is_training, reuse=reuse) if inp_q is not None: output = tf.layers.dropout(output_g, dropout, training=is_training) else: output = tf.layers.dropout(output_h, dropout, training=is_training) return output, new_mems, lookup_table def lm_loss(hidden, target, n_token, d_model, initializer, lookup_table=None, tie_weight=False, bi_data=True, use_tpu=False): """doc.""" with tf.variable_scope('lm_loss'): if tie_weight: assert lookup_table is not None, \ 'lookup_table cannot be None for tie_weight' softmax_w = lookup_table else: softmax_w = tf.get_variable('weight', [n_token, d_model], dtype=hidden.dtype, initializer=initializer) softmax_b = tf.get_variable('bias', [n_token], dtype=hidden.dtype, initializer=tf.zeros_initializer()) logits = tf.einsum('ibd,nd->ibn', hidden, softmax_w) + softmax_b if use_tpu: one_hot_target = tf.one_hot(target, n_token, dtype=logits.dtype) loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1) else: loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=logits) return loss def summarize_sequence(summary_type, hidden, d_model, n_head, d_head, dropout, dropatt, input_mask, is_training, initializer, scope=None, reuse=None, 
use_proj=True): """ Different classification tasks may not may not share the same parameters to summarize the sequence features. If shared, one can keep the `scope` to the default value `None`. Otherwise, one should specify a different `scope` for each task. """ with tf.variable_scope(scope, 'sequnece_summary', reuse=reuse): if summary_type == 'last': summary = hidden[-1] elif summary_type == 'first': summary = hidden[0] elif summary_type == 'mean': summary = tf.reduce_mean(hidden, axis=0) elif summary_type == 'attn': bsz = tf.shape(hidden)[1] summary_bias = tf.get_variable('summary_bias', [d_model], dtype=hidden.dtype, initializer=initializer) summary_bias = tf.tile(summary_bias[None, None], [1, bsz, 1]) if input_mask is not None: input_mask = input_mask[None, :, :, None] summary = multihead_attn(summary_bias, hidden, hidden, input_mask, d_model, n_head, d_head, dropout, dropatt, is_training, initializer, residual=False) summary = summary[0] else: raise ValueError('Unsupported summary type {}'.format(summary_type)) # use another projection as in BERT if use_proj: summary = tf.layers.dense( summary, d_model, activation=tf.tanh, kernel_initializer=initializer, name='summary') # dropout summary = tf.layers.dropout( summary, dropout, training=is_training, name='dropout') return summary def classification_loss(hidden, labels, n_class, initializer, scope, reuse=None, return_logits=False): """ Different classification tasks should use different scope names to ensure different dense layers (parameters) are used to produce the logits. An exception will be in transfer learning, where one hopes to transfer the classification weights. """ with tf.variable_scope(scope, reuse=reuse): logits = tf.layers.dense( hidden, n_class, kernel_initializer=initializer, name='logit') one_hot_target = tf.one_hot(labels, n_class, dtype=hidden.dtype) loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1) if return_logits: return loss, logits return loss def regression_loss(hidden, labels, initializer, scope, reuse=None, return_logits=False): with tf.variable_scope(scope, reuse=reuse): logits = tf.layers.dense( hidden, 1, kernel_initializer=initializer, name='logit') logits = tf.squeeze(logits, axis=-1) loss = tf.square(logits - labels) if return_logits: return loss, logits return loss
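The reshape/slice trick in `rel_shift` above is easiest to see on a tiny tensor. Below is a minimal NumPy sketch (not part of the repo) that mirrors the same steps; the toy shapes (qlen=2, no memory, so rlen=4 and klen=2) are illustrative assumptions.

import numpy as np

def rel_shift_np(x, klen):
    # NumPy mirror of the TF rel_shift above; x has shape [qlen, rlen, bsz, n_head].
    qlen, rlen, bsz, n_head = x.shape
    x = x.reshape(rlen, qlen, bsz, n_head)   # swap the leading axes via reshape
    x = x[1:]                                # drop the first row
    x = x.reshape(qlen, rlen - 1, bsz, n_head)
    return x[:, :klen]                       # keep the first klen relative positions

scores = np.arange(8, dtype=np.float32).reshape(2, 4, 1, 1)
out = rel_shift_np(scores, klen=2)
# out[i, k] == scores[i, klen - i + k]: column k of query row i now holds the
# score for relative distance i - k, which is what the attention score needs.
print(out[..., 0, 0])   # [[2. 3.]
                        #  [5. 6.]]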
ymcui/Chinese-XLNet
1,650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/prepro_utils.py
Python
# coding=utf-8 from __future__ import absolute_import from __future__ import division from __future__ import print_function import unicodedata import six from functools import partial SPIECE_UNDERLINE = '▁' def printable_text(text): """Returns text encoded in a way suitable for print or `tf.logging`.""" # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode("utf-8") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def print_(*args): new_args = [] for arg in args: if isinstance(arg, list): s = [printable_text(i) for i in arg] s = ' '.join(s) new_args.append(s) else: new_args.append(printable_text(arg)) print(*new_args) def preprocess_text(inputs, lower=False, remove_space=True, keep_accents=False): if remove_space: outputs = ' '.join(inputs.strip().split()) else: outputs = inputs outputs = outputs.replace("``", '"').replace("''", '"') if six.PY2 and isinstance(outputs, str): outputs = outputs.decode('utf-8') if not keep_accents: outputs = unicodedata.normalize('NFKD', outputs) outputs = ''.join([c for c in outputs if not unicodedata.combining(c)]) if lower: outputs = outputs.lower() return outputs def encode_pieces(sp_model, text, return_unicode=True, sample=False): # return_unicode is used only for py2 # note(zhiliny): in some systems, sentencepiece only accepts str for py2 if six.PY2 and isinstance(text, unicode): text = text.encode('utf-8') if not sample: pieces = sp_model.EncodeAsPieces(text) else: pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1) new_pieces = [] for piece in pieces: if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit(): cur_pieces = sp_model.EncodeAsPieces( piece[:-1].replace(SPIECE_UNDERLINE, '')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: cur_pieces = cur_pieces[1:] else: cur_pieces[0] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(cur_pieces) else: new_pieces.append(piece) # note(zhiliny): convert back to unicode for py2 if six.PY2 and return_unicode: ret_pieces = [] for piece in new_pieces: if isinstance(piece, str): piece = piece.decode('utf-8') ret_pieces.append(piece) new_pieces = ret_pieces return new_pieces def encode_ids(sp_model, text, sample=False): pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample) ids = [sp_model.PieceToId(piece) for piece in pieces] return ids if __name__ == '__main__': import sentencepiece as spm sp = spm.SentencePieceProcessor() sp.load('sp10m.uncased.v3.model') print_(u'I was born in 2000, and this is falsé.') print_(u'ORIGINAL', sp.EncodeAsPieces(u'I was born in 2000, and this is falsé.')) print_(u'OURS', encode_pieces(sp, u'I was born in 2000, and this is falsé.')) print(encode_ids(sp, u'I was born in 2000, and this is falsé.')) print_('') prepro_func = partial(preprocess_text, lower=True) print_(prepro_func('I was born in 2000, and this is falsé.')) print_('ORIGINAL', sp.EncodeAsPieces(prepro_func('I was born in 2000, and this is falsé.'))) print_('OURS', encode_pieces(sp, prepro_func('I was born in 2000, and this is falsé.'))) print(encode_ids(sp, prepro_func('I was born 
in 2000, and this is falsé.'))) print_('') print_('I was born in 2000, and this is falsé.') print_('ORIGINAL', sp.EncodeAsPieces('I was born in 2000, and this is falsé.')) print_('OURS', encode_pieces(sp, 'I was born in 2000, and this is falsé.')) print(encode_ids(sp, 'I was born in 2000, and this is falsé.')) print_('') print_('I was born in 92000, and this is falsé.') print_('ORIGINAL', sp.EncodeAsPieces('I was born in 92000, and this is falsé.')) print_('OURS', encode_pieces(sp, 'I was born in 92000, and this is falsé.')) print(encode_ids(sp, 'I was born in 92000, and this is falsé.'))
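For reference, a minimal sketch (not from the repo) of how these helpers are typically composed by the downstream scripts; the model path and the example sentence are assumptions.

import sentencepiece as spm
from prepro_utils import preprocess_text, encode_pieces, encode_ids

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")  # path to the Chinese SentencePiece model (assumed)

def tokenize_fn(text, uncased=False):
    # normalize whitespace and quotes (optionally lowercasing), then map to ids
    text = preprocess_text(text, lower=uncased)
    return encode_ids(sp, text)

sentence = "使用中文XLNet进行文本分类。"
print(encode_pieces(sp, preprocess_text(sentence)))
print(tokenize_fn(sentence))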
ymcui/Chinese-XLNet
1,650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/run_classifier.py
Python
from __future__ import absolute_import from __future__ import division from __future__ import print_function from os.path import join from absl import flags import os import sys import csv import collections import numpy as np import time import math import json import random from copy import copy from collections import defaultdict as dd import absl.logging as _logging # pylint: disable=unused-import import tensorflow as tf import sentencepiece as spm from data_utils import SEP_ID, VOCAB_SIZE, CLS_ID import model_utils import function_builder from classifier_utils import PaddingInputExample from classifier_utils import convert_single_example from prepro_utils import preprocess_text, encode_ids # Model flags.DEFINE_string("model_config_path", default=None, help="Model config path.") flags.DEFINE_float("dropout", default=0.1, help="Dropout rate.") flags.DEFINE_float("dropatt", default=0.1, help="Attention dropout rate.") flags.DEFINE_integer("clamp_len", default=-1, help="Clamp length") flags.DEFINE_string("summary_type", default="last", help="Method used to summarize a sequence into a compact vector.") flags.DEFINE_bool("use_summ_proj", default=True, help="Whether to use projection for summarizing sequences.") flags.DEFINE_bool("use_bfloat16", False, help="Whether to use bfloat16.") # Parameter initialization flags.DEFINE_enum("init", default="normal", enum_values=["normal", "uniform"], help="Initialization method.") flags.DEFINE_float("init_std", default=0.02, help="Initialization std when init is normal.") flags.DEFINE_float("init_range", default=0.1, help="Initialization std when init is uniform.") # I/O paths flags.DEFINE_bool("overwrite_data", default=False, help="If False, will use cached data if available.") flags.DEFINE_string("init_checkpoint", default=None, help="checkpoint path for initializing the model. " "Could be a pretrained model or a finetuned model.") flags.DEFINE_string("output_dir", default="", help="Output dir for TF records.") flags.DEFINE_string("spiece_model_file", default="", help="Sentence Piece model path.") flags.DEFINE_string("model_dir", default="", help="Directory for saving the finetuned model.") flags.DEFINE_string("data_dir", default="", help="Directory for input data.") # TPUs and machines flags.DEFINE_bool("use_tpu", default=False, help="whether to use TPU.") flags.DEFINE_integer("num_hosts", default=1, help="How many TPU hosts.") flags.DEFINE_integer("num_core_per_host", default=8, help="8 for TPU v2 and v3-8, 16 for larger TPU v3 pod. In the context " "of GPU training, it refers to the number of GPUs used.") flags.DEFINE_string("tpu_job_name", default=None, help="TPU worker job name.") flags.DEFINE_string("tpu", default=None, help="TPU name.") flags.DEFINE_string("tpu_zone", default=None, help="TPU zone.") flags.DEFINE_string("gcp_project", default=None, help="gcp project.") flags.DEFINE_string("master", default=None, help="master") flags.DEFINE_integer("iterations", default=1000, help="number of iterations per TPU training loop.") # training flags.DEFINE_bool("do_train", default=False, help="whether to do training") flags.DEFINE_integer("train_steps", default=1000, help="Number of training steps") flags.DEFINE_integer("num_train_epochs", default=0, help="Number of training steps") flags.DEFINE_integer("warmup_steps", default=0, help="number of warmup steps") flags.DEFINE_float("learning_rate", default=1e-5, help="initial learning rate") flags.DEFINE_float("lr_layer_decay_rate", 1.0, "Top layer: lr[L] = FLAGS.learning_rate." 
"Low layer: lr[l-1] = lr[l] * lr_layer_decay_rate.") flags.DEFINE_float("min_lr_ratio", default=0.0, help="min lr ratio for cos decay.") flags.DEFINE_float("clip", default=1.0, help="Gradient clipping") flags.DEFINE_integer("max_save", default=0, help="Max number of checkpoints to save. Use 0 to save all.") flags.DEFINE_integer("save_steps", default=None, help="Save the model for every save_steps. " "If None, not to save any model.") flags.DEFINE_integer("train_batch_size", default=8, help="Batch size for training") flags.DEFINE_float("weight_decay", default=0.00, help="Weight decay rate") flags.DEFINE_float("adam_epsilon", default=1e-8, help="Adam epsilon") flags.DEFINE_string("decay_method", default="poly", help="poly or cos") # evaluation flags.DEFINE_bool("do_eval", default=False, help="whether to do eval") flags.DEFINE_bool("do_predict", default=False, help="whether to do prediction") flags.DEFINE_float("predict_threshold", default=0, help="Threshold for binary prediction.") flags.DEFINE_string("eval_split", default="dev", help="could be dev or test") flags.DEFINE_integer("eval_batch_size", default=128, help="batch size for evaluation") flags.DEFINE_integer("predict_batch_size", default=128, help="batch size for prediction.") flags.DEFINE_string("predict_dir", default=None, help="Dir for saving prediction files.") flags.DEFINE_bool("eval_all_ckpt", default=False, help="Eval all ckpts. If False, only evaluate the last one.") flags.DEFINE_string("predict_ckpt", default=None, help="Ckpt path for do_predict. If None, use the last one.") # task specific flags.DEFINE_string("task_name", default=None, help="Task name") flags.DEFINE_integer("max_seq_length", default=128, help="Max sequence length") flags.DEFINE_integer("shuffle_buffer", default=2048, help="Buffer size used for shuffle.") flags.DEFINE_integer("num_passes", default=1, help="Num passes for processing training data. " "This is use to batch data without loss for TPUs.") flags.DEFINE_bool("uncased", default=False, help="Use uncased.") flags.DEFINE_string("cls_scope", default=None, help="Classifier layer scope.") flags.DEFINE_bool("is_regression", default=False, help="Whether it's a regression task.") FLAGS = flags.FLAGS class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. 
""" self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with tf.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if len(line) == 0: continue lines.append(line) return lines class GLUEProcessor(DataProcessor): def __init__(self): self.train_file = "train.tsv" self.dev_file = "dev.tsv" self.test_file = "test.tsv" self.label_column = None self.text_a_column = None self.text_b_column = None self.contains_header = True self.test_text_a_column = None self.test_text_b_column = None self.test_contains_header = True def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, self.train_file)), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, self.dev_file)), "dev") def get_test_examples(self, data_dir): """See base class.""" if self.test_text_a_column is None: self.test_text_a_column = self.text_a_column if self.test_text_b_column is None: self.test_text_b_column = self.text_b_column return self._create_examples( self._read_tsv(os.path.join(data_dir, self.test_file)), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0 and self.contains_header and set_type != "test": continue if i == 0 and self.test_contains_header and set_type == "test": continue guid = "%s-%s" % (set_type, i) a_column = (self.text_a_column if set_type != "test" else self.test_text_a_column) b_column = (self.text_b_column if set_type != "test" else self.test_text_b_column) # there are some incomplete lines in QNLI if len(line) <= a_column: tf.logging.warning('Incomplete line, ignored.') continue text_a = line[a_column] if b_column is not None: if len(line) <= b_column: tf.logging.warning('Incomplete line, ignored.') continue text_b = line[b_column] else: text_b = None if set_type == "test": label = self.get_labels()[0] else: if len(line) <= self.label_column: tf.logging.warning('Incomplete line, ignored.') continue label = line[self.label_column] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class Yelp5Processor(DataProcessor): def get_train_examples(self, data_dir): return self._create_examples(os.path.join(data_dir, "train.csv")) def get_dev_examples(self, data_dir): return self._create_examples(os.path.join(data_dir, "test.csv")) def get_labels(self): """See base class.""" return ["1", "2", "3", "4", "5"] def _create_examples(self, input_file): """Creates examples for the training and dev sets.""" examples = [] with tf.gfile.Open(input_file) as f: reader = csv.reader(f) 
for i, line in enumerate(reader): label = line[0] text_a = line[1].replace('""', '"').replace('\\"', '"') examples.append( InputExample(guid=str(i), text_a=text_a, text_b=None, label=label)) return examples class ImdbProcessor(DataProcessor): def get_labels(self): return ["neg", "pos"] def get_train_examples(self, data_dir): return self._create_examples(os.path.join(data_dir, "train")) def get_dev_examples(self, data_dir): return self._create_examples(os.path.join(data_dir, "test")) def _create_examples(self, data_dir): examples = [] for label in ["neg", "pos"]: cur_dir = os.path.join(data_dir, label) for filename in tf.gfile.ListDirectory(cur_dir): if not filename.endswith("txt"): continue path = os.path.join(cur_dir, filename) with tf.gfile.Open(path) as f: text = f.read().strip().replace("<br />", " ") examples.append(InputExample( guid="unused_id", text_a=text, text_b=None, label=label)) return examples class MnliMatchedProcessor(GLUEProcessor): def __init__(self): super(MnliMatchedProcessor, self).__init__() self.dev_file = "dev_matched.tsv" self.test_file = "test_matched.tsv" self.label_column = -1 self.text_a_column = 8 self.text_b_column = 9 def get_labels(self): return ["contradiction", "entailment", "neutral"] class XnliProcessor(DataProcessor): def __init__(self): self.language = "zh" def get_train_examples(self, data_dir, set_type="train"): """See base class.""" train_file = os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" % self.language) lines = self._read_tsv(train_file) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[0].replace(' ','') text_b = line[1].replace(' ','') label = line[2] if label == "contradictory": label = "contradiction" examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_devtest_examples(self, data_dir, set_type="dev"): """See base class.""" devtest_file = os.path.join(data_dir, "xnli."+set_type+".tsv") tf.logging.info("using file %s" % devtest_file) lines = self._read_tsv(devtest_file) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) language = line[0] if language != self.language: continue text_a = line[6].replace(' ','') text_b = line[7].replace(' ','') label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] class CSCProcessor(DataProcessor): def get_labels(self): return ["0", "1"] def get_train_examples(self, data_dir): set_type = "train" input_file = os.path.join(data_dir, set_type+".tsv") tf.logging.info("using file %s" % input_file) lines = self._read_tsv(input_file) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[1] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def get_devtest_examples(self, data_dir, set_type="dev"): input_file = os.path.join(data_dir, set_type+".tsv") tf.logging.info("using file %s" % input_file) lines = self._read_tsv(input_file) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[1] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class CSVProcessor(DataProcessor): def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated 
value file.""" with tf.gfile.Open(input_file, "r") as f: reader = csv.reader(f) lines = [] for line in reader: if len(line) == 0: continue lines.append(line) return lines def get_labels(self): return ["0", "1"] def get_train_examples(self, data_dir): set_type = "train" input_file = os.path.join(data_dir, set_type + ".csv") tf.logging.info("using file %s" % input_file) lines = self._read_tsv(input_file) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[0] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def get_devtest_examples(self, data_dir, set_type="dev"): input_file = os.path.join(data_dir, set_type + ".csv") tf.logging.info("using file %s" % input_file) lines = self._read_tsv(input_file) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[0] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class MnliMismatchedProcessor(MnliMatchedProcessor): def __init__(self): super(MnliMismatchedProcessor, self).__init__() self.dev_file = "dev_mismatched.tsv" self.test_file = "test_mismatched.tsv" class StsbProcessor(GLUEProcessor): def __init__(self): super(StsbProcessor, self).__init__() self.label_column = 9 self.text_a_column = 7 self.text_b_column = 8 def get_labels(self): return [0.0] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0 and self.contains_header and set_type != "test": continue if i == 0 and self.test_contains_header and set_type == "test": continue guid = "%s-%s" % (set_type, i) a_column = (self.text_a_column if set_type != "test" else self.test_text_a_column) b_column = (self.text_b_column if set_type != "test" else self.test_text_b_column) # there are some incomplete lines in QNLI if len(line) <= a_column: tf.logging.warning('Incomplete line, ignored.') continue text_a = line[a_column] if b_column is not None: if len(line) <= b_column: tf.logging.warning('Incomplete line, ignored.') continue text_b = line[b_column] else: text_b = None if set_type == "test": label = self.get_labels()[0] else: if len(line) <= self.label_column: tf.logging.warning('Incomplete line, ignored.') continue label = float(line[self.label_column]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenize_fn, output_file, num_passes=1): """Convert a set of `InputExample`s to a TFRecord file.""" # do not create duplicated records if tf.gfile.Exists(output_file) and not FLAGS.overwrite_data: tf.logging.info("Do not overwrite tfrecord {} exists.".format(output_file)) return tf.logging.info("Create new tfrecord {}.".format(output_file)) writer = tf.python_io.TFRecordWriter(output_file) if num_passes > 1: examples *= num_passes for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.logging.info("Writing example {} of {}".format(ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenize_fn) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f def create_float_feature(values): f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) return f features = 
collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_float_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) if label_list is not None: features["label_ids"] = create_int_feature([feature.label_id]) else: features["label_ids"] = create_float_feature([float(feature.label_id)]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.float32), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), "is_real_example": tf.FixedLenFeature([], tf.int64), } if FLAGS.is_regression: name_to_features["label_ids"] = tf.FixedLenFeature([], tf.float32) tf.logging.info("Input tfrecord file {}".format(input_file)) def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.cast(t, tf.int32) example[name] = t return example def input_fn(params, input_context=None): """The actual input function.""" if FLAGS.use_tpu: batch_size = params["batch_size"] elif is_training: batch_size = FLAGS.train_batch_size elif FLAGS.do_eval: batch_size = FLAGS.eval_batch_size else: batch_size = FLAGS.predict_batch_size d = tf.data.TFRecordDataset(input_file) # Shard the dataset to difference devices if input_context is not None: tf.logging.info("Input pipeline id %d out of %d", input_context.input_pipeline_id, input_context.num_replicas_in_sync) d = d.shard(input_context.num_input_pipelines, input_context.input_pipeline_id) # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. 
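    # The training dataset is repeated indefinitely below; the number of passes
    # over the data is therefore controlled by the `max_steps` passed to
    # estimator.train (FLAGS.train_steps), not by the dataset itself.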
if is_training: d = d.shuffle(buffer_size=FLAGS.shuffle_buffer) d = d.repeat() d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def get_model_fn(n_class): def model_fn(features, labels, mode, params): #### Training or Evaluation is_training = (mode == tf.estimator.ModeKeys.TRAIN) #### Get loss from inputs if FLAGS.is_regression: (total_loss, per_example_loss, logits ) = function_builder.get_regression_loss(FLAGS, features, is_training) else: (total_loss, per_example_loss, logits ) = function_builder.get_classification_loss( FLAGS, features, n_class, is_training) #### Check model parameters num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()]) tf.logging.info('#params: {}'.format(num_params)) #### load pretrained models scaffold_fn = model_utils.init_from_checkpoint(FLAGS) #### Evaluation mode if mode == tf.estimator.ModeKeys.EVAL: assert FLAGS.num_hosts == 1 def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) eval_input_dict = { 'labels': label_ids, 'predictions': predictions, 'weights': is_real_example } accuracy = tf.metrics.accuracy(**eval_input_dict) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return { 'eval_accuracy': accuracy, 'eval_loss': loss} def regression_metric_fn( per_example_loss, label_ids, logits, is_real_example): loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) pearsonr = tf.contrib.metrics.streaming_pearson_correlation( logits, label_ids, weights=is_real_example) return {'eval_loss': loss, 'eval_pearsonr': pearsonr} is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) #### Constucting evaluation TPUEstimatorSpec with new cache. label_ids = tf.reshape(features['label_ids'], [-1]) if FLAGS.is_regression: metric_fn = regression_metric_fn else: metric_fn = metric_fn metric_args = [per_example_loss, label_ids, logits, is_real_example] if FLAGS.use_tpu: eval_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=(metric_fn, metric_args), scaffold_fn=scaffold_fn) else: eval_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, eval_metric_ops=metric_fn(*metric_args)) return eval_spec elif mode == tf.estimator.ModeKeys.PREDICT: label_ids = tf.reshape(features["label_ids"], [-1]) predictions = { "logits": logits, "labels": label_ids, "is_real": features["is_real_example"] } if FLAGS.use_tpu: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) else: output_spec = tf.estimator.EstimatorSpec( mode=mode, predictions=predictions) return output_spec #### Configuring the optimizer train_op, learning_rate, _ = model_utils.get_train_op(FLAGS, total_loss) monitor_dict = {} monitor_dict["lr"] = learning_rate #### Constucting training TPUEstimatorSpec with new cache. 
if FLAGS.use_tpu: #### Creating host calls if not FLAGS.is_regression: label_ids = tf.reshape(features['label_ids'], [-1]) predictions = tf.argmax(logits, axis=-1, output_type=label_ids.dtype) is_correct = tf.equal(predictions, label_ids) accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32)) monitor_dict["accuracy"] = accuracy host_call = function_builder.construct_scalar_host_call( monitor_dict=monitor_dict, model_dir=FLAGS.model_dir, prefix="train/", reduce_fn=tf.reduce_mean) else: host_call = None train_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, host_call=host_call, scaffold_fn=scaffold_fn) else: train_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op) return train_spec return model_fn def main(_): tf.logging.set_verbosity(tf.logging.INFO) #### Validate flags if FLAGS.save_steps is not None: FLAGS.iterations = min(FLAGS.iterations, FLAGS.save_steps) if FLAGS.do_predict: predict_dir = FLAGS.predict_dir if not tf.gfile.Exists(predict_dir): tf.gfile.MakeDirs(predict_dir) processors = { "mnli_matched": MnliMatchedProcessor, "mnli_mismatched": MnliMismatchedProcessor, 'sts-b': StsbProcessor, 'imdb': ImdbProcessor, "yelp5": Yelp5Processor, "xnli": XnliProcessor, "csc": CSCProcessor, "csv": CSVProcessor, } if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval, `do_predict` or " "`do_submit` must be True.") if not tf.gfile.Exists(FLAGS.output_dir): tf.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() if not FLAGS.is_regression else None sp = spm.SentencePieceProcessor() sp.Load(FLAGS.spiece_model_file) def tokenize_fn(text): text = preprocess_text(text, lower=FLAGS.uncased) return encode_ids(sp, text) run_config = model_utils.configure_tpu(FLAGS) model_fn = get_model_fn(len(label_list) if label_list is not None else None) spm_basename = os.path.basename(FLAGS.spiece_model_file) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
if FLAGS.use_tpu: estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, predict_batch_size=FLAGS.predict_batch_size, eval_batch_size=FLAGS.eval_batch_size) else: estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config) if FLAGS.do_train: train_file_base = "{}.len-{}.train.tf_record".format( spm_basename, FLAGS.max_seq_length) train_file = os.path.join(FLAGS.output_dir, train_file_base) tf.logging.info("Use tfrecord file {}".format(train_file)) train_examples = processor.get_train_examples(FLAGS.data_dir) np.random.shuffle(train_examples) tf.logging.info("Num of train samples: {}".format(len(train_examples))) file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenize_fn, train_file, FLAGS.num_passes) # here we use epoch number to calculate total train_steps FLAGS.train_steps = int(len(train_examples) * FLAGS.num_train_epochs / FLAGS.train_batch_size) FLAGS.warmup_steps = int(0.1 * FLAGS.train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_steps) if FLAGS.do_eval or FLAGS.do_predict: eval_examples = processor.get_devtest_examples(FLAGS.data_dir, FLAGS.eval_split) tf.logging.info("Num of eval samples: {}".format(len(eval_examples))) if FLAGS.do_eval: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. These do NOT count towards the metric (all tf.metrics # support a per-instance weight, and these get a weight of 0.0). # # Modified in XL: We also adopt the same mechanism for GPUs. 
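    # The padding entries are PaddingInputExample instances; convert_single_example
    # marks them with is_real_example = 0, and metric_fn above uses that field as a
    # per-example weight, so the padded rows do not affect the reported metrics.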
while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(PaddingInputExample()) eval_file_base = "{}.len-{}.{}.eval.tf_record".format( spm_basename, FLAGS.max_seq_length, FLAGS.eval_split) eval_file = os.path.join(FLAGS.output_dir, eval_file_base) file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenize_fn, eval_file) assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=True) # Filter out all checkpoints in the directory steps_and_files = [] filenames = tf.gfile.ListDirectory(FLAGS.model_dir) for filename in filenames: if filename.endswith(".index"): ckpt_name = filename[:-6] tf.logging.info(f"ckpt_name: {ckpt_name}") cur_filename = join(FLAGS.model_dir, ckpt_name) step = cur_filename.split("-")[-1] if step.isdigit(): global_step = int(step) tf.logging.info("Add {} to eval list.".format(cur_filename)) steps_and_files.append([global_step, cur_filename]) steps_and_files = sorted(steps_and_files, key=lambda x: x[0]) # Decide whether to evaluate all ckpts if not FLAGS.eval_all_ckpt: steps_and_files = steps_and_files[-1:] eval_results = [] for global_step, filename in sorted(steps_and_files, key=lambda x: x[0]): ret = estimator.evaluate( input_fn=eval_input_fn, steps=eval_steps, checkpoint_path=filename) ret["step"] = global_step ret["path"] = filename eval_results.append(ret) tf.logging.info("=" * 80) log_str = "Eval result | " for key, val in sorted(ret.items(), key=lambda x: x[0]): log_str += "{} {} | ".format(key, val) tf.logging.info(log_str) key_name = "eval_pearsonr" if FLAGS.is_regression else "eval_accuracy" eval_results.sort(key=lambda x: x[key_name], reverse=True) tf.logging.info("=" * 80) log_str = "Best result | " for key, val in sorted(eval_results[0].items(), key=lambda x: x[0]): log_str += "{} {} | ".format(key, val) tf.logging.info(log_str) if FLAGS.do_predict: eval_file_base = "{}.len-{}.{}.predict.tf_record".format( spm_basename, FLAGS.max_seq_length, FLAGS.eval_split) eval_file = os.path.join(FLAGS.output_dir, eval_file_base) file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenize_fn, eval_file) pred_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=False) predict_results = [] with tf.gfile.Open(os.path.join(predict_dir, "{}.tsv".format( task_name)), "w") as fout: fout.write("index\tprediction\n") for pred_cnt, result in enumerate(estimator.predict( input_fn=pred_input_fn, yield_single_examples=True, checkpoint_path=FLAGS.predict_ckpt)): if pred_cnt % 1000 == 0: tf.logging.info("Predicting submission for example: {}".format( pred_cnt)) logits = [float(x) for x in result["logits"].flat] predict_results.append(logits) if len(logits) == 1: label_out = logits[0] elif len(logits) == 2: if logits[1] - logits[0] > FLAGS.predict_threshold: label_out = label_list[1] else: label_out = label_list[0] elif len(logits) > 2: max_index = np.argmax(np.array(logits, dtype=np.float32)) label_out = label_list[max_index] else: raise NotImplementedError fout.write("{}\t{}\n".format(pred_cnt, label_out)) predict_json_path = os.path.join(predict_dir, "{}.logits.json".format( task_name)) with tf.gfile.Open(predict_json_path, "w") as fp: json.dump(predict_results, fp, indent=4) if __name__ == "__main__": tf.app.run()
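Adding a new classification task follows the same pattern as the processors above. Below is a minimal sketch (not part of the repo) for a two-class TSV task; the file names and the `label<TAB>text` column layout are assumptions for illustration.

import os

class MyBinaryProcessor(DataProcessor):
    """Hypothetical processor for a `label<TAB>text` two-class dataset."""

    def get_labels(self):
        return ["0", "1"]

    def _create(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue  # skip the header row
            examples.append(InputExample(
                guid="%s-%d" % (set_type, i),
                text_a=line[1], text_b=None, label=line[0]))
        return examples

    def get_train_examples(self, data_dir):
        return self._create(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_devtest_examples(self, data_dir, set_type="dev"):
        return self._create(
            self._read_tsv(os.path.join(data_dir, set_type + ".tsv")), set_type)

# Registering it would only require adding an entry to the `processors` dict in
# main(), e.g. "mytask": MyBinaryProcessor, and then passing --task_name=mytask.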
ymcui/Chinese-XLNet
1,650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/run_cmrc_drcd.py
Python
# coding=utf-8 from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import flags import absl.logging as _logging # pylint: disable=unused-import import collections import os import time import math import json import six import random import gc import numpy as np if six.PY2: import cPickle as pickle else: import pickle import tensorflow as tf import sentencepiece as spm from prepro_utils import preprocess_text, encode_ids, encode_pieces, printable_text import function_builder import model_utils import squad_utils from data_utils import SEP_ID, CLS_ID, VOCAB_SIZE SPIECE_UNDERLINE = u'▁' SEG_ID_P = 0 SEG_ID_Q = 1 SEG_ID_CLS = 2 SEG_ID_PAD = 3 # Preprocessing flags.DEFINE_bool("do_prepro", default=False, help="Perform preprocessing only.") flags.DEFINE_integer("num_proc", default=1, help="Number of preprocessing processes.") flags.DEFINE_integer("proc_id", default=0, help="Process id for preprocessing.") # Model flags.DEFINE_string("model_config_path", default=None, help="Model config path.") flags.DEFINE_float("dropout", default=0.1, help="Dropout rate.") flags.DEFINE_float("dropatt", default=0.1, help="Attention dropout rate.") flags.DEFINE_integer("clamp_len", default=-1, help="Clamp length.") flags.DEFINE_string("summary_type", default="last", help="Method used to summarize a sequence into a vector.") flags.DEFINE_bool("use_bfloat16", default=False, help="Whether to use bfloat16.") # Parameter initialization flags.DEFINE_enum("init", default="normal", enum_values=["normal", "uniform"], help="Initialization method.") flags.DEFINE_float("init_std", default=0.02, help="Initialization std when init is normal.") flags.DEFINE_float("init_range", default=0.1, help="Initialization std when init is uniform.") # I/O paths flags.DEFINE_bool("overwrite_data", default=False, help="If False, will use cached data if available.") flags.DEFINE_string("init_checkpoint", default=None, help="checkpoint path for initializing the model. " "Could be a pretrained model or a finetuned model.") flags.DEFINE_bool("init_global_vars", default=False, help="If true, init all global vars. If false, init " "trainable vars only.") flags.DEFINE_string("output_dir", default="", help="Output dir for TF records.") flags.DEFINE_string("predict_dir", default="", help="Dir for predictions.") flags.DEFINE_string("spiece_model_file", default="", help="Sentence Piece model path.") flags.DEFINE_string("model_dir", default="", help="Directory for saving the finetuned model.") flags.DEFINE_string("train_file", default="", help="Path of train file.") flags.DEFINE_string("predict_file", default="", help="Path of prediction file.") # Data preprocessing config flags.DEFINE_integer("max_seq_length", default=512, help="Max sequence length") flags.DEFINE_integer("max_query_length", default=64, help="Max query length") flags.DEFINE_integer("doc_stride", default=128, help="Doc stride") flags.DEFINE_integer("max_answer_length", default=64, help="Max answer length") flags.DEFINE_bool("uncased", default=False, help="Use uncased data.") # TPUs and machines flags.DEFINE_bool("use_tpu", default=False, help="whether to use TPU.") flags.DEFINE_integer("num_hosts", default=1, help="How many TPU hosts.") flags.DEFINE_integer("num_core_per_host", default=8, help="8 for TPU v2 and v3-8, 16 for larger TPU v3 pod. 
In the context " "of GPU training, it refers to the number of GPUs used.") flags.DEFINE_string("tpu_job_name", default=None, help="TPU worker job name.") flags.DEFINE_string("tpu", default=None, help="TPU name.") flags.DEFINE_string("tpu_zone", default=None, help="TPU zone.") flags.DEFINE_string("gcp_project", default=None, help="gcp project.") flags.DEFINE_string("master", default=None, help="master") flags.DEFINE_integer("iterations", default=1000, help="number of iterations per TPU training loop.") # Training flags.DEFINE_bool("do_train", default=True, help="whether to do training") flags.DEFINE_integer("train_batch_size", default=48, help="batch size for training") flags.DEFINE_integer("train_steps", default=8000, help="Number of training steps") flags.DEFINE_integer("warmup_steps", default=0, help="number of warmup steps") flags.DEFINE_integer("save_steps", default=None, help="Save the model for every save_steps. " "If None, not to save any model.") flags.DEFINE_integer("max_save", default=5, help="Max number of checkpoints to save. " "Use 0 to save all.") flags.DEFINE_integer("shuffle_buffer", default=2048, help="Buffer size used for shuffle.") # Optimization flags.DEFINE_float("learning_rate", default=3e-5, help="initial learning rate") flags.DEFINE_float("min_lr_ratio", default=0.0, help="min lr ratio for cos decay.") flags.DEFINE_float("clip", default=1.0, help="Gradient clipping") flags.DEFINE_float("weight_decay", default=0.00, help="Weight decay rate") flags.DEFINE_float("adam_epsilon", default=1e-6, help="Adam epsilon") flags.DEFINE_string("decay_method", default="poly", help="poly or cos") flags.DEFINE_float("lr_layer_decay_rate", default=0.75, help="Top layer: lr[L] = FLAGS.learning_rate." "Lower layers: lr[l-1] = lr[l] * lr_layer_decay_rate.") # Eval / Prediction flags.DEFINE_bool("do_predict", default=False, help="whether to do predict") flags.DEFINE_integer("predict_batch_size", default=32, help="batch size for prediction") flags.DEFINE_integer("n_best_size", default=5, help="n best size for predictions") flags.DEFINE_integer("start_n_top", default=5, help="Beam size for span start.") flags.DEFINE_integer("end_n_top", default=5, help="Beam size for span end.") flags.DEFINE_string("target_eval_key", default="best_f1", help="Use has_ans_f1 for Model I.") FLAGS = flags.FLAGS class SquadExample(object): """A single training/test example for simple sequence classification. For examples without an answer, the start and end position are -1. 
""" def __init__(self, qas_id, question_text, paragraph_text, orig_answer_text=None, start_position=None, is_impossible=False): self.qas_id = qas_id self.question_text = question_text self.paragraph_text = paragraph_text self.orig_answer_text = orig_answer_text self.start_position = start_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (printable_text(self.qas_id)) s += ", question_text: %s" % ( printable_text(self.question_text)) s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.start_position: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tok_start_to_orig_index, tok_end_to_orig_index, token_is_max_context, input_ids, input_mask, p_mask, segment_ids, paragraph_len, cls_index, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tok_start_to_orig_index = tok_start_to_orig_index self.tok_end_to_orig_index = tok_end_to_orig_index self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.p_mask = p_mask self.segment_ids = segment_ids self.paragraph_len = paragraph_len self.cls_index = cls_index self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_examples(input_file, is_training): """Read a SQuAD json file into a list of SquadExample.""" with tf.gfile.Open(input_file, "r") as reader: input_data = json.load(reader)["data"] examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None orig_answer_text = None is_impossible = False if is_training: if "is_impossible" in qa: is_impossible = qa["is_impossible"] else: is_impossible = False if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] start_position = answer["answer_start"] else: start_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, paragraph_text=paragraph_text, orig_answer_text=orig_answer_text, start_position=start_position, is_impossible=is_impossible) examples.append(example) return examples def _convert_index(index, pos, M=None, is_start=True): if pos >= len(index): pos = len(index) - 1 if index[pos] is not None: return index[pos] N = len(index) rear = pos while rear < N - 1 and index[rear] is None: rear += 1 front = pos while front > 0 and index[front] is None: front -= 1 assert index[front] is not None or index[rear] is not None if index[front] is None: if index[rear] >= 1: if is_start: return 0 else: return index[rear] - 1 return index[rear] if index[rear] is None: if M is not None and index[front] < M - 1: if is_start: return index[front] + 1 else: return M - 1 return index[front] if is_start: if index[rear] > index[front] + 1: return index[front] + 1 else: return index[rear] else: if index[rear] > index[front] + 1: return index[rear] - 1 else: return index[front] def convert_examples_to_features(examples, sp_model, 
max_seq_length, doc_stride, max_query_length, is_training, output_fn): """Loads a data file into a list of `InputBatch`s.""" cnt_pos, cnt_neg = 0, 0 unique_id = 1000000000 max_N, max_M = 1024, 1024 f = np.zeros((max_N, max_M), dtype=np.float32) for (example_index, example) in enumerate(examples): if example_index % 100 == 0: tf.logging.info('Converting {}/{} pos {} neg {}'.format( example_index, len(examples), cnt_pos, cnt_neg)) query_tokens = encode_ids( sp_model, preprocess_text(example.question_text, lower=FLAGS.uncased)) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] paragraph_text = example.paragraph_text para_tokens = encode_pieces( sp_model, preprocess_text(example.paragraph_text, lower=FLAGS.uncased)) chartok_to_tok_index = [] tok_start_to_chartok_index = [] tok_end_to_chartok_index = [] char_cnt = 0 for i, token in enumerate(para_tokens): chartok_to_tok_index.extend([i] * len(token)) tok_start_to_chartok_index.append(char_cnt) char_cnt += len(token) tok_end_to_chartok_index.append(char_cnt - 1) tok_cat_text = ''.join(para_tokens).replace(SPIECE_UNDERLINE, ' ') N, M = len(paragraph_text), len(tok_cat_text) if N > max_N or M > max_M: max_N = max(N, max_N) max_M = max(M, max_M) f = np.zeros((max_N, max_M), dtype=np.float32) gc.collect() g = {} def _lcs_match(max_dist): f.fill(0) g.clear() ### longest common sub sequence # f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j)) for i in range(N): # note(zhiliny): # unlike standard LCS, this is specifically optimized for the setting # because the mismatch between sentence pieces and original text will # be small for j in range(i - max_dist, i + max_dist): if j >= M or j < 0: continue if i > 0: g[(i, j)] = 0 f[i, j] = f[i - 1, j] if j > 0 and f[i, j - 1] > f[i, j]: g[(i, j)] = 1 f[i, j] = f[i, j - 1] f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0 if (preprocess_text(paragraph_text[i], lower=FLAGS.uncased, remove_space=False) == tok_cat_text[j] and f_prev + 1 > f[i, j]): g[(i, j)] = 2 f[i, j] = f_prev + 1 max_dist = abs(N - M) + 5 for _ in range(2): _lcs_match(max_dist) if f[N - 1, M - 1] > 0.8 * N: break max_dist *= 2 orig_to_chartok_index = [None] * N chartok_to_orig_index = [None] * M i, j = N - 1, M - 1 while i >= 0 and j >= 0: if (i, j) not in g: break if g[(i, j)] == 2: orig_to_chartok_index[i] = j chartok_to_orig_index[j] = i i, j = i - 1, j - 1 elif g[(i, j)] == 1: j = j - 1 else: i = i - 1 if all(v is None for v in orig_to_chartok_index) or f[N - 1, M - 1] < 0.8 * N: print('MISMATCH DETECTED!') continue tok_start_to_orig_index = [] tok_end_to_orig_index = [] for i in range(len(para_tokens)): start_chartok_pos = tok_start_to_chartok_index[i] end_chartok_pos = tok_end_to_chartok_index[i] start_orig_pos = _convert_index(chartok_to_orig_index, start_chartok_pos, N, is_start=True) end_orig_pos = _convert_index(chartok_to_orig_index, end_chartok_pos, N, is_start=False) tok_start_to_orig_index.append(start_orig_pos) tok_end_to_orig_index.append(end_orig_pos) if not is_training: tok_start_position = tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: start_position = example.start_position end_position = start_position + len(example.orig_answer_text) - 1 start_chartok_pos = _convert_index(orig_to_chartok_index, start_position, is_start=True) tok_start_position = chartok_to_tok_index[start_chartok_pos] end_chartok_pos = _convert_index(orig_to_chartok_index, end_position, 
is_start=False) tok_end_position = chartok_to_tok_index[end_chartok_pos] assert tok_start_position <= tok_end_position def _piece_to_id(x): if six.PY2 and isinstance(x, unicode): x = x.encode('utf-8') return sp_model.PieceToId(x) all_doc_tokens = list(map(_piece_to_id, para_tokens)) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_is_max_context = {} segment_ids = [] p_mask = [] cur_tok_start_to_orig_index = [] cur_tok_end_to_orig_index = [] for i in range(doc_span.length): split_token_index = doc_span.start + i cur_tok_start_to_orig_index.append( tok_start_to_orig_index[split_token_index]) cur_tok_end_to_orig_index.append( tok_end_to_orig_index[split_token_index]) is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(SEG_ID_P) p_mask.append(0) paragraph_len = len(tokens) tokens.append(SEP_ID) segment_ids.append(SEG_ID_P) p_mask.append(1) # note(zhiliny): we put P before Q # because during pretraining, B is always shorter than A for token in query_tokens: tokens.append(token) segment_ids.append(SEG_ID_Q) p_mask.append(1) tokens.append(SEP_ID) segment_ids.append(SEG_ID_Q) p_mask.append(1) cls_index = len(segment_ids) tokens.append(CLS_ID) segment_ids.append(SEG_ID_CLS) p_mask.append(0) input_ids = tokens # The mask has 0 for real tokens and 1 for padding tokens. Only real # tokens are attended to. input_mask = [0] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(1) segment_ids.append(SEG_ID_PAD) p_mask.append(1) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(p_mask) == max_seq_length span_is_impossible = example.is_impossible start_position = None end_position = None if is_training and not span_is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: # continue start_position = 0 end_position = 0 span_is_impossible = True else: # note(zhiliny): we put P before Q, so doc_offset should be zero. 
# doc_offset = len(query_tokens) + 2 doc_offset = 0 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and span_is_impossible: start_position = cls_index end_position = cls_index if example_index < 20: tf.logging.info("*** Example ***") tf.logging.info("unique_id: %s" % (unique_id)) tf.logging.info("example_index: %s" % (example_index)) tf.logging.info("doc_span_index: %s" % (doc_span_index)) tf.logging.info("tok_start_to_orig_index: %s" % " ".join( [str(x) for x in cur_tok_start_to_orig_index])) tf.logging.info("tok_end_to_orig_index: %s" % " ".join( [str(x) for x in cur_tok_end_to_orig_index])) tf.logging.info("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) ])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info( "input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) if is_training and span_is_impossible: tf.logging.info("impossible example span") if is_training and not span_is_impossible: pieces = [sp_model.IdToPiece(token) for token in tokens[start_position: (end_position + 1)]] answer_text = sp_model.DecodePieces(pieces) tf.logging.info("start_position: %d" % (start_position)) tf.logging.info("end_position: %d" % (end_position)) tf.logging.info( "answer: %s" % (printable_text(answer_text))) # note(zhiliny): With multi processing, # the example_index is actually the index within the current process # therefore we use example_index=None to avoid being used in the future. # The current code does not use example_index of training data. if is_training: feat_example_index = None else: feat_example_index = example_index feature = InputFeatures( unique_id=unique_id, example_index=feat_example_index, doc_span_index=doc_span_index, tok_start_to_orig_index=cur_tok_start_to_orig_index, tok_end_to_orig_index=cur_tok_end_to_orig_index, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, p_mask=p_mask, segment_ids=segment_ids, paragraph_len=paragraph_len, cls_index=cls_index, start_position=start_position, end_position=end_position, is_impossible=span_is_impossible) # Run callback output_fn(feature) unique_id += 1 if span_is_impossible: cnt_neg += 1 else: cnt_pos += 1 tf.logging.info("Total number of instances: {} = pos {} neg {}".format( cnt_pos + cnt_neg, cnt_pos, cnt_neg)) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. 
best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index class FeatureWriter(object): """Writes InputFeature to TF example file.""" def __init__(self, filename, is_training): self.filename = filename self.is_training = is_training self.num_features = 0 self._writer = tf.python_io.TFRecordWriter(filename) def process_feature(self, feature): """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" self.num_features += 1 def create_int_feature(values): feature = tf.train.Feature( int64_list=tf.train.Int64List(value=list(values))) return feature def create_float_feature(values): f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) return f features = collections.OrderedDict() features["unique_ids"] = create_int_feature([feature.unique_id]) features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_float_feature(feature.input_mask) features["p_mask"] = create_float_feature(feature.p_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["cls_index"] = create_int_feature([feature.cls_index]) if self.is_training: features["start_positions"] = create_int_feature([feature.start_position]) features["end_positions"] = create_int_feature([feature.end_position]) impossible = 0 if feature.is_impossible: impossible = 1 features["is_impossible"] = create_float_feature([impossible]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) self._writer.write(tf_example.SerializeToString()) def close(self): self._writer.close() RawResult = collections.namedtuple("RawResult", ["unique_id", "start_top_log_probs", "start_top_index", "end_top_log_probs", "end_top_index"]) _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, orig_data): """Write final predictions to the json file and log-odds of null if needed.""" tf.logging.info("Writing predictions to: %s" % (output_prediction_file)) # tf.logging.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] for i in range(FLAGS.start_n_top): for j in range(FLAGS.end_n_top): 
start_log_prob = result.start_top_log_probs[i] start_index = result.start_top_index[i] j_index = i * FLAGS.end_n_top + j end_log_prob = result.end_top_log_probs[j_index] end_index = result.end_top_index[j_index] # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= feature.paragraph_len - 1: continue if end_index >= feature.paragraph_len - 1: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_log_prob=start_log_prob, end_log_prob=end_log_prob)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] tok_start_to_orig_index = feature.tok_start_to_orig_index tok_end_to_orig_index = feature.tok_end_to_orig_index start_orig_pos = tok_start_to_orig_index[pred.start_index] end_orig_pos = tok_end_to_orig_index[pred.end_index] paragraph_text = example.paragraph_text final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() if final_text in seen_predictions: continue seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. if not nbest: nbest.append( _NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6)) total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_log_prob + entry.end_log_prob) if not best_non_null_entry: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_log_prob"] = entry.start_log_prob output["end_log_prob"] = entry.end_log_prob nbest_json.append(output) assert len(nbest_json) >= 1 assert best_non_null_entry is not None score_diff = 0 #score_null scores_diff_json[example.qas_id] = score_diff # note(zhiliny): always predict best_non_null_entry # and the evaluation script will search for the best threshold all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with tf.gfile.GFile(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with tf.gfile.GFile(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") qid_to_has_ans = squad_utils.make_qid_to_has_ans(orig_data) has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw = squad_utils.get_raw_scores(orig_data, all_predictions) out_eval = {} squad_utils.find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans) return out_eval def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if 
i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs def input_fn_builder(input_glob, seq_length, is_training, drop_remainder, num_hosts, num_threads=8): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "unique_ids": tf.FixedLenFeature([], tf.int64), "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.float32), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "cls_index": tf.FixedLenFeature([], tf.int64), "p_mask": tf.FixedLenFeature([seq_length], tf.float32) } if is_training: name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) name_to_features["is_impossible"] = tf.FixedLenFeature([], tf.float32) tf.logging.info("Input tfrecord file glob {}".format(input_glob)) global_input_paths = tf.gfile.Glob(input_glob) tf.logging.info("Find {} input paths {}".format( len(global_input_paths), global_input_paths)) def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.cast(t, tf.int32) example[name] = t return example def input_fn(params): """The actual input function.""" if FLAGS.use_tpu: batch_size = params["batch_size"] elif is_training: batch_size = FLAGS.train_batch_size else: batch_size = FLAGS.predict_batch_size # Split tfrecords across hosts if num_hosts > 1: host_id = params["context"].current_host num_files = len(global_input_paths) if num_files >= num_hosts: num_files_per_host = (num_files + num_hosts - 1) // num_hosts my_start_file_id = host_id * num_files_per_host my_end_file_id = min((host_id + 1) * num_files_per_host, num_files) input_paths = global_input_paths[my_start_file_id: my_end_file_id] tf.logging.info("Host {} handles {} files".format(host_id, len(input_paths))) else: input_paths = global_input_paths if len(input_paths) == 1: d = tf.data.TFRecordDataset(input_paths[0]) # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = d.shuffle(buffer_size=FLAGS.shuffle_buffer) d = d.repeat() else: d = tf.data.Dataset.from_tensor_slices(input_paths) # file level shuffle d = d.shuffle(len(input_paths)).repeat() # `cycle_length` is the number of parallel files that get read. 
cycle_length = min(num_threads, len(input_paths)) d = d.apply( tf.contrib.data.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length)) if is_training: # sample level shuffle d = d.shuffle(buffer_size=FLAGS.shuffle_buffer) d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_threads, drop_remainder=drop_remainder)) d = d.prefetch(1024) return d return input_fn def get_model_fn(): def model_fn(features, labels, mode, params): #### Training or Evaluation is_training = (mode == tf.estimator.ModeKeys.TRAIN) #### Get loss from inputs outputs = function_builder.get_qa_outputs(FLAGS, features, is_training) #### Check model parameters num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()]) tf.logging.info('#params: {}'.format(num_params)) scaffold_fn = None #### Evaluation mode if mode == tf.estimator.ModeKeys.PREDICT: if FLAGS.init_checkpoint: tf.logging.info("init_checkpoint not being used in predict mode.") predictions = { "unique_ids": features["unique_ids"], "start_top_index": outputs["start_top_index"], "start_top_log_probs": outputs["start_top_log_probs"], "end_top_index": outputs["end_top_index"], "end_top_log_probs": outputs["end_top_log_probs"] } if FLAGS.use_tpu: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) else: output_spec = tf.estimator.EstimatorSpec( mode=mode, predictions=predictions) return output_spec ### Compute loss seq_length = tf.shape(features["input_ids"])[1] def compute_loss(log_probs, positions): one_hot_positions = tf.one_hot( positions, depth=seq_length, dtype=tf.float32) loss = - tf.reduce_sum(one_hot_positions * log_probs, axis=-1) loss = tf.reduce_mean(loss) return loss start_loss = compute_loss( outputs["start_log_probs"], features["start_positions"]) end_loss = compute_loss( outputs["end_log_probs"], features["end_positions"]) total_loss = (start_loss + end_loss) * 0.5 #### Configuring the optimizer train_op, learning_rate, _ = model_utils.get_train_op(FLAGS, total_loss) monitor_dict = {} monitor_dict["lr"] = learning_rate #### load pretrained models scaffold_fn = model_utils.init_from_checkpoint(FLAGS) #### Constucting training TPUEstimatorSpec with new cache. if FLAGS.use_tpu: host_call = function_builder.construct_scalar_host_call( monitor_dict=monitor_dict, model_dir=FLAGS.model_dir, prefix="train/", reduce_fn=tf.reduce_mean) train_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, host_call=host_call, scaffold_fn=scaffold_fn) else: train_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op) return train_spec return model_fn def _get_spm_basename(): spm_basename = os.path.basename(FLAGS.spiece_model_file) return spm_basename def preprocess(): sp_model = spm.SentencePieceProcessor() sp_model.Load(FLAGS.spiece_model_file) spm_basename = _get_spm_basename() train_rec_file = os.path.join( FLAGS.output_dir, "{}.{}.slen-{}.qlen-{}.train.tf_record".format( spm_basename, FLAGS.proc_id, FLAGS.max_seq_length, FLAGS.max_query_length)) tf.logging.info("Read examples from {}".format(FLAGS.train_file)) train_examples = read_squad_examples(FLAGS.train_file, is_training=True) train_examples = train_examples[FLAGS.proc_id::FLAGS.num_proc] # Pre-shuffle the input to avoid having to make a very large shuffle # buffer in the `input_fn`. 
random.shuffle(train_examples) tf.logging.info("Write to {}".format(train_rec_file)) train_writer = FeatureWriter( filename=train_rec_file, is_training=True) convert_examples_to_features( examples=train_examples, sp_model=sp_model, max_seq_length=FLAGS.max_seq_length, doc_stride=FLAGS.doc_stride, max_query_length=FLAGS.max_query_length, is_training=True, output_fn=train_writer.process_feature) train_writer.close() def main(_): tf.logging.set_verbosity(tf.logging.INFO) if not tf.gfile.Exists(FLAGS.output_dir): tf.gfile.MakeDirs(FLAGS.output_dir) if FLAGS.do_prepro: preprocess() return #### Validate flags if FLAGS.save_steps is not None: FLAGS.iterations = min(FLAGS.iterations, FLAGS.save_steps) if not FLAGS.do_train and not FLAGS.do_predict: raise ValueError( "At least one of `do_train` and `do_predict` must be True.") if FLAGS.do_predict and not tf.gfile.Exists(FLAGS.predict_dir): tf.gfile.MakeDirs(FLAGS.predict_dir) sp_model = spm.SentencePieceProcessor() sp_model.Load(FLAGS.spiece_model_file) ### TPU Configuration run_config = model_utils.configure_tpu(FLAGS) model_fn = get_model_fn() spm_basename = _get_spm_basename() # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. if FLAGS.use_tpu: estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, predict_batch_size=FLAGS.predict_batch_size) else: estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config) if FLAGS.do_train: train_rec_glob = os.path.join( FLAGS.output_dir, "{}.*.slen-{}.qlen-{}.train.tf_record".format( spm_basename, FLAGS.max_seq_length, FLAGS.max_query_length)) train_input_fn = input_fn_builder( input_glob=train_rec_glob, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True, num_hosts=FLAGS.num_hosts) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_steps) if FLAGS.do_predict: for eval_set in ['dev','test','challenge']: new_predict_file = FLAGS.predict_file + "_" + eval_set + ".json" eval_examples = read_squad_examples(new_predict_file, is_training=False) with tf.gfile.Open(new_predict_file) as f: orig_data = json.load(f)["data"] eval_rec_file = os.path.join( FLAGS.output_dir, "{}.slen-{}.qlen-{}.{}.tf_record".format( spm_basename, FLAGS.max_seq_length, FLAGS.max_query_length, eval_set)) eval_feature_file = os.path.join( FLAGS.output_dir, "{}.slen-{}.qlen-{}.{}.features.pkl".format( spm_basename, FLAGS.max_seq_length, FLAGS.max_query_length, eval_set)) if tf.gfile.Exists(eval_rec_file) and tf.gfile.Exists( eval_feature_file) and not FLAGS.overwrite_data: tf.logging.info("Loading eval features from {}".format(eval_feature_file)) with tf.gfile.Open(eval_feature_file, 'rb') as fin: eval_features = pickle.load(fin) else: eval_writer = FeatureWriter(filename=eval_rec_file, is_training=False) eval_features = [] def append_feature(feature): eval_features.append(feature) eval_writer.process_feature(feature) convert_examples_to_features( examples=eval_examples, sp_model=sp_model, max_seq_length=FLAGS.max_seq_length, doc_stride=FLAGS.doc_stride, max_query_length=FLAGS.max_query_length, is_training=False, output_fn=append_feature) eval_writer.close() with tf.gfile.Open(eval_feature_file, 'wb') as fout: pickle.dump(eval_features, fout) eval_input_fn = input_fn_builder( input_glob=eval_rec_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=False, num_hosts=1) cur_results = [] for result in estimator.predict( input_fn=eval_input_fn, 
yield_single_examples=True): if len(cur_results) % 1000 == 0: tf.logging.info("Processing example: %d" % (len(cur_results))) unique_id = int(result["unique_ids"]) start_top_log_probs = ( [float(x) for x in result["start_top_log_probs"].flat]) start_top_index = [int(x) for x in result["start_top_index"].flat] end_top_log_probs = ( [float(x) for x in result["end_top_log_probs"].flat]) end_top_index = [int(x) for x in result["end_top_index"].flat] cur_results.append( RawResult( unique_id=unique_id, start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index)) output_prediction_file = os.path.join( FLAGS.predict_dir, eval_set+"_predictions.json") output_nbest_file = os.path.join( FLAGS.predict_dir, eval_set+"_nbest_predictions.json") ret = write_predictions(eval_examples, eval_features, cur_results, FLAGS.n_best_size, FLAGS.max_answer_length, output_prediction_file, output_nbest_file, orig_data) # Log current result tf.logging.info("=" * 80) log_str = "Result | " for key, val in ret.items(): log_str += "{} {} | ".format(key, val) tf.logging.info(log_str) tf.logging.info("=" * 80) if __name__ == "__main__": tf.app.run()
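The sliding-window conversion above (doc spans of at most max_seq_length minus query length minus 3 tokens, advanced by doc_stride, with each token credited to the span where it has the most surrounding context) is easiest to see on a toy input. The following is a minimal, self-contained sketch of just that logic; the toy sizes and the helper names make_doc_spans / is_max_context are illustrative, not part of the original script.

# Minimal sketch of the doc-span / max-context logic used in the script above.
# Mirrors _DocSpan and _check_is_max_context on a toy token count.
import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
    """Slide a window of at most `max_tokens_for_doc` tokens with stride `doc_stride`."""
    doc_spans = []
    start_offset = 0
    while start_offset < num_tokens:
        length = min(num_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(_DocSpan(start=start_offset, length=length))
        if start_offset + length == num_tokens:
            break
        start_offset += min(length, doc_stride)
    return doc_spans

def is_max_context(doc_spans, cur_span_index, position):
    """True iff `position` has its largest min(left, right) context in `cur_span_index`."""
    best_score, best_span_index = None, None
    for span_index, doc_span in enumerate(doc_spans):
        end = doc_span.start + doc_span.length - 1
        if position < doc_span.start or position > end:
            continue
        num_left = position - doc_span.start
        num_right = end - position
        score = min(num_left, num_right) + 0.01 * doc_span.length
        if best_score is None or score > best_score:
            best_score, best_span_index = score, span_index
    return cur_span_index == best_span_index

if __name__ == "__main__":
    spans = make_doc_spans(num_tokens=12, max_tokens_for_doc=6, doc_stride=3)
    print(spans)  # [DocSpan(start=0, length=6), DocSpan(start=3, length=6), DocSpan(start=6, length=6)]
    # Token 5 sits at the right edge of the first span but near the middle of the
    # second, so only the second span counts as its "max context" copy.
    print(is_max_context(spans, cur_span_index=1, position=5))  # True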
ymcui/Chinese-XLNet
1,650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/squad_utils.py
Python
"""Official evaluation script for SQuAD version 2.0. In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted probability that a question is unanswerable. """ import argparse import collections import json import numpy as np import os import re import string import sys OPTS = None def parse_args(): parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.') parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.') parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.') parser.add_argument('--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).') parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.') parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).') parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.') parser.add_argument('--verbose', '-v', action='store_true') if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() def make_qid_to_has_ans(dataset): qid_to_has_ans = {} for article in dataset: for p in article['paragraphs']: for qa in p['qas']: qid_to_has_ans[qa['id']] = bool(qa['answers']) return qid_to_has_ans def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) return re.sub(regex, ' ', text) def white_space_fix(text): return ' '.join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return ''.join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def get_tokens(s): if not s: return [] return normalize_answer(s).split() def compute_exact(a_gold, a_pred): return int(normalize_answer(a_gold) == normalize_answer(a_pred)) def compute_f1(a_gold, a_pred): gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = collections.Counter(gold_toks) & collections.Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def get_raw_scores(dataset, preds): exact_scores = {} f1_scores = {} for article in dataset: for p in article['paragraphs']: for qa in p['qas']: qid = qa['id'] gold_answers = [a['text'] for a in qa['answers'] if normalize_answer(a['text'])] if not gold_answers: # For unanswerable questions, only correct answer is empty string gold_answers = [''] if qid not in preds: print('Missing prediction for %s' % qid) continue a_pred = preds[qid] # Take max over all gold answers exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) return exact_scores, f1_scores def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): new_scores = {} for qid, s in scores.items(): pred_na = na_probs[qid] > na_prob_thresh 
if pred_na: new_scores[qid] = float(not qid_to_has_ans[qid]) else: new_scores[qid] = s return new_scores def make_eval_dict(exact_scores, f1_scores, qid_list=None): if not qid_list: total = len(exact_scores) return collections.OrderedDict([ ('exact', 100.0 * sum(exact_scores.values()) / total), ('f1', 100.0 * sum(f1_scores.values()) / total), ('total', total), ]) else: total = len(qid_list) return collections.OrderedDict([ ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total), ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total), ('total', total), ]) def merge_eval(main_eval, new_eval, prefix): for k in new_eval: main_eval['%s_%s' % (prefix, k)] = new_eval[k] def plot_pr_curve(precisions, recalls, out_image, title): plt.step(recalls, precisions, color='b', alpha=0.2, where='post') plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b') plt.xlabel('Recall') plt.ylabel('Precision') plt.xlim([0.0, 1.05]) plt.ylim([0.0, 1.05]) plt.title(title) plt.savefig(out_image) plt.clf() def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None): qid_list = sorted(na_probs, key=lambda k: na_probs[k]) true_pos = 0.0 cur_p = 1.0 cur_r = 0.0 precisions = [1.0] recalls = [0.0] avg_prec = 0.0 for i, qid in enumerate(qid_list): if qid_to_has_ans[qid]: true_pos += scores[qid] cur_p = true_pos / float(i+1) cur_r = true_pos / float(num_true_pos) if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(cur_p) recalls.append(cur_r) if out_image: plot_pr_curve(precisions, recalls, out_image, title) return {'ap': 100.0 * avg_prec} def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir): if out_image_dir and not os.path.exists(out_image_dir): os.makedirs(out_image_dir) num_true_pos = sum(1 for v in qid_to_has_ans.values() if v) if num_true_pos == 0: return pr_exact = make_precision_recall_eval( exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_exact.png'), title='Precision-Recall curve for Exact Match score') pr_f1 = make_precision_recall_eval( f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_f1.png'), title='Precision-Recall curve for F1 score') oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()} pr_oracle = make_precision_recall_eval( oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_oracle.png'), title='Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)') merge_eval(main_eval, pr_exact, 'pr_exact') merge_eval(main_eval, pr_f1, 'pr_f1') merge_eval(main_eval, pr_oracle, 'pr_oracle') def histogram_na_prob(na_probs, qid_list, image_dir, name): if not qid_list: return x = [na_probs[k] for k in qid_list] weights = np.ones_like(x) / float(len(x)) plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0)) plt.xlabel('Model probability of no-answer') plt.ylabel('Proportion of dataset') plt.title('Histogram of no-answer probability: %s' % name) plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name)) plt.clf() def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for i, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] return 100.0 * best_score / len(scores), best_thresh def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for i, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] has_ans_score, has_ans_cnt = 0, 0 for qid in qid_list: if not qid_to_has_ans[qid]: continue has_ans_cnt += 1 if qid not in scores: continue has_ans_score += scores[qid] return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) main_eval['best_exact'] = best_exact main_eval['best_exact_thresh'] = exact_thresh main_eval['best_f1'] = best_f1 main_eval['best_f1_thresh'] = f1_thresh def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans) main_eval['best_exact'] = best_exact main_eval['best_exact_thresh'] = exact_thresh main_eval['best_f1'] = best_f1 main_eval['best_f1_thresh'] = f1_thresh main_eval['has_ans_exact'] = has_ans_exact main_eval['has_ans_f1'] = has_ans_f1 def main(): with open(OPTS.data_file) as f: dataset_json = json.load(f) dataset = dataset_json['data'] with open(OPTS.pred_file) as f: preds = json.load(f) new_orig_data = [] for article in dataset: for p in article['paragraphs']: for qa in p['qas']: if qa['id'] in preds: new_para = {'qas': [qa]} new_article = {'paragraphs': [new_para]} new_orig_data.append(new_article) dataset = new_orig_data if OPTS.na_prob_file: with open(OPTS.na_prob_file) as f: na_probs = json.load(f) else: na_probs = {k: 0.0 for k in preds} qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] exact_raw, f1_raw 
= get_raw_scores(dataset, preds) exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh) f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh) out_eval = make_eval_dict(exact_thresh, f1_thresh) if has_ans_qids: has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids) merge_eval(out_eval, has_ans_eval, 'HasAns') if no_ans_qids: no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids) merge_eval(out_eval, no_ans_eval, 'NoAns') if OPTS.na_prob_file: find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir) histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns') histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns') if OPTS.out_file: with open(OPTS.out_file, 'w') as f: json.dump(out_eval, f) else: print(json.dumps(out_eval, indent=2)) if __name__ == '__main__': OPTS = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main()
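As a quick reference for the scoring above: exact match and F1 reduce to answer normalization plus a bag-of-tokens overlap. The snippet below is a simplified, self-contained sketch of the F1 computation on a made-up answer pair; it lowercases and whitespace-splits instead of running the full normalize_answer pipeline, and it skips the no-answer special case handled by the script.

# Toy illustration of the token-overlap F1 used by the evaluation script above.
# The strings are made up; the tokenizer is a deliberate simplification.
import collections

def simple_tokens(s):
    return s.lower().split()

def token_f1(a_gold, a_pred):
    gold, pred = simple_tokens(a_gold), simple_tokens(a_pred)
    common = collections.Counter(gold) & collections.Counter(pred)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = 1.0 * num_same / len(pred)
    recall = 1.0 * num_same / len(gold)
    return 2 * precision * recall / (precision + recall)

if __name__ == "__main__":
    gold = "the man went to the store"
    pred = "went to the store yesterday"
    # 4 shared tokens ("went", "to", "the", "store"):
    # precision = 4/5, recall = 4/6, F1 = 2*(4/5)*(4/6) / ((4/5)+(4/6)) ~= 0.727
    print(round(token_f1(gold, pred), 3))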
ymcui/Chinese-XLNet
1,650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/summary.py
Python
# -*- coding: utf-8 -*- ''' print summary ''' from __future__ import print_function from collections import Counter, OrderedDict import string import re import argparse import json import sys reload(sys) sys.setdefaultencoding('utf-8') import pdb import os import math import numpy as np import collections from prettytable import PrettyTable def print_summary(): lscmd = os.popen('ls '+sys.argv[1]+'/result.*').read() result_list = lscmd.split() num_args = len(result_list) assert num_args==2 or num_args==3 dev_input_file = open(sys.argv[1]+'/result.dev', 'rb') test_input_file = open(sys.argv[1]+'/result.test', 'rb') if num_args==2: print_table = PrettyTable(['#','DEV-AVG','DEV-EM','DEV-F1','TEST-AVG','TEST-EM','TEST-F1','FILE']) elif num_args==3: chl_input_file = open(sys.argv[1]+'/result.challenge', 'rb') print_table = PrettyTable(['#','DEV-AVG','DEV-EM','DEV-F1','TEST-AVG','TEST-EM','TEST-F1','CHL-AVG','CHL-EM','CHL-F1','FILE']) # style set print_table.align['FILE'] = 'l' print_table.float_format = '2.3' # data fill dev_avg = [] dev_em = [] dev_f1 = [] dev_file = [] for dline in dev_input_file.readlines(): dline = dline.strip() if re.search('^{', dline): ddict = json.loads(dline) dev_avg.append(float(ddict['AVERAGE'])) dev_em.append(float(ddict['EM'])) dev_f1.append(float(ddict['F1'])) dev_file.append(ddict['FILE']) test_avg = [] test_em = [] test_f1 = [] test_file = [] for dline in test_input_file.readlines(): dline = dline.strip() if re.search('^{', dline): ddict = json.loads(dline) test_avg.append(float(ddict['AVERAGE'])) test_em.append(float(ddict['EM'])) test_f1.append(float(ddict['F1'])) test_file.append(ddict['FILE']) if num_args==3: chl_avg = [] chl_em = [] chl_f1 = [] chl_file = [] for dline in chl_input_file.readlines(): dline = dline.strip() if re.search('^{', dline): ddict = json.loads(dline) chl_avg.append(float(ddict['AVERAGE'])) chl_em.append(float(ddict['EM'])) chl_f1.append(float(ddict['F1'])) chl_file.append(ddict['FILE']) # print if num_args == 2: min_len = min(len(dev_avg),len(test_avg)) for k in range(min_len): print_table.add_row([k+1, dev_avg[k], dev_em[k], dev_f1[k], test_avg[k], test_em[k], test_f1[k], dev_file[k]]) elif num_args == 3: min_len = min(len(dev_avg),len(test_avg),len(chl_avg)) for k in range(min_len): print_table.add_row([k+1, dev_avg[k], dev_em[k], dev_f1[k], test_avg[k], test_em[k], test_f1[k], chl_avg[k], chl_em[k], chl_f1[k], dev_file[k]]) if len(sys.argv)==3: sk = sys.argv[2].upper() print('sort key detected: {}'.format(sk)) print(print_table.get_string(sortby=sk, reversesort=True)) else: print(print_table) if num_args == 2: summary_table = PrettyTable(['#','DEV-AVG','DEV-EM','DEV-F1','TEST-AVG','TEST-EM','TEST-F1','FILE']) summary_table.add_row(["M", np.max(dev_avg), np.max(dev_em), np.max(dev_f1), np.max(test_avg), np.max(test_em), np.max(test_f1),"-"]) summary_table.add_row(["A", np.mean(dev_avg), np.mean(dev_em), np.mean(dev_f1), np.mean(test_avg), np.mean(test_em), np.mean(test_f1),"-"]) summary_table.add_row(["D", np.std(dev_avg), np.std(dev_em), np.std(dev_f1), np.std(test_avg), np.std(test_em), np.std(test_f1),"-"]) elif num_args == 3: summary_table = PrettyTable(['#','DEV-AVG','DEV-EM','DEV-F1','TEST-AVG','TEST-EM','TEST-F1','CHL-AVG','CHL-EM','CHL-F1','FILE']) summary_table.add_row(["M", np.max(dev_avg), np.max(dev_em), np.max(dev_f1), np.max(test_avg), np.max(test_em), np.max(test_f1), np.max(chl_avg), np.max(chl_em), np.max(chl_f1), "-"]) summary_table.add_row(["A", np.mean(dev_avg), np.mean(dev_em), np.mean(dev_f1), np.mean(test_avg), 
np.mean(test_em), np.mean(test_f1), np.mean(chl_avg), np.mean(chl_em), np.mean(chl_f1), "-"]) summary_table.add_row(["D", np.std(dev_avg), np.std(dev_em), np.std(dev_f1), np.std(test_avg), np.std(test_em), np.std(test_f1), np.std(chl_avg), np.std(chl_em), np.std(chl_f1), "-"]) # style set summary_table.align['FILE'] = 'l' summary_table.float_format = '2.3' print(summary_table) return 0 if __name__ == '__main__': print_summary()
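The script above expects each result.dev / result.test (and optionally result.challenge) file in the given directory to contain one JSON object per line with AVERAGE, EM, F1 and FILE keys. A hypothetical example of writing such a line, and of invoking the script, is sketched below; the file paths and numbers are illustrative only.

# Hypothetical illustration of the per-line JSON records parsed by summary.py above.
# Keys follow the ddict lookups in the script; values are made up.
import json

record = {"AVERAGE": 85.2, "EM": 80.1, "F1": 90.3, "FILE": "model.ckpt-8000"}
with open("result.dev", "a") as f:   # one JSON object per line, file lives in the result dir
    f.write(json.dumps(record) + "\n")

# The table would then be printed with, e.g.:
#   python src/summary.py <result_dir> [sort_key]
# where the optional sort_key is a column name such as dev-f1 (upper-cased internally).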
ymcui/Chinese-XLNet
1,650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/tpu_estimator.py
Python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================================== """TPUEstimator class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import os import signal import sys import threading import time import numpy as np import six from six.moves import queue as Queue # pylint: disable=redefined-builtin from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib.tpu.proto import compilation_result_pb2 as tpu_compilation_result from tensorflow.contrib.tpu.python.tpu import tensor_tracer from tensorflow.contrib.tpu.python.ops import tpu_ops from tensorflow.contrib.tpu.python.tpu import error_handling from tensorflow.contrib.tpu.python.tpu import session_support from tensorflow.contrib.tpu.python.tpu import tpu from tensorflow.contrib.tpu.python.tpu import tpu_config from tensorflow.contrib.tpu.python.tpu import tpu_context from tensorflow.contrib.tpu.python.tpu import tpu_feed from tensorflow.contrib.tpu.python.tpu import training_loop from tensorflow.contrib.tpu.python.tpu import util as util_lib from tensorflow.contrib.training.python.training import hparam from tensorflow.core.framework import variable_pb2 from tensorflow.core.framework.summary_pb2 import Summary from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session as tf_session from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest as data_nest from tensorflow.python.estimator import estimator as estimator_lib from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.estimator.export import export_output as export_output_lib from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import summary_ops_v2 as contrib_summary from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import tag_constants from tensorflow.python.summary import summary from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import evaluation from tensorflow.python.training import session_run_hook from tensorflow.python.training import training from tensorflow.python.training import training_util from tensorflow.python.util import function_utils from tensorflow.python.util import nest from 
tensorflow.python.util import tf_inspect _INITIAL_LOSS = 1e7 _ZERO_LOSS = 0. _TPU_ESTIMATOR = 'custom_tpu_estimator' _ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop' _BATCH_SIZE_KEY = 'batch_size' _CTX_KEY = 'context' _USE_TPU_KEY = 'use_tpu' _CROSS_REPLICA_SUM_OP = 'CrossReplicaSum' _ONE_GIGABYTE = 1024 * 1024 * 1024 _TPU_ENQUEUE_OPS = '_tpu_enqueue_ops' _TPU_TRAIN_OP = '_tpu_train_op' _REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference' # Ideally _USE_TPU_KEY should be reserved as well. However there are already # models that make use of this key, thus it can not be reserved now to prevent # breakage. In the long run, we would like to mitigate this by migrating models # off of using _USE_TPU_KEY. _RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY] # TODO(b/65703635): Flip the value and remove all dead code. Currently, this is # only used for per-core based deployments. For per-host based pipelines, if a # user returns a Dataset instance it will be automatically wrapped in a # tf.while_loop (This can be disabled by returning features and labels # explicitly). _WRAP_INPUT_FN_INTO_WHILE_LOOP = False ops.register_proto_function( '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR), proto_type=variable_pb2.VariableDef, to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access def _is_iterable(obj): """A Python 2 and 3 compatible util to check whether `obj` is iterable.""" try: iter(obj) return True except TypeError: return False def _create_global_step(graph): graph = graph or ops.get_default_graph() if training.get_global_step(graph) is not None: raise ValueError('"global_step" already exists.') # Create in proper graph and base name_scope. with graph.as_default() as g, g.name_scope(None): return variable_scope.get_variable( ops.GraphKeys.GLOBAL_STEP, shape=[], dtype=dtypes.int64, initializer=init_ops.zeros_initializer(), trainable=False, use_resource=True, collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]) def _create_or_get_iterations_per_loop(): """Creates or gets the iterations_per_loop variable. In TPUEstimator, the user provided computation, the model_fn, is wrapped inside a tf.while_loop for peak performance. The iterations of the loop are specified by this variable, which adjusts its value on the CPU after each TPU program execution and before the next TPU execution. The purpose of using a variable, rather then a constant, is to allow TPUEstimator adapt the TPU training iterations according to the final steps specified by users. For example, if the user sets the iterations_per_loop as 4 in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop variable will have the following value before each TPU training. - 1-th TPU execution: iterations_per_loop = 4 - 2-th TPU execution: iterations_per_loop = 4 - 3-th TPU execution: iterations_per_loop = 2 As model_fn increases the global step once per train_op invocation, the global step is 10 after all TPU executions, matching the steps=10 inputs passed in by users. Returns: A TF non-trainable resource variable. Raises: RuntimeError: If multi iterations_per_loop variables were found. 
""" graph = ops.get_default_graph() collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR) iter_vars = graph.get_collection(collection_name) if len(iter_vars) == 1: return iter_vars[0] elif len(iter_vars) > 1: raise RuntimeError('Multiple iterations_per_loop_var in collection.') with ops.colocate_with(training_util.get_global_step()): with variable_scope.variable_scope( _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE): return variable_scope.get_variable( _ITERATIONS_PER_LOOP_VAR, initializer=init_ops.zeros_initializer(), shape=[], dtype=dtypes.int32, trainable=False, collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES], use_resource=True) def _sync_variables_ops(ctx): """Create varriables synchronization ops. Gets the variables back from TPU nodes. This means the variables updated by TPU will now be *synced* to host memory. In BROADCAST mode, we skip this sync since the variables are ususally too big to transmit via RPC. Args: ctx: A `_InternalTPUContext` instance with mode. Returns: A list of sync ops. """ if not ctx.is_input_broadcast_with_iterators(): return [ array_ops.check_numerics(v.read_value(), 'Gradient for %s is NaN' % v.name).op for v in variables.trainable_variables() ] else: return [control_flow_ops.no_op()] def _increase_eval_step_op(iterations_per_loop): """Returns an op to increase the eval step for TPU evaluation. Args: iterations_per_loop: Tensor. The number of eval steps running in TPU system before returning to CPU host for each `Session.run`. Returns: An operation """ eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access # Estimator evaluate increases 1 by default. So, we increase the difference. return state_ops.assign_add( eval_step, math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype), use_locking=True) def _extract_key_names(tensor_or_dict): if isinstance(tensor_or_dict, dict): return sorted(tensor_or_dict.keys()) return [] class _SIGNAL(object): """Signal used to control the thread of infeed/outfeed. All preserved signals must be negative numbers. Positive numbers are used to indicate the number of iterations for next training/evaluation loop. """ NEXT_BATCH = -1 STOP = -2 class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`. See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and `export_outputs`. For evaluation, `eval_metrics `is a tuple of `metric_fn` and `tensors`, where `metric_fn` runs on CPU to generate metrics and `tensors` represents the `Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`. To be precise, TPU evaluation expects a slightly different signature from the `tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`. The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The `tensors` usually specify the model logits, which are transferred back from TPU system to CPU host. All tensors must have be batch-major, i.e., the batch size is the first dimension. Once all tensors are available at CPU host from all shards, they are concatenated (on CPU) and passed as positional arguments to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is a dict. 
`metric_fn` takes the `tensors` and returns a dict from metric string name to the result of calling a metric function, namely a `(metric_tensor, update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the `eval_metrics`. `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This function should not capture any Tensors in `model_fn`. `host_call` is a tuple of a `function` and a list or dictionary of `tensors` to pass to that function and returns a list of Tensors. `host_call` currently works for train() and evaluate(). The Tensors returned by the function is executed on the CPU on every step, so there is communication overhead when sending tensors from TPU to CPU. To reduce the overhead, try reducing the size of the tensors. The `tensors` are concatenated along their major (batch) dimension, and so must be >= rank 1. The `host_call` is useful for writing summaries with `tf.contrib.summary.create_file_writer`. """ def __new__(cls, mode, predictions=None, loss=None, train_op=None, eval_metrics=None, export_outputs=None, scaffold_fn=None, host_call=None, training_hooks=None, evaluation_hooks=None, prediction_hooks=None): """Creates a validated `TPUEstimatorSpec` instance.""" host_calls = {} if eval_metrics is not None: host_calls['eval_metrics'] = eval_metrics if host_call is not None: host_calls['host_call'] = host_call _OutfeedHostCall.validate(host_calls) training_hooks = tuple(training_hooks or []) evaluation_hooks = tuple(evaluation_hooks or []) prediction_hooks = tuple(prediction_hooks or []) for hook in training_hooks + evaluation_hooks + prediction_hooks: if not isinstance(hook, session_run_hook.SessionRunHook): raise TypeError('All hooks must be SessionRunHook instances, given: {}' .format(hook)) return super(TPUEstimatorSpec, cls).__new__( cls, mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metrics=eval_metrics, export_outputs=export_outputs, scaffold_fn=scaffold_fn, host_call=host_call, training_hooks=training_hooks, evaluation_hooks=evaluation_hooks, prediction_hooks=prediction_hooks) def as_estimator_spec(self): """Creates an equivalent `EstimatorSpec` used by CPU train/eval.""" host_calls = {} if self.eval_metrics is not None: host_calls['eval_metrics'] = self.eval_metrics if self.host_call is not None: host_calls['host_call'] = self.host_call host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls) eval_metric_ops = None if self.eval_metrics is not None: eval_metric_ops = host_call_ret['eval_metrics'] hooks = None if self.host_call is not None: hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])] if tensor_tracer.TensorTracer.is_enabled(): tt = tensor_tracer.TensorTracer() tracing_calls = tt.trace_cpu(ops.get_default_graph()) tracing_call_ret = _OutfeedHostCall.create_cpu_hostcall(tracing_calls) tracing_functions = tracing_call_ret.values() if tracing_functions: if hooks: hooks.extend([_OutfeedHostCallHook(tracing_functions)]) else: hooks = [_OutfeedHostCallHook(tracing_functions)] hooks = tuple(hooks or []) scaffold = self.scaffold_fn() if self.scaffold_fn else None return model_fn_lib.EstimatorSpec( mode=self.mode, predictions=self.predictions, loss=self.loss, train_op=self.train_op, eval_metric_ops=eval_metric_ops, export_outputs=self.export_outputs, scaffold=scaffold, training_hooks=self.training_hooks + hooks, evaluation_hooks=self.evaluation_hooks + hooks, prediction_hooks=self.prediction_hooks + hooks) class _OpQueueContext(object): """Manages work queue and thread for a infeed/outfeed thread.""" def 
__init__(self, name, target, args): self._name = name self._queue = Queue.Queue() args = (self,) + args self._thread = threading.Thread(name=name, target=target, args=args) self._thread.daemon = True self._thread.start() def stop(self): self._queue.put(_SIGNAL.STOP) def send_next_batch_signal(self, iterations): self._queue.put(iterations) def read_iteration_counts(self): while True: iterations = self._queue.get(block=True) logging.debug('%s read iterations %s', self._name, iterations) if iterations == _SIGNAL.STOP: logging.info('%s received shutdown signal, stopping.', self._name) return yield iterations def join(self): logging.info('Shutting down %s thread.', self._name) self.stop() self._thread.join() class _OpSignalOnceQueueContext(_OpQueueContext): """Manages work queue and thread for a infeed/outfeed thread. This subclass only signals once. """ def __init__(self, name, target, args): super(_OpSignalOnceQueueContext, self).__init__(name, target, args) self._has_signaled = False def send_next_batch_signal(self, iterations): if not self._has_signaled: self._queue.put(iterations) self._has_signaled = True class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook): """A Session hook setting up the TPU initialization, infeed, and outfeed. This hook does two major things: 1. initialize and shutdown TPU system. 2. launch and join the threads for infeed enqueue and (optional) outfeed dequeue. """ def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op, run_infeed_loop_on_coordinator=True, rendezvous=None, master=None, session_config=None): self._master_job = ctx.master_job self._enqueue_ops = enqueue_ops self._dequeue_ops = dequeue_ops self._rendezvous = rendezvous self._master = master self._session_config = session_config self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator self._initial_infeed_sleep_secs = ( ctx.config.tpu_config.initial_infeed_sleep_secs) self._feed_error = None self._finished = False self._should_initialize_tpu = True self._tpu_compile_op = tpu_compile_op def begin(self): logging.info('TPU job name %s', self._master_job) self._iterations_per_loop_var = _create_or_get_iterations_per_loop() self._init_ops = [] if self._should_initialize_tpu: self._finalize_ops = [tpu.shutdown_system(job=self._master_job)] else: self._finalize_ops = [] summary_writer_init_ops = contrib_summary.summary_writer_initializer_op() self._init_ops.extend(summary_writer_init_ops) # Get all the writer resources from the initializer, so we know what to # flush. 
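# The initializer ops take their writer resource as the first input, which is
# why `op.inputs[0]` below is the handle passed to `contrib_summary.flush`.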
for op in summary_writer_init_ops: self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) def _run_infeed(self, queue_ctx, session): logging.info('Starting infeed thread controller.') if self._initial_infeed_sleep_secs: logging.info('Infeed thread sleeping for %d seconds.', self._initial_infeed_sleep_secs) time.sleep(self._initial_infeed_sleep_secs) logging.info('Infeed thread starting after sleep') with self._rendezvous.catch_errors(source='infeed', session=session): if self._run_infeed_loop_on_coordinator: for count, steps in enumerate(queue_ctx.read_iteration_counts()): for i in xrange(steps): logging.debug('Infeed enqueue for iteration (%d, %d)', count, i) session.run(self._enqueue_ops) else: for _ in queue_ctx.read_iteration_counts(): session.run(self._enqueue_ops) logging.info('Infeed thread finished, shutting down.') def _run_outfeed(self, queue_ctx, session): logging.info('Starting outfeed thread controller.') with self._rendezvous.catch_errors(source='outfeed', session=session): for count, steps in enumerate(queue_ctx.read_iteration_counts()): for i in xrange(steps): logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i) session.run(self._dequeue_ops) logging.info('Outfeed thread finished, shutting down.') def _create_infeed_controller(self, name, target, args): return _OpQueueContext(name=name, target=target, args=args) def _assertCompilationSucceeded(self, result, coord): proto = tpu_compilation_result.CompilationResultProto() proto.ParseFromString(result) if proto.status_error_message: logging.error('Compilation failed: {}'.format(proto.status_error_message)) coord.request_stop() else: logging.info('Compilation succeeded') def after_create_session(self, session, coord): if self._should_initialize_tpu: logging.info('Init TPU system') start = time.time() with ops.Graph().as_default(): with tf_session.Session( self._master, config=self._session_config) as sess: sess.run(tpu.initialize_system(job=self._master_job)) logging.info('Initialized TPU in %d seconds', time.time() - start) session.run(self._init_ops, options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000)) if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1': logging.info('Compiling user program: this may take a while...') self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord) self._infeed_controller = self._create_infeed_controller( name='InfeedController', target=self._run_infeed, args=(session,)) self._outfeed_controller = _OpQueueContext( name='OutfeedController', target=self._run_outfeed, args=(session,)) # Enable the worker watchdog to terminate workers on coordinator exit. 
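# The timeout is read from the TF_TPU_WATCHDOG_TIMEOUT environment variable;
# the default value of 0 leaves the watchdog disabled (see the check below).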
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0')) if watchdog_timeout > 0: session_support.start_worker_watchdog(session, shutdown_timeout=watchdog_timeout) def before_run(self, run_context): self._feed_error = None iterations = run_context.session.run(self._iterations_per_loop_var) logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations) self._infeed_controller.send_next_batch_signal(iterations) logging.info('Dequeue next (%d) batch(es) of data from outfeed.', iterations) self._outfeed_controller.send_next_batch_signal(iterations) def end(self, session): self._finished = True logging.info('Stop infeed thread controller') self._infeed_controller.join() self._rendezvous.record_done('infeed') logging.info('Stop output thread controller') self._outfeed_controller.join() self._rendezvous.record_done('outfeed') logging.info('Shutdown TPU system.') session.run(self._finalize_ops) class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook): def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op, rendezvous=None, master=None, session_config=None): super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__( ctx, enqueue_ops, dequeue_ops, tpu_compile_op=tpu_compile_op, run_infeed_loop_on_coordinator=False, rendezvous=rendezvous, master=master, session_config=session_config) def _create_infeed_controller(self, name, target, args): return _OpSignalOnceQueueContext(name=name, target=target, args=args) class _TPUStopAtStepHook(session_run_hook.SessionRunHook): """Hook that requests stop at a specified step. This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with following differences for TPU training: 1. This hook sets the variable for iterations_per_loop, which is used by `TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed. As the hook execution order is not guaranteed, the variable update is handled in `after_create_session` and `after_run` as `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`. 2. For each training loop (session.run), the global step could be increased multiple times on TPU. The global step tensor value will be explicitly read again in `after_run` to ensure the latest value is retrieved to avoid race condition. """ def __init__(self, iterations, num_steps=None, last_step=None): """Initializes a `StopAtStepHook`. Args: iterations: The number of iterations to run optimizer per training loop. num_steps: Number of steps to execute. last_step: Step after which to stop. Raises: ValueError: If one of the arguments is invalid. 
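For example (illustrative numbers): with `iterations=100` and `num_steps=250`, and assuming the global step advances by the number of iterations on each loop, the hook loads 100, 100 and finally 50 into the iterations_per_loop variable before requesting a stop.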
""" if num_steps is None and last_step is None: raise ValueError('One of num_steps or last_step must be specified.') if num_steps is not None and last_step is not None: raise ValueError('Only one of num_steps or last_step can be specified.') self._num_steps = num_steps self._last_step = last_step self._iterations = iterations def _next_iterations(self, global_step, last_step): gap = last_step - global_step return min(gap, self._iterations) def begin(self): self._global_step_tensor = training_util.get_global_step() if self._global_step_tensor is None: raise RuntimeError('Global step should be created.') self._iterations_per_loop_var = _create_or_get_iterations_per_loop() def after_create_session(self, session, coord): global_step = session.run(self._global_step_tensor) if self._last_step is None: self._last_step = global_step + self._num_steps iterations = self._next_iterations(global_step, self._last_step) self._iterations_per_loop_var.load(iterations, session=session) def after_run(self, run_context, run_values): # Global step cannot be retrieved via SessionRunArgs and before_run due to # race condition. global_step = run_context.session.run(self._global_step_tensor) if global_step >= self._last_step: run_context.request_stop() else: iterations = self._next_iterations(global_step, self._last_step) self._iterations_per_loop_var.load( iterations, session=run_context.session) class _SetEvalIterationsHook(session_run_hook.SessionRunHook): """Hook that requests stop at a specified step.""" def __init__(self, num_steps): """Initializes a `_SetEvalIterationsHook`. Args: num_steps: Number of steps to execute. """ self._num_steps = num_steps def begin(self): self._iterations_per_loop_var = _create_or_get_iterations_per_loop() def after_create_session(self, session, coord): self._iterations_per_loop_var.load(self._num_steps, session=session) class _StoppingPredictHook(session_run_hook.SessionRunHook): """Hook that requests stop according to the stopping signal in prediction.""" def __init__(self, scalar_stopping_signal): self._scalar_stopping_signal = scalar_stopping_signal def begin(self): self._iterations_per_loop_var = _create_or_get_iterations_per_loop() def after_create_session(self, session, coord): # This is not necessary as we do not run infeed enqueue and outfeed dequeue # in side threads for prediction model. But it makes the # TPUInfeedOutfeedSessionHook prints nice message. self._iterations_per_loop_var.load(1, session=session) def before_run(self, run_context): return session_run_hook.SessionRunArgs(self._scalar_stopping_signal) def after_run(self, run_context, run_values): _ = run_context scalar_stopping_signal = run_values.results if _StopSignals.should_stop(scalar_stopping_signal): # NOTE(xiejw): In prediction, stopping signals are inserted for each # batch. And we append one more batch to signal the system it should stop. # The data flow might look like # # batch 0: images, labels, stop = 0 (user provided) # batch 1: images, labels, stop = 0 (user provided) # ... # batch 99: images, labels, stop = 0 (user provided) # batch 100: images, labels, stop = 1 (TPUEstimator appended) # # where the final batch (id = 100) is appended by TPUEstimator, so we # should drop it before returning the predictions to user. # To achieve that, we throw the OutOfRangeError in after_run. 
Once # Monitored Session sees this error in SessionRunHook.after_run, the # "current" prediction, i.e., batch with id=100, will be discarded # immediately raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.') def generate_per_core_enqueue_ops_fn_for_host( ctx, input_fn, inputs_structure_recorder, host_device, host_id): """Generates infeed enqueue ops for per-core input_fn on a single host.""" captured_infeed_queue = _CapturedObject() tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) def enqueue_ops_fn(): """A fn returns enqueue_ops.""" num_cores_per_host = ctx.num_of_cores_per_host per_host_sharded_inputs = [] for core_ordinal in range(num_cores_per_host): with ops.name_scope('ordinal_%d' % (core_ordinal)): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=host_device, invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal) inputs = _Inputs.from_input_fn(input_fn(user_context)) if inputs.is_dataset: raise TypeError( '`input_fn` returning `Dataset` is not yet supported in ' 'per-Core input pipeline deployment yet. Please set ' 'TPUConfig.per_host_input_for_training to True or return ' '`features` and `labels` from `input_fn`') features, labels = inputs.features_and_labels() inputs_structure_recorder.validate_and_record_structure( features, labels) flattened_inputs = ( inputs_structure_recorder.flatten_features_and_labels( features, labels)) per_host_sharded_inputs.append(flattened_inputs) infeed_queue = tpu_feed.InfeedQueue( number_of_tuple_elements=len(per_host_sharded_inputs[0])) captured_infeed_queue.capture(infeed_queue) per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl) return per_host_enqueue_ops return enqueue_ops_fn, captured_infeed_queue def generate_per_host_enqueue_ops_fn_for_host( ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id): """Generates infeed enqueue ops for per-host input_fn on a single host.""" captured_infeed_queue = _CapturedObject() dataset_initializer = None with ops.device(device): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=device, invocation_index=host_id) inputs = _Inputs.from_input_fn(input_fn(user_context)) is_dataset = inputs.is_dataset if ctx.mode == model_fn_lib.ModeKeys.PREDICT: if not is_dataset: raise TypeError( 'For mode PREDICT, `input_fn` must return `Dataset` instead of ' '`features` and `labels`.') if batch_axis is not None: raise TypeError('For mode PREDICT, batch_axis is not supported yet.') inputs = _InputsWithStoppingSignals( dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn, add_padding=True) if is_dataset: dataset_initializer = inputs.dataset_initializer() tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) def enqueue_ops_fn(): """A Fn returning the TPU infeed enqueue ops. By providing as a Fn, it can be invoked inside the tf.while_loop such that the input pipeline for multiple iterations can be executed by one Session.run call. Returns: list of dict of ops. """ with ops.device(device): num_of_replicas_per_host = ctx.num_of_replicas_per_host # Convert user input to features and labels. 
If the user returns a # dataset, it is initialized and the features and labels are extracted via # `dataset.iterator.get_next()` features, labels = inputs.features_and_labels() signals = inputs.signals() inputs_structure_recorder.validate_and_record_structure(features, labels) unsharded_tensor_list = ( inputs_structure_recorder.flatten_features_and_labels( features, labels, signals)) infeed_queue = tpu_feed.InfeedQueue( tuple_types=[t.dtype for t in unsharded_tensor_list], tuple_shapes=[t.shape for t in unsharded_tensor_list], shard_dimensions=batch_axis) captured_infeed_queue.capture(infeed_queue) infeed_queue.set_number_of_shards(num_of_replicas_per_host) per_host_enqueue_ops = ( infeed_queue.split_inputs_and_generate_enqueue_ops( unsharded_tensor_list, placement_function=lambda x: device, tpu_ordinal_function=tpu_ordinal_function_impl)) if signals is None: return per_host_enqueue_ops else: return { 'ops': per_host_enqueue_ops, 'signals': signals, } return enqueue_ops_fn, captured_infeed_queue, dataset_initializer def generate_per_host_v2_enqueue_ops_fn_for_host( ctx, input_fn, inputs_structure_recorder, device, host_id): """Generates infeed enqueue ops for per-host input_fn on a single host.""" captured_infeed_queue = _CapturedObject() dataset_initializer = None with ops.device(device): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=device, invocation_index=host_id) inputs = _Inputs.from_input_fn(input_fn(user_context)) is_dataset = inputs.is_dataset if not is_dataset: raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 ' 'input pipeline configuration.') if ctx.mode == model_fn_lib.ModeKeys.PREDICT: inputs = _InputsWithStoppingSignals( dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn, add_padding=True, num_invocations_per_step=ctx.num_of_replicas_per_host) dataset_initializer = inputs.dataset_initializer() tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) def enqueue_ops_fn(): """Generates the per_host enqueue ops.""" control_deps = [] per_host_sharded_inputs = [] num_replicas_per_host = ctx.num_of_replicas_per_host cached_signals = None with ops.device(device): if not inputs.is_dataset: raise TypeError('`input_fn` must return a `Dataset` for this mode.') for _ in range(num_replicas_per_host): # Use control dependencies to ensure a deterministic ordering. with ops.control_dependencies(control_deps): features, labels = inputs.features_and_labels() # Calls get_next() signals = inputs.signals() # All the replicas share the replica 0's stopping signal. # This avoids inconsistent state among different model replicas. 
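# Only the 'stopping' entry is shared below; any other entries in `signals`
# remain per-replica.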
if cached_signals: signals['stopping'] = cached_signals['stopping'] else: cached_signals = signals inputs_structure_recorder.validate_and_record_structure( features, labels) flattened_inputs = ( inputs_structure_recorder.flatten_features_and_labels( features, labels, signals)) control_deps.extend(flattened_inputs) per_host_sharded_inputs.append(flattened_inputs) if inputs_structure_recorder.flattened_input_dims: input_partition_dims = inputs_structure_recorder.flattened_input_dims if signals: input_partition_dims += [None] * len(signals) # pylint: disable=protected-access infeed_queue = tpu_feed._PartitionedInfeedQueue( number_of_tuple_elements=len(per_host_sharded_inputs[0]), host_id=host_id, input_partition_dims=input_partition_dims, device_assignment=ctx.device_assignment) per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( per_host_sharded_inputs) else: infeed_queue = tpu_feed.InfeedQueue( number_of_tuple_elements=len(per_host_sharded_inputs[0])) per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl) captured_infeed_queue.capture(infeed_queue) if signals is None: return per_host_enqueue_ops else: return { 'ops': per_host_enqueue_ops, 'signals': signals, } return enqueue_ops_fn, captured_infeed_queue, dataset_initializer def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder, num_hosts): """Generates infeed enqueue ops for one input_fn on all the hosts.""" captured_infeed_queue = _CapturedObject() dataset_initializer = None device_0 = ctx.tpu_host_placement_function(host_id=0) with ops.device(device_0): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=device_0, invocation_index=0) inputs = _Inputs.from_input_fn(input_fn(user_context)) is_dataset = inputs.is_dataset if ctx.mode == model_fn_lib.ModeKeys.PREDICT: if not is_dataset: raise TypeError( 'For mode PREDICT, `input_fn` must return `Dataset` instead of ' '`features` and `labels`.') inputs = _InputsWithStoppingSignals( dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn, add_padding=True) if is_dataset: dataset_initializer = inputs.dataset_initializer() num_replicas_per_host = ctx.num_of_replicas_per_host def tpu_ordinal_function_impl(replica_id): if ctx.device_assignment: return ctx.device_assignment.tpu_ordinal(replica=replica_id) else: return replica_id % num_replicas_per_host def device_function_impl(replica_id): return ctx.tpu_host_placement_function(replica_id=replica_id) def enqueue_ops_fn(): """Generates enqueue ops for all the hosts.""" broadcasted_inputs = [] flattened_inputs = None # Cache result from input_fn. signals = None for host_id in xrange(num_hosts): with ops.device(ctx.tpu_host_placement_function(host_id=host_id)): for _ in xrange(ctx.num_of_replicas_per_host): # Note: input_fn is only called once at host 0 for the first replica. # The features and labels returned from that invocation are # broadcasted to other replicas(including the replicas on other # hosts). 
if flattened_inputs is None: features, labels = inputs.features_and_labels() # Calls get_next() signals = inputs.signals() inputs_structure_recorder.validate_and_record_structure( features, labels) flattened_inputs = ( inputs_structure_recorder.flatten_features_and_labels( features, labels, signals)) broadcasted_inputs.append(flattened_inputs) infeed_queue = tpu_feed.InfeedQueue( number_of_tuple_elements=len(broadcasted_inputs[0])) captured_infeed_queue.capture(infeed_queue) enqueue_ops = infeed_queue.generate_enqueue_ops( broadcasted_inputs, tpu_ordinal_function=tpu_ordinal_function_impl, placement_function=device_function_impl) if signals is None: return enqueue_ops else: return { 'ops': enqueue_ops, 'signals': signals, } return enqueue_ops_fn, captured_infeed_queue, dataset_initializer class _InputPipeline(object): """`_InputPipeline` handles invoking `input_fn` and piping to infeed queue. `_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from the call site. To be precise, based on the configuration in `_InternalTPUContext`, it invokes `input_fn` for all cores (usually multi-host TPU training) or for one host (usually for single-host TPU evaluation), and sends all `features` and `labels` returned by `input_fn` to TPU infeed. For per-core invocation, `features` and `labels` are piped to infeed directly, one tuple for each core. For per-host invocation, `features` and `labels` are split at the host (with respect to `batch_axis`) and piped to all cores accordingly. In addition, flattening/unflattening is handled by `_InputPipeline`. Model inputs returned by the `input_fn` can have one of the following forms: 1. features 2. (features, labels) 3. ((arbitrarily nested structure of features), labels) Internally, form 1 is reformed to `(features, None)` as features and labels are passed separately to underlying methods. For TPU training, TPUEstimator may expect multiple `features` and `labels` tuples, one for each core. TPUEstimator allows various structures for inputs (namely `features` and `labels`). Both `features` and `labels` can be any nested structure supported by TF nest (namely, dict, tuples, namedtuples or any nested structure of such Tensors). `labels` could be `None` as well. These are flattened before they are passed to the infeed/outfeed library as it expects flattened lists. """ class InputsStructureRecorder(object): """The recorder to record inputs structure.""" def __init__(self, input_partition_dims=None): # Holds the structure of inputs self._feature_structure = {} self._flattened_input_dims = None if input_partition_dims: # This should have been validated in TPUConfig. assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.' if len(input_partition_dims) == 2: self._feature_dims, self._label_dims = input_partition_dims else: self._feature_dims = input_partition_dims[0] self._label_dims = None assert self._feature_dims is not None, ('input_partition_dims[0] must ' 'not be None') else: self._feature_dims = None self._label_dims = None # Internal state. self._initialized = False @property def flattened_input_dims(self): assert self._initialized, 'InputsStructureRecorder is not initialized.' 
return self._flattened_input_dims def has_labels(self): return 'labels' in self._feature_structure def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims, label_dims_names, label_names, has_labels): """Flatten input dims with the same order as flattened input tensors.""" flattened_input_dims = [] if feature_dims_names: # We need a fixed ordering for matching the tensors in features. flattened_input_dims.extend( [feature_dims[name] for name in feature_dims_names]) else: flattened_input_dims.append(feature_dims) if label_dims_names: # We need a fixed ordering for matching the tensors in labels. flattened_input_dims.extend( [label_dims[name] for name in label_dims_names]) else: if label_names: num_tensors_in_label = len(label_names) else: num_tensors_in_label = int(has_labels) # Setting `None` in input_partition_dims[1] will apply `None` to # all the tensors in labels, regardless of internal structure. flattened_input_dims.extend([label_dims] * num_tensors_in_label) return flattened_input_dims def validate_and_record_structure(self, features, labels): """Validates and records the structure of `features` and `labels`.""" # Extract structure. has_labels = labels is not None feature_names = _extract_key_names(features) label_names = _extract_key_names(labels) if not self._initialized: # Record structure. self._initialized = True if self._feature_dims is not None: feature_dims_names = _extract_key_names(self._feature_dims) if feature_dims_names != feature_names: raise ValueError( 'TPUConfig.input_partition_dims[0] mismatched feature' ' keys. Expected {}, got {}'.format(feature_names, feature_dims_names)) label_dims_names = _extract_key_names(self._label_dims) if self._label_dims is not None and label_dims_names != label_names: raise ValueError( 'TPUConfig.input_partition_dims[1] mismatched label' ' keys. Expected {}, got {}'.format(label_names, label_dims_names)) self._flattened_input_dims = self._flatten_input_dims( self._feature_dims, feature_dims_names, self._label_dims, label_dims_names, label_names, has_labels) def flatten_features_and_labels(self, features, labels, signals=None): """Flattens the `features` and `labels` to a single tensor list.""" self._feature_structure['features'] = features if labels is not None: self._feature_structure['labels'] = labels if signals is not None: self._feature_structure['signals'] = signals return data_nest.flatten(self._feature_structure) def unflatten_features_and_labels(self, flattened_inputs): """Restores the flattened inputs to original features and labels form. Args: flattened_inputs: Flattened inputs for each shard. Returns: A tuple of (`features`, `labels`), where `labels` could be None. Each one, if present, should have identical structure (single tensor vs dict) as the one returned by input_fn. Raises: ValueError: If the number of expected tensors from `flattened_inputs` mismatches the recorded structure. """ unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure, flattened_inputs) return _Inputs( unflattened_inputs['features'], unflattened_inputs.get('labels'), signals=unflattened_inputs.get('signals')) def __init__(self, input_fn, batch_axis, ctx): """Constructor. Args: input_fn: input fn for train or eval. batch_axis: A python tuple of int values describing how each tensor produced by the Estimator `input_fn` should be split across the TPU compute shards. ctx: A `_InternalTPUContext` instance with mode. Raises: ValueError: If both `sharded_features` and `num_cores` are `None`. 
""" self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder( ctx.input_partition_dims) self._sharded_per_core = ctx.is_input_sharded_per_core() self._input_fn = input_fn self._infeed_queue = None self._ctx = ctx self._batch_axis = batch_axis def generate_infeed_enqueue_ops_and_dequeue_fn(self): """Generates infeed enqueue ops and dequeue_fn.""" # While tf.while_loop is called, the body function, which invokes # `enqueue_fn` passed in, is called to construct the graph. So, input_fn # structure is recorded. enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = ( self._invoke_input_fn_and_record_structure()) self._validate_input_pipeline() def dequeue_fn(): """dequeue_fn is used by TPU to retrieve the tensors.""" # In the model-parallel case, both the host-side and device-side # computations must agree on the core on which infeed takes place. We # choose to perform infeed on logical core 0 of each replica. values = self._infeed_queue.generate_dequeue_op(tpu_device=0) # The unflatten process uses the structure information recorded above. return self._inputs_structure_recorder.unflatten_features_and_labels( values) return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator) def _invoke_input_fn_and_record_structure(self): """Deploys the input pipeline and record input structure.""" enqueue_ops = [] infeed_queues = [] all_dataset_initializers = [] num_hosts = self._ctx.num_hosts tpu_host_placement_fn = self._ctx.tpu_host_placement_function run_infeed_loop_on_coordinator = True if self._sharded_per_core: # Per-Core input pipeline deployment. # Invoke input pipeline for each core and placed on the corresponding # host. for host_id in range(num_hosts): host_device = tpu_host_placement_fn(host_id=host_id) with ops.device(host_device): with ops.name_scope('input_pipeline_task%d' % (host_id)): enqueue_ops_fn, captured_infeed_queue = ( generate_per_core_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, host_device, host_id)) if _WRAP_INPUT_FN_INTO_WHILE_LOOP: run_infeed_loop_on_coordinator = False enqueue_ops.append( _wrap_computation_in_while_loop( device=host_device, op_fn=enqueue_ops_fn)) else: enqueue_ops.append(enqueue_ops_fn()) # Infeed_queue_getter must be called after enqueue_ops_fn is called. infeed_queues.append(captured_infeed_queue.get()) elif self._ctx.is_input_broadcast_with_iterators(): # Only calls input_fn in host 0. 
host_device = tpu_host_placement_fn(host_id=0) enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn, self._inputs_structure_recorder, num_hosts)) if dataset_initializer: all_dataset_initializers.append(dataset_initializer) run_infeed_loop_on_coordinator = False wrap_fn = ( _wrap_computation_in_while_loop if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else _wrap_computation_in_while_loop_with_stopping_signals) enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn)) else: enqueue_ops.append(enqueue_ops_fn()) infeed_queues.append(captured_infeed_queue.get()) else: for host_id in range(num_hosts): host_device = tpu_host_placement_fn(host_id=host_id) with ops.device(host_device): with ops.name_scope('input_pipeline_task%d' % (host_id)): if self._ctx.is_input_per_host_with_iterators(): enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( generate_per_host_v2_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, host_device, host_id)) else: enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( generate_per_host_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, self._batch_axis, host_device, host_id)) # NOTE(xiejw): We dispatch here based on the return type of the # users `input_fn`. # # 1. If input_fn returns a Dataset instance, we initialize the # iterator outside of tf.while_loop, and call the iterator.get_next # inside tf.while_loop. This should be always safe. # # 2. If input_fn returns (features, labels), it is too late to wrap # them inside tf.while_loop, as resource initialization cannot be # handled in TF control flow properly. In this case, we will use # python loop to enqueue the data into TPU system. This may be # slow compared to the previous case. if dataset_initializer: all_dataset_initializers.append(dataset_initializer) run_infeed_loop_on_coordinator = False wrap_fn = ( _wrap_computation_in_while_loop if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else _wrap_computation_in_while_loop_with_stopping_signals) enqueue_ops.append( wrap_fn(device=host_device, op_fn=enqueue_ops_fn)) else: enqueue_ops.append(enqueue_ops_fn()) infeed_queues.append(captured_infeed_queue.get()) # infeed_queue is used to generate dequeue ops. The only thing it uses for # dequeue is dtypes and types. So, any one can be used. Here, grab the # first one. self._infeed_queue = infeed_queues[0] return enqueue_ops, [ util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers) ], run_infeed_loop_on_coordinator def _validate_input_pipeline(self): """Validates the input pipeline. Perform some sanity checks to log user friendly information. We should error out to give users better error message. But, if _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break user code, so, log a warning. Raises: RuntimeError: If the validation failed. """ if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS): err_msg = ('Input pipeline contains one or more QueueRunners. ' 'It could be slow and not scalable. Please consider ' 'converting your input pipeline to use `tf.data` instead (see ' 'https://www.tensorflow.org/guide/datasets for ' 'instructions.') if _WRAP_INPUT_FN_INTO_WHILE_LOOP: raise RuntimeError(err_msg) else: logging.warn(err_msg) class _ModelFnWrapper(object): """A `model_fn` wrapper. 
This makes calling model_fn on CPU and TPU easier and more consistent, and performs the necessary checks and mutations required by TPU training and evaluation. In addition, this wrapper manages converting the `model_fn` to a single TPU train and eval step. """ def __init__(self, model_fn, train_cache_fn, eval_cache_fn, config, params, ctx): self._model_fn = model_fn self._train_cache_fn = train_cache_fn self._eval_cache_fn = eval_cache_fn self._config = config self._params = params self._ctx = ctx def call_without_tpu(self, features, labels, is_export_mode): return self._call_model_fn(features, labels, is_export_mode=is_export_mode) def convert_to_single_tpu_train_step(self, dequeue_fn): """Converts the user-provided `model_fn` into a single train step on TPU. The user provided `model_fn` takes an input tuple (features, labels) and produces the EstimatorSpec with train_op and loss for train `mode`. This usually represents a single train computation on CPU. For TPU training, a train (computation) step is first wrapped in a tf.while_loop control flow to repeat many times and then replicated to all TPU shards. Besides, the input should be taken from TPU infeed rather than the input pipeline (input_fn) directly. To fit the TPU loop and replicate pattern, the original train computation should be reformed, which is the returned `train_step`. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. Returns: A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn representing the train step for TPU. """ host_call = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() captured_training_hooks = _CapturedObject() def train_step(loss, *cache): """Training step function for use inside a while loop.""" del loss # unused; required in function signature. inputs = dequeue_fn() features, labels = inputs.features_and_labels() # Consume the current cache estimator_spec = self._verify_estimator_spec( self._call_model_fn(features, labels, cache=cache)) # Retrieve the newly returned cache. # `cache` is a list of tensors, potentially empty (of length 0). cache = estimator_spec.cache loss, train_op = estimator_spec.loss, estimator_spec.train_op if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access captured_scaffold_fn.capture(estimator_spec.scaffold_fn) else: captured_scaffold_fn.capture(None) captured_training_hooks.capture(estimator_spec.training_hooks) tracing_ops = [] if tensor_tracer.TensorTracer.is_enabled(): tt = tensor_tracer.TensorTracer() loss, tracing_ops = tt.trace_tpu(ops.get_default_graph(), loss, self._ctx.num_replicas) # We must run train_op to update the variables prior to running the # outfeed. with ops.control_dependencies([train_op]+tracing_ops): host_call_outfeed_ops = [] if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access and estimator_spec.host_call is not None): host_call.record({'host_call': estimator_spec.host_call}) host_call_outfeed_ops = host_call.create_enqueue_op() with ops.control_dependencies(host_call_outfeed_ops): return [array_ops.identity(loss)] + cache return (train_step, host_call, captured_scaffold_fn, captured_training_hooks) def convert_to_single_tpu_eval_step(self, dequeue_fn): """Converts the user-provided `model_fn` into a single eval step on TPU. Similar to training, the user provided `model_fn` takes an input tuple (features, labels) and produces the TPUEstimatorSpec with eval_metrics for eval `mode`. 
This usually represents a single evaluation computation on CPU. For TPU evaluation, a eval (computation) step is first wrapped in a tf.while_loop control flow to repeat for many times and then replicated to all TPU shards. Besides the input and output are slightly different. Input, features and labels, should be taken from TPU infeed rather than input pipeline (input_fn) directly. Output is managed in two stages. First, the model outputs as the result of evaluation computation, usually model logits, should be transferred from TPU system to CPU. Then, all model outputs are concatenated first on CPU and sent to the metric_fn for metrics computation. To fit TPU evaluation pattern, the original eval computation should be reformed, which is the returned `eval_step`. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. Returns: A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn representing the eval step for TPU. """ host_calls = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() captured_eval_hooks = _CapturedObject() def eval_step(total_loss, *cache): """Evaluation step function for use inside a while loop.""" inputs = dequeue_fn() features, labels = inputs.features_and_labels() # Consume the current cache tpu_estimator_spec = self._call_model_fn(features, labels, cache=cache) if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access raise RuntimeError( 'estimator_spec used by TPU evaluation must have type' '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec))) # Retrieve the new returned cache cache = tpu_estimator_spec.cache loss = tpu_estimator_spec.loss captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn) captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks) to_record = {} if tpu_estimator_spec.eval_metrics: to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics if tpu_estimator_spec.host_call is not None: # We assume that evaluate won't update global step, so we don't wrap # this host_call. to_record['host_call'] = tpu_estimator_spec.host_call host_calls.record(to_record) with ops.control_dependencies(host_calls.create_enqueue_op()): return [math_ops.add(total_loss, loss)] + cache return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks def convert_to_single_tpu_predict_step(self, dequeue_fn): """Converts user provided model_fn` as a single predict step on TPU. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. Returns: A tuple of predict_fn, host_calls, and captured scaffold_fn. The predict_fn representing the predict step for TPU. """ host_calls = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() captured_predict_hooks = _CapturedObject() def predict_step(unused_scalar_stopping_signal): """Evaluation step function for use inside a while loop.""" inputs = dequeue_fn() features, labels = inputs.features_and_labels() stopping_signals = inputs.signals() assert stopping_signals is not None, ( 'Internal Error: `signals` is missing.') tpu_estimator_spec = self._call_model_fn( features, labels, is_export_mode=False) if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access raise RuntimeError( 'estimator_spec used by TPU prediction must have type' '`TPUEstimatorSpec`. 
Got {}'.format(type(tpu_estimator_spec))) self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions) captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn) captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks) to_record = {} identity_fn = lambda **kwargs: kwargs to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions] to_record['signals'] = [identity_fn, stopping_signals] if tpu_estimator_spec.host_call is not None: to_record['host_call'] = tpu_estimator_spec.host_call host_calls.record(to_record) with ops.control_dependencies(host_calls.create_enqueue_op()): return _StopSignals.as_scalar_stopping_signal(stopping_signals) return (predict_step, host_calls, captured_scaffold_fn, captured_predict_hooks) def _verify_tpu_spec_predictions(self, predictions): """Validates TPUEstimatorSpec.predictions dict.""" # TODO(xiejw): Adds validation for prediction dictionrary. # TODO(xiejw): Adds support for single tensor as predictions. if not isinstance(predictions, dict): raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.') for (key, tensor) in predictions.items(): if tensor.shape.dims[0].value is None: raise ValueError( 'The tensor with key ({}) in TPUEstimatorSpec.predictions has ' 'dynamic shape (should be static). Tensor: {}'.format(key, tensor)) return predictions def _validate_model_features_and_labels(self, features, labels, is_export_mode): """Validates that the features and labels for the model function are valid. A valid features/labels object is the one with: - Type: A tensor or any nested structure of tensors supported by TF nest, namely nested dictionary, tuple, namedtuple, or sequence of tensors. - Static shape if is_export_mode is False. Args: features: the features that would be input to the model function. labels: the labels that would be input to the model function. is_export_mode: boolean value specifying if in export mode. Raises: TypeError: If features/labels are not of the correct type. ValueError: If features/labels have dynamic shape. """ def validate(obj, obj_name): """Helper validate function.""" if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode): return if isinstance(obj, ops.Tensor): if not obj.get_shape().is_fully_defined(): raise ValueError( 'The {} to the model returned by input_fn must have static shape.' ' Tensor: {}'.format(obj_name, obj)) else: for tensor in data_nest.flatten(obj): if not tensor.get_shape().is_fully_defined(): raise ValueError( ('The {} to the model returned by input_fn must have static ' 'shape. Tensor: {}').format(obj_name, tensor)) validate(features, 'features') if labels is not None: validate(labels, 'labels') def _call_model_fn(self, features, labels, cache=None, is_export_mode=False): """Calls the model_fn with required parameters.""" self._validate_model_features_and_labels(features, labels, is_export_mode) model_fn_args = function_utils.fn_args(self._model_fn) kwargs = {} # Makes deep copy with `config` and params` in case user mutates them. 
config = copy.deepcopy(self._config) params = copy.deepcopy(self._params) if 'labels' in model_fn_args: kwargs['labels'] = labels elif labels is not None: raise ValueError( 'model_fn does not take labels, but input_fn returns labels.') if 'mode' in model_fn_args: kwargs['mode'] = self._ctx.mode if 'config' in model_fn_args: kwargs['config'] = config if 'params' in model_fn_args: kwargs['params'] = params if cache is not None: params['cache'] = cache if 'params' not in model_fn_args: raise ValueError('model_fn ({}) does not include params argument, ' 'required by TPUEstimator to pass batch size as ' 'params[\'batch_size\']'.format(self._model_fn)) if is_export_mode: batch_size_for_model_fn = None else: batch_size_for_model_fn = self._ctx.batch_size_for_model_fn if batch_size_for_model_fn is not None: _add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn) running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode) _add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu) if not running_on_cpu: user_context = tpu_context.TPUContext( internal_ctx=self._ctx, call_from_input_fn=False) _add_item_to_params(params, _CTX_KEY, user_context) estimator_spec = self._model_fn(features=features, **kwargs) if (running_on_cpu and isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access # The estimator_spec will be passed to `Estimator` directly, which expects # type `EstimatorSpec`. return estimator_spec.as_estimator_spec() else: return estimator_spec def _verify_estimator_spec(self, estimator_spec): """Validates the estimator_spec.""" if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access return estimator_spec err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.' if estimator_spec.training_chief_hooks: raise ValueError( err_msg.format('training_chief_hooks') + 'If you want' + ' to pass training hooks, please pass via training_hooks.') if estimator_spec.scaffold: logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. ' 'Please use TPUEstimatorSpec.') return estimator_spec class _OutfeedHostCall(object): """Support for `eval_metrics` and `host_call` in TPUEstimatorSpec.""" def __init__(self, ctx): self._ctx = ctx self._names = [] # All of these are dictionaries of lists keyed on the name. self._host_fns = {} self._tensor_keys = collections.defaultdict(list) self._tensors = collections.defaultdict(list) self._tensor_dtypes = collections.defaultdict(list) self._tensor_shapes = collections.defaultdict(list) @staticmethod def validate(host_calls): """Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.""" for name, host_call in host_calls.items(): if not isinstance(host_call, (tuple, list)): raise ValueError('{} should be tuple or list'.format(name)) if len(host_call) != 2: raise ValueError('{} should have two elements.'.format(name)) if not callable(host_call[0]): raise TypeError('{}[0] should be callable.'.format(name)) if not isinstance(host_call[1], (tuple, list, dict)): raise ValueError('{}[1] should be tuple or list, or dict.'.format(name)) if isinstance(host_call[1], (tuple, list)): fullargspec = tf_inspect.getfullargspec(host_call[0]) fn_args = function_utils.fn_args(host_call[0]) # wrapped_hostcall_with_global_step uses varargs, so we allow that. 
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args): raise RuntimeError( 'In TPUEstimatorSpec.{}, length of tensors {} does not match ' 'method args of the function, which takes {}.'.format( name, len(host_call[1]), len(fn_args))) @staticmethod def create_cpu_hostcall(host_calls): """Runs on the host_call on CPU instead of TPU when use_tpu=False.""" _OutfeedHostCall.validate(host_calls) ret = {} for name, host_call in host_calls.items(): host_fn, tensors = host_call if isinstance(tensors, (tuple, list)): ret[name] = host_fn(*tensors) else: # Must be dict. try: ret[name] = host_fn(**tensors) except TypeError as e: logging.warning( 'Exception while calling %s: %s. It is likely the tensors ' '(%s[1]) do not match the ' 'function\'s arguments', name, e, name) raise e return ret def record(self, host_calls): """Records the host_call structure.""" for name, host_call in host_calls.items(): host_fn, tensor_list_or_dict = host_call self._names.append(name) self._host_fns[name] = host_fn if isinstance(tensor_list_or_dict, dict): for (key, tensor) in six.iteritems(tensor_list_or_dict): self._tensor_keys[name].append(key) self._tensors[name].append(tensor) self._tensor_dtypes[name].append(tensor.dtype) self._tensor_shapes[name].append(tensor.shape) else: # List or tuple. self._tensor_keys[name] = None for tensor in tensor_list_or_dict: self._tensors[name].append(tensor) self._tensor_dtypes[name].append(tensor.dtype) self._tensor_shapes[name].append(tensor.shape) def create_enqueue_op(self): """Create the op to enqueue the recorded host_calls. Returns: A list of enqueue ops, which is empty if there are no host calls. """ if not self._names: return [] tensors = [] # TODO(jhseu): Consider deduping tensors. for name in self._names: tensors.extend(self._tensors[name]) with ops.device(tpu.core(0)): return [tpu_ops.outfeed_enqueue_tuple(tensors)] def create_tpu_hostcall(self): """Sends the tensors through outfeed and runs the host_fn on CPU. The tensors are concatenated along dimension 0 to form a global tensor across all shards. The concatenated function is passed to the host_fn and executed on the first host. Returns: A dictionary mapping name to the return type of the host_call by that name. Raises: RuntimeError: If outfeed tensor is scalar. """ if not self._names: return {} ret = {} # For each i, dequeue_ops[i] is a list containing the tensors from all # shards. This list is concatenated later. dequeue_ops = [] tensor_dtypes = [] tensor_shapes = [] for name in self._names: for _ in self._tensors[name]: dequeue_ops.append([]) for dtype in self._tensor_dtypes[name]: tensor_dtypes.append(dtype) for shape in self._tensor_shapes[name]: tensor_shapes.append(shape) # Outfeed ops execute on each replica's first logical core. Note: we must # constraint it such that we have at most one outfeed dequeue and enqueue # per replica. for i in xrange(self._ctx.num_replicas): host_device, ordinal_id = self._ctx.device_for_replica(i) with ops.device(host_device): outfeed_tensors = tpu_ops.outfeed_dequeue_tuple( dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=ordinal_id) for j, item in enumerate(outfeed_tensors): dequeue_ops[j].append(item) # Deconstruct dequeue ops. dequeue_ops_by_name = {} pos = 0 for name in self._names: dequeue_ops_by_name[name] = dequeue_ops[pos:pos + len(self._tensors[name])] pos += len(self._tensors[name]) # It is assumed evaluation always happens on single host TPU system. So, # place all ops on tpu host if possible. 
# # TODO(jhseu): Evaluate whether this is right for summaries. with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)): for name in self._names: dequeue_ops = dequeue_ops_by_name[name] for i, item in enumerate(dequeue_ops): if dequeue_ops[i][0].shape.ndims == 0: raise RuntimeError( 'All tensors outfed from TPU should preserve batch size ' 'dimension, but got scalar {}'.format(dequeue_ops[i][0])) # TODO(xiejw): Allow users to specify the axis for batch size # dimension. dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0) if self._tensor_keys[name] is not None: # The user-provided eval_metrics[1] is a dict. dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops)) try: ret[name] = self._host_fns[name](**dequeue_ops) except TypeError as e: logging.warning( 'Exception while calling %s: %s. It is likely the tensors ' '(%s[1]) do not match the ' 'function\'s arguments', name, e, name) raise e else: ret[name] = self._host_fns[name](*dequeue_ops) return ret class _OutfeedHostCallHook(session_run_hook.SessionRunHook): """Hook to run host calls when use_tpu=False.""" def __init__(self, tensors): self._tensors = tensors def begin(self): # We duplicate this code from the TPUInfeedOutfeedSessionHook rather than # create a separate hook to guarantee execution order, because summaries # need to be initialized before the outfeed thread starts. # TODO(jhseu): Make a wrapper hook instead? self._init_ops = contrib_summary.summary_writer_initializer_op() # Get all the writer resources from the initializer, so we know what to # flush. self._finalize_ops = [] for op in self._init_ops: self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) def after_create_session(self, session, coord): session.run(self._init_ops) def before_run(self, run_context): return basic_session_run_hooks.SessionRunArgs(self._tensors) def end(self, session): session.run(self._finalize_ops) class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook): """Calculate and report global_step/sec and examples/sec during runtime.""" def __init__(self, batch_size, every_n_steps=100, every_n_secs=None, output_dir=None, summary_writer=None): self._batch_size = batch_size super(ExamplesPerSecondHook, self).__init__( every_n_steps=every_n_steps, every_n_secs=every_n_secs, output_dir=output_dir, summary_writer=summary_writer) def _log_and_record(self, elapsed_steps, elapsed_time, global_step): global_step_per_sec = elapsed_steps / elapsed_time examples_per_sec = self._batch_size * global_step_per_sec if self._summary_writer is not None: global_step_summary = Summary(value=[ Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec) ]) example_summary = Summary(value=[ Summary.Value(tag='examples/sec', simple_value=examples_per_sec) ]) self._summary_writer.add_summary(global_step_summary, global_step) self._summary_writer.add_summary(example_summary, global_step) logging.info('global_step/sec: %g', global_step_per_sec) logging.info('examples/sec: %g', examples_per_sec) class InstallSignalHandlerHook(session_run_hook.SessionRunHook): """Change SIGINT (CTRL^C) handler to force quit the process. The default behavior often results in hanging processes. The original handler is restored after training/evaluation. 
""" def __init__(self): self._signal_fn = signal.getsignal(signal.SIGINT) def before_run(self, run_context): signal.signal(signal.SIGINT, signal.SIG_DFL) def end(self, session): signal.signal(signal.SIGINT, self._signal_fn) class TPUEstimator(estimator_lib.Estimator): """Estimator with TPU support. TPUEstimator also supports training on CPU and GPU. You don't need to define a separate `tf.estimator.Estimator`. TPUEstimator handles many of the details of running on TPU devices, such as replicating inputs and models for each core, and returning to host periodically to run hooks. TPUEstimator transforms a global batch size in params to a per-shard batch size when calling the `input_fn` and `model_fn`. Users should specify global batch size in constructor, and then get the batch size for each shard in `input_fn` and `model_fn` by `params['batch_size']`. - For training, `model_fn` gets per-core batch size; `input_fn` may get per-core or per-host batch size depending on `per_host_input_for_training` in `TPUConfig` (See docstring for TPUConfig for details). - For evaluation and prediction, `model_fn` gets per-core batch size and `input_fn` get per-host batch size. Evaluation ========== `model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics` for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return `EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case the following discussion on TPU evaluation does not apply. `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where `tensors` could be a list of any nested structure of `Tensor`s (See `TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns a dict from metric string name to the result of calling a metric function, namely a `(metric_tensor, update_op)` tuple. One can set `use_tpu` to `False` for testing. All training, evaluation, and predict will be executed on CPU. `input_fn` and `model_fn` will receive `train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`. Current limitations: -------------------- 1. TPU evaluation only works on a single host (one TPU worker) except BROADCAST mode. 2. `input_fn` for evaluation should **NOT** raise an end-of-input exception (`OutOfRangeError` or `StopIteration`). And all evaluation steps and all batches should have the same size. Example (MNIST): ---------------- ``` # The metric Fn which runs on CPU. def metric_fn(labels, logits): predictions = tf.argmax(logits, 1) return { 'accuracy': tf.metrics.precision( labels=labels, predictions=predictions), } # Your model Fn which runs on TPU (eval_metrics is list in this example) def model_fn(features, labels, mode, config, params): ... logits = ... if mode = tf.estimator.ModeKeys.EVAL: return tpu_estimator.TPUEstimatorSpec( mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits])) # or specify the eval_metrics tensors as dict. def model_fn(features, labels, mode, config, params): ... final_layer_output = ... if mode = tf.estimator.ModeKeys.EVAL: return tpu_estimator.TPUEstimatorSpec( mode=mode, loss=loss, eval_metrics=(metric_fn, { 'labels': labels, 'logits': final_layer_output, })) ``` Prediction ========== Prediction on TPU is an experimental feature to support large batch inference. It is not designed for latency-critical system. In addition, due to some usability issues, for prediction with small dataset, CPU `.predict`, i.e., creating a new `TPUEstimator` instance with `use_tpu=False`, might be more convenient. 
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction *should* raise an end-of-input exception (`OutOfRangeError` or `StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be precise, the ops created by `input_fn` produce one batch of the data. The `predict()` API processes one batch at a time. When reaching the end of the data source, an end-of-input exception should be raised by one of these operations. The user usually does not need to do this manually. As long as the dataset is not repeated forever, the `tf.data` API will raise an end-of-input exception automatically after the last batch has been produced. Note: Estimator.predict returns a Python generator. Please consume all the data from the generator so that TPUEstimator can shutdown the TPU system properly for user. Current limitations: -------------------- 1. TPU prediction only works on a single host (one TPU worker). 2. `input_fn` must return a `Dataset` instance rather than `features`. In fact, .train() and .evaluate() also support Dataset as return value. Example (MNIST): ---------------- ``` height = 32 width = 32 total_examples = 100 def predict_input_fn(params): batch_size = params['batch_size'] images = tf.random_uniform( [total_examples, height, width, 3], minval=-1, maxval=1) dataset = tf.data.Dataset.from_tensor_slices(images) dataset = dataset.map(lambda images: {'image': images}) dataset = dataset.batch(batch_size) return dataset def model_fn(features, labels, params, mode): # Generate predictions, called 'output', from features['image'] if mode == tf.estimator.ModeKeys.PREDICT: return tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={ 'predictions': output, 'is_padding': features['is_padding'] }) tpu_est = TPUEstimator( model_fn=model_fn, ..., predict_batch_size=16) # Fully consume the generator so that TPUEstimator can shutdown the TPU # system. for item in tpu_est.predict(input_fn=input_fn): # Filter out item if the `is_padding` is 1. # Process the 'predictions' ``` Exporting ========= `export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`, and another with `tag_constants.SERVING` and `tag_constants.TPU`. At serving time, these tags are used to select metagraph to load. Before running the graph on TPU, TPU system needs to be initialized. If TensorFlow Serving model-server is used, this is done automatically. If not, please call `session.run(tpu.initialize_system())`. `tpu.outside_compilation` can be used to wrap TPU incompatible ops in `model_fn`. Example: ---------------- ``` def model_fn(features, labels, mode, config, params): ... logits = ... export_outputs = { 'logits': export_output_lib.PredictOutput( {'logits': logits}) } def host_call(logits): class_ids = math_ops.argmax(logits) classes = string_ops.as_string(class_ids) export_outputs['classes'] = export_output_lib.ClassificationOutput(classes=classes) tpu.outside_compilation(host_call, logits) ... ``` """ def __init__(self, model_fn=None, train_cache_fn=None, eval_cache_fn=None, model_dir=None, config=None, params=None, use_tpu=True, train_batch_size=None, eval_batch_size=None, predict_batch_size=None, batch_axis=None, eval_on_tpu=True, export_to_tpu=True, warm_start_from=None): """Constructs an `TPUEstimator` instance. Args: model_fn: Model function as required by `Estimator` which returns EstimatorSpec or TPUEstimatorSpec. `training_hooks`, 'evaluation_hooks', and `prediction_hooks` must not capure any TPU Tensor inside the model_fn. 
model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. If `None`, the model_dir in `config` will be used if set. If both are set, they must be same. If both are `None`, a temporary directory will be used. config: An `tpu_config.RunConfig` configuration object. Cannot be `None`. params: An optional `dict` of hyper parameters that will be passed into `input_fn` and `model_fn`. Keys are names of parameters, values are basic python types. There are reserved keys for `TPUEstimator`, including 'batch_size'. use_tpu: A bool indicating whether TPU support is enabled. Currently, - TPU training and evaluation respect this bit, but eval_on_tpu can override execution of eval. See below. - Predict still happens on CPU. train_batch_size: An int representing the global training batch size. TPUEstimator transforms this global batch size to a per-shard batch size, as params['batch_size'], when calling `input_fn` and `model_fn`. Cannot be `None` if `use_tpu` is `True`. Must be divisible by total number of replicas. eval_batch_size: An int representing evaluation batch size. Must be divisible by total number of replicas. predict_batch_size: An int representing the prediction batch size. Must be divisible by total number of replicas. batch_axis: A python tuple of int values describing how each tensor produced by the Estimator `input_fn` should be split across the TPU compute shards. For example, if your input_fn produced (images, labels) where the images tensor is in `HWCN` format, your shard dimensions would be [3, 0], where 3 corresponds to the `N` dimension of your images Tensor, and 0 corresponds to the dimension along which to split the labels to match up with the corresponding images. If None is supplied, and per_host_input_for_training is True, batches will be sharded based on the major dimension. If tpu_config.per_host_input_for_training is False or `PER_HOST_V2`, batch_axis is ignored. eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`. export_to_tpu: If True, `export_savedmodel()` exports a metagraph for serving on TPU besides the one on CPU. warm_start_from: Optional string filepath to a checkpoint or SavedModel to warm-start from, or a `tf.estimator.WarmStartSettings` object to fully configure warm-starting. If the string filepath is provided instead of a `WarmStartSettings`, then all variables are warm-started, and it is assumed that vocabularies and Tensor names are unchanged. Raises: ValueError: `params` has reserved keys already. """ if config is None or not isinstance(config, tpu_config.RunConfig): raise ValueError( '`config` must be provided with type `tpu_config.RunConfig`') if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS): raise ValueError('{} are reserved keys but existed in params {}.'.format( _RESERVED_PARAMS_KEYS, params)) if use_tpu: # Perform some very basic validations. More validations will be found in # _InternalTPUContext. if train_batch_size is None: raise ValueError('`train_batch_size` cannot be `None`') util_lib.check_positive_integer(train_batch_size, 'train_batch_size') if (config.tpu_config.per_host_input_for_training is tpu_config.InputPipelineConfig.PER_SHARD_V1 and config.tpu_config.num_cores_per_replica): raise ValueError( 'Model parallelism only supports per host input for training. 
' 'Please adjust TPURunconfig.per_host_input_for_training.') if eval_batch_size is not None: util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size') if predict_batch_size is not None: util_lib.check_positive_integer(predict_batch_size, 'predict_batch_size') # Verifies the model_fn signature according to Estimator framework. estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access # We cannot store config and params in this constructor as parent # constructor might change them, such as assigning a temp dir for # config.model_dir. model_function = self._augment_model_fn( model_fn, train_cache_fn, eval_cache_fn, batch_axis) # Overwrite log_step_count_steps to disable TensorLoggingHook and # StepCounterHook from being created in Estimator. TPUEstimator already # added equivalent hooks in _augment_model_fn above. self._log_every_n_steps = config.log_step_count_steps config = config.replace(log_step_count_steps=None) # Passing non-None params as wrapped model_fn has it. params = params or {} super(TPUEstimator, self).__init__( model_fn=model_function, model_dir=model_dir, config=config, params=params, warm_start_from=warm_start_from) self._iterations_per_training_loop = ( self._config.tpu_config.iterations_per_loop) # All properties passed to _InternalTPUContext are immutable. # pylint: disable=protected-access self._ctx = tpu_context._get_tpu_context( self._config, train_batch_size, eval_batch_size, predict_batch_size, use_tpu, eval_on_tpu) self._export_to_tpu = export_to_tpu self._is_input_fn_invoked = None self._rendezvous = {} def _add_meta_graph_for_mode(self, builder, input_receiver_fn_map, checkpoint_path, save_variables=True, mode=model_fn_lib.ModeKeys.PREDICT, export_tags=None, check_variables=True): if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT: raise NotImplementedError( 'TPUEstimator only handles mode PREDICT for exporting ' 'when `export_to_tpu` is `True`; ' 'got {}.'.format(mode)) (super(TPUEstimator, self)._add_meta_graph_for_mode( builder, input_receiver_fn_map, checkpoint_path, save_variables, mode=mode, export_tags=export_tags, check_variables=check_variables)) if self._export_to_tpu: input_receiver_fn_map = { _REWRITE_FOR_INFERENCE_MODE: input_receiver_fn_map[mode] } export_tags = [tag_constants.SERVING, tag_constants.TPU] mode = _REWRITE_FOR_INFERENCE_MODE # See b/110052256 for why `check_variables` is `False`. (super(TPUEstimator, self)._add_meta_graph_for_mode( builder, input_receiver_fn_map, checkpoint_path, save_variables=False, mode=mode, export_tags=export_tags, check_variables=False)) def _call_model_fn(self, features, labels, mode, config): if mode == _REWRITE_FOR_INFERENCE_MODE: return self._call_model_fn_for_inference(features, labels, mode, config) else: return super(TPUEstimator, self)._call_model_fn(features, labels, mode, config) def _call_model_fn_for_inference(self, features, labels, mode, config): """Wraps `_call_model_fn` for `export_savedmodel`.""" if mode != _REWRITE_FOR_INFERENCE_MODE: raise ValueError('mode must be {}; ' 'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode)) capture = _CapturedObject() def computation(): """Compute tpu tensors used in export_outputs. Passed to rewrite_for_inference so that model_fn will be called under the rewriting contexts. Only tpu tensors are returned, but export_outputs and scaffold are captured. Returns: A list of Tensors used in export_outputs and not marked for outside_compilation. 
""" # We should only call model fn once and it should be inside `computation` # so that building the graph will happen under `rewrite_for_inference`. mode = model_fn_lib.ModeKeys.PREDICT estimator_spec = self._call_model_fn(features, labels, mode, config) # We pick the TPU tensors out from `export_output` and later return them # from `computation` for rewriting. tensors_dict = collections.OrderedDict( (k, _export_output_to_tensors(v)) for k, v in six.iteritems(estimator_spec.export_outputs)) tensors = nest.flatten(tensors_dict) tpu_tensors = [t for t in tensors if t is not None] # We cannot return anything other than `tpu_tensors` here so we capture # the rest for later use. capture.capture((estimator_spec, tensors_dict, tensors)) return tpu_tensors tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation) estimator_spec, tensors_dict, tensors = capture.get() # Reconstruct `tensors`, but with `tpu_tensors` replaced with # `tpu_tensors_on_cpu`. new_tensors = [] for t in tensors: if t is None: new_tensors.append(None) else: new_tensors.append(tpu_tensors_on_cpu.pop(0)) # Reconstruct `tensors_dict`. new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors) # Reconstruct `export_outputs`. export_outputs = estimator_spec.export_outputs new_export_outputs = collections.OrderedDict( (k, _clone_export_output_with_tensors(export_outputs[k], v)) for k, v in six.iteritems(new_tensors_dict)) return estimator_spec._replace(export_outputs=new_export_outputs) def _create_global_step(self, graph): """Creates a global step suitable for TPUs. Args: graph: The graph in which to create the global step. Returns: A global step `Tensor`. Raises: ValueError: if the global step tensor is already defined. """ return _create_global_step(graph) def _convert_train_steps_to_hooks(self, steps, max_steps): with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx: if ctx.is_running_on_cpu(): return super(TPUEstimator, self)._convert_train_steps_to_hooks( steps, max_steps) # On TPU. if steps is None and max_steps is None: raise ValueError( 'For TPU training, one of `steps` or `max_steps` must be set. ' 'Cannot be both `None`.') # Estimator.train has explicit positiveness check. if steps is not None: util_lib.check_positive_integer(steps, 'Train steps') if max_steps is not None: util_lib.check_positive_integer(max_steps, 'Train max_steps') return [ _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps) ] def _convert_eval_steps_to_hooks(self, steps): with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx: if ctx.is_running_on_cpu(): return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps) if steps is None: raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.') util_lib.check_positive_integer(steps, 'Eval steps') return [ evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access num_evals=steps), _SetEvalIterationsHook(steps) ] def _call_input_fn(self, input_fn, mode): """Calls the input function. Args: input_fn: The input function. mode: ModeKeys Returns: In TPU mode, returns an input_fn to be called later in model_fn. Otherwise, calls the input_fn and returns either fatures or (features, labels). Raises: ValueError: if input_fn takes invalid arguments or does not have `params`. """ input_fn_args = function_utils.fn_args(input_fn) config = self.config # a deep copy. kwargs = {} if 'params' in input_fn_args: kwargs['params'] = self.params # a deep copy. 
else: raise ValueError('input_fn ({}) does not include params argument, ' 'required by TPUEstimator to pass batch size as ' 'params["batch_size"]'.format(input_fn)) if 'config' in input_fn_args: kwargs['config'] = config if 'mode' in input_fn_args: kwargs['mode'] = mode # Records the fact input_fn has been invoked. self._is_input_fn_invoked = True with self._ctx.with_mode(mode) as ctx: # Setting the batch size in params first. This helps user to have same # input_fn for use_tpu=True/False. batch_size_for_input_fn = ctx.batch_size_for_input_fn if batch_size_for_input_fn is not None: _add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY, batch_size_for_input_fn) # For export_savedmodel, input_fn is never passed to Estimator. So, # `is_export_mode` must be False. if ctx.is_running_on_cpu(is_export_mode=False): with ops.device('/device:CPU:0'): return input_fn(**kwargs) # For TPU computation, input_fn should be invoked in a tf.while_loop for # performance. While constructing the tf.while_loop, the structure of # inputs returned by the `input_fn` needs to be recorded. The structure # includes whether features or labels is dict or single Tensor, dict keys, # tensor shapes, and dtypes. The recorded structure is used to create the # infeed dequeue ops, which must be wrapped and passed as a Fn, called # inside the TPU computation, as the TPU computation is wrapped inside a # tf.while_loop also. So, we either pass input_fn to model_fn or pass # dequeue_fn to model_fn. Here, `input_fn` is passed directly as # `features` in `model_fn` signature. def _input_fn(ctx): _add_item_to_params(kwargs['params'], _CTX_KEY, ctx) return input_fn(**kwargs) return _input_fn def _validate_features_in_predict_input(self, result): """Skip the validation. For TPUEstimator, we do not need to check the result type. `_InputPipeline` has stronger check. Parent class's check generates confusing warning msg. Args: result: `features` returned by input_fn. 
""" pass def train(self, input_fn, hooks=None, steps=None, max_steps=None, saving_listeners=None): rendezvous = error_handling.ErrorRendezvous(num_sources=3) self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous try: return super(TPUEstimator, self).train( input_fn=input_fn, hooks=hooks, steps=steps, max_steps=max_steps, saving_listeners=saving_listeners) except Exception: # pylint: disable=broad-except rendezvous.record_error('training_loop', sys.exc_info()) finally: rendezvous.record_done('training_loop') rendezvous.raise_errors() def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None, name=None): rendezvous = error_handling.ErrorRendezvous(num_sources=3) self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous try: return super(TPUEstimator, self).evaluate( input_fn, steps=steps, hooks=hooks, checkpoint_path=checkpoint_path, name=name) except Exception: # pylint: disable=broad-except rendezvous.record_error('evaluation_loop', sys.exc_info()) finally: rendezvous.record_done('evaluation_loop') rendezvous.raise_errors() def predict(self, input_fn, predict_keys=None, hooks=None, checkpoint_path=None, yield_single_examples=True): rendezvous = error_handling.ErrorRendezvous(num_sources=3) self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous try: for result in super(TPUEstimator, self).predict( input_fn=input_fn, predict_keys=predict_keys, hooks=hooks, checkpoint_path=checkpoint_path, yield_single_examples=yield_single_examples): yield result except Exception: # pylint: disable=broad-except rendezvous.record_error('prediction_loop', sys.exc_info()) finally: rendezvous.record_done('prediction_loop') rendezvous.raise_errors() rendezvous.record_done('prediction_loop') rendezvous.raise_errors() def _augment_model_fn(self, model_fn, train_cache_fn, eval_cache_fn, batch_axis): """Returns a new model_fn, which wraps the TPU support.""" def _model_fn(features, labels, mode, config, params): """A Estimator `model_fn` for TPUEstimator.""" with self._ctx.with_mode(mode) as ctx: model_fn_wrapper = _ModelFnWrapper(model_fn, train_cache_fn, eval_cache_fn, config, params, ctx) # `input_fn` is called in `train()`, `evaluate()`, and `predict()`, # but not in `export_savedmodel()`. if self._is_input_fn_invoked: is_export_mode = False else: is_export_mode = True # Clear the bit. self._is_input_fn_invoked = None # examples_hook is added to training_hooks for both CPU and TPU # execution. if self._log_every_n_steps is not None: examples_hook = ExamplesPerSecondHook( ctx.global_batch_size, output_dir=self.model_dir, every_n_steps=self._log_every_n_steps) if ctx.is_running_on_cpu(is_export_mode=is_export_mode): logging.info('Running %s on CPU', mode) estimator_spec = model_fn_wrapper.call_without_tpu( features, labels, is_export_mode=is_export_mode) if self._log_every_n_steps is not None: estimator_spec = estimator_spec._replace( training_hooks=estimator_spec.training_hooks + (examples_hook,)) return estimator_spec assert labels is None, '`labels` passed to `model_fn` must be `None`.' # TPUEstimator._call_input_fn passes `input_fn` as features to here. assert callable(features), '`input_fn` is not callable.' 
input_fn = features input_holders = _InputPipeline(input_fn, batch_axis, ctx) enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = ( input_holders.generate_infeed_enqueue_ops_and_dequeue_fn()) graph = ops.get_default_graph() for enqueue_op in enqueue_ops: if isinstance(enqueue_op, list): graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op) else: graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op) if mode == model_fn_lib.ModeKeys.TRAIN: compile_op, loss, host_call, scaffold, training_hooks = ( _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)) host_ops = host_call.create_tpu_hostcall() if host_ops is None: host_ops = [] shutdown_hooks = [] shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE', 'shutdown_worker') if shutdown_mode: if shutdown_mode == 'shutdown_worker': finalizer_hooks = [ session_support.ShutdownLameWorkers(timeout_ms=60 * 1000), ] elif shutdown_mode == 'shutdown_computation': finalizer_hooks = [ session_support.RestartComputation(timeout_ms=60 * 1000), ] else: raise ValueError( 'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode) shutdown_hooks.append( session_support.GracefulShutdownHook( checkpoint_prefix=self.model_dir + '/model.ckpt', on_shutdown_hooks=finalizer_hooks)) with ops.control_dependencies([loss]): global_step = array_ops.identity(training.get_global_step()) hooks = input_hooks + shutdown_hooks hooks.extend([ TPUInfeedOutfeedSessionHook( ctx, enqueue_ops, host_ops, tpu_compile_op=compile_op, run_infeed_loop_on_coordinator=( run_infeed_loop_on_coordinator), rendezvous=self._rendezvous[mode], master=self._config.master, session_config=self._session_config, ), InstallSignalHandlerHook() ]) if self._log_every_n_steps is not None: logging_hook_frequency = ( # Divide and round up (self._log_every_n_steps + self._config.tpu_config.iterations_per_loop - 1) // self._config.tpu_config.iterations_per_loop) hooks.append( training.LoggingTensorHook({ 'loss': array_ops.identity(loss), 'step': global_step, }, every_n_iter=logging_hook_frequency)) examples_hook._set_steps_per_run( # pylint: disable=protected-access self._config.tpu_config.iterations_per_loop) hooks.append(examples_hook) if training_hooks: hooks.extend(training_hooks) chief_hooks = [] if (self._config.save_checkpoints_secs or self._config.save_checkpoints_steps): checkpoint_hook = training.CheckpointSaverHook( self.model_dir, save_secs=self._config.save_checkpoints_secs, save_steps=self._config.save_checkpoints_steps, scaffold=scaffold) checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access self._config.tpu_config.iterations_per_loop) chief_hooks.append(checkpoint_hook) summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss) with ops.control_dependencies([loss]): update_ops = _sync_variables_ops(ctx) # Validate the TPU training graph to catch basic errors _validate_tpu_training_graph() train_op = control_flow_ops.group(*update_ops) graph.add_to_collection(_TPU_TRAIN_OP, train_op) return model_fn_lib.EstimatorSpec( mode, loss=loss, training_chief_hooks=chief_hooks, training_hooks=hooks, train_op=train_op, scaffold=scaffold) if mode == model_fn_lib.ModeKeys.EVAL: compile_op, total_loss, host_calls, scaffold, eval_hooks = ( _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)) iterations_per_loop_var = _create_or_get_iterations_per_loop() mean_loss = math_ops.div( total_loss, math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype)) with ops.control_dependencies([mean_loss]): # After TPU evaluation computation is done (the mean_loss tensor), 
# reads all variables back from TPU and updates the eval step # counter properly internal_ops_to_run = _sync_variables_ops(ctx) internal_ops_to_run.append( _increase_eval_step_op(iterations_per_loop_var)) host_call_ret = host_calls.create_tpu_hostcall() eval_metric_ops = {} eval_update_ops = [] eval_metrics = host_call_ret.get('eval_metrics', {}) if eval_metrics: # Creates a dummy metric update_op for all metrics. Estimator # expects all metrics in `eval_metric_ops` have update_op and calls # them one by one. The real metric update_ops are invoked in a # separated thread. So, here give Estimator the dummy op for all # metrics. with ops.control_dependencies(internal_ops_to_run): dummy_update_op = control_flow_ops.no_op() for k, v in eval_metrics.items(): eval_metric_ops[k] = (v[0], dummy_update_op) eval_update_ops.append(v[1]) else: # If no eval metrics are passed, create an identity node for the # loss and add `internal_ops_to_run` to its dependencies. So # `internal_ops_to_run` can be executed. with ops.control_dependencies(internal_ops_to_run): mean_loss = array_ops.identity(mean_loss) if 'host_call' not in host_call_ret: host_ops = [] else: host_ops = host_call_ret['host_call'] hooks = [ TPUInfeedOutfeedSessionHook( ctx, enqueue_ops, eval_update_ops + host_ops, tpu_compile_op=compile_op, run_infeed_loop_on_coordinator=( run_infeed_loop_on_coordinator), rendezvous=self._rendezvous[mode], master=self._config.evaluation_master, session_config=self._session_config, )] + input_hooks if eval_hooks: hooks.extend(eval_hooks) return model_fn_lib.EstimatorSpec( mode, loss=mean_loss, evaluation_hooks=hooks, eval_metric_ops=eval_metric_ops, scaffold=scaffold) # Predict assert mode == model_fn_lib.ModeKeys.PREDICT (compile_op, dummy_predict_op, host_calls, scaffold, prediction_hooks) = _predict_on_tpu_system( ctx, model_fn_wrapper, dequeue_fn) with ops.control_dependencies([dummy_predict_op]): internal_ops_to_run = _sync_variables_ops(ctx) with ops.control_dependencies(internal_ops_to_run): dummy_predict_op = control_flow_ops.no_op() # In train and evaluation, the main TPU program is passed to monitored # training session to run. Infeed enqueue and outfeed dequeue are # executed in side threads. This is not the configuration for # prediction mode. # # For prediction, the Estimator executes the EstimatorSpec.predictions # directly and yield the element (via generator) to call site. So, the # outfeed based prediction must be passed to MonitoredSession directly. # Other parts of the TPU execution are organized as follows. # # 1. All outfeed based Tensors must be grouped with predictions Tensors # to form a single invocation. This avoid the issue we might trigger # multiple outfeeds incorrectly. To achieve this, `host_call` is # placed in control_dependencies of `stopping_signals`, and # `stopping_signals` is passed into _StoppingPredictHook, which sets # the `stopping_signals` as SessionRunArgs. MonitoredSession merges # all SessionRunArgs with the fetch in session.run together. # # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue) # are grouped together. They will be launched once and only once in # side threads and they quit naturally according to the SAME stopping # condition. 
enqueue_ops.append(dummy_predict_op) host_call_ret = host_calls.create_tpu_hostcall() if 'host_call' not in host_call_ret: host_ops = [] else: host_ops = host_call_ret['host_call'] predictions = host_call_ret['predictions'] _verify_cross_hosts_transfer_size( predictions, message=( 'The estimated size for TPUEstimatorSpec.predictions is too ' 'large.')) signals = host_call_ret['signals'] with ops.control_dependencies(host_ops): host_ops = [] # Empty, we do do not need it anymore. scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal( signals) predictions = _PaddingSignals.slice_tensor_or_dict( predictions, signals) hooks = [ _StoppingPredictHook(scalar_stopping_signal), TPUInfeedOutfeedSessionHookForPrediction( ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode], tpu_compile_op=compile_op, master=self._config.master, session_config=self._session_config), ] + input_hooks if prediction_hooks: hooks.extend(prediction_hooks) return model_fn_lib.EstimatorSpec( mode, prediction_hooks=hooks, predictions=predictions, scaffold=scaffold) return _model_fn def _export_output_to_tensors(export_output): """Get a list of `Tensors` used in `export_output`. Args: export_output: an `ExportOutput` object such as `ClassificationOutput`, `RegressionOutput`, or `PredictOutput`. Returns: a list of tensors used in export_output. Raises: ValueError: if `export_output` is not one of `ClassificationOutput`, `RegressionOutput`, or `PredictOutput`. """ if isinstance(export_output, export_output_lib.ClassificationOutput): return [export_output.scores, export_output.classes] elif isinstance(export_output, export_output_lib.RegressionOutput): return [export_output.value] elif isinstance(export_output, export_output_lib.PredictOutput): return list(export_output.outputs.values()) else: raise ValueError( '`export_output` must be have type `ClassificationOutput`, ' '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) def _clone_export_output_with_tensors(export_output, tensors): """Clones `export_output` but with new `tensors`. Args: export_output: an `ExportOutput` object such as `ClassificationOutput`, `RegressionOutput`, or `PredictOutput`. tensors: a list of `Tensors` used to construct a new `export_output`. Returns: A dict similar to `export_output` but with `tensors`. Raises: ValueError: if `export_output` is not one of `ClassificationOutput`, `RegressionOutput`, or `PredictOutput`. 
""" if isinstance(export_output, export_output_lib.ClassificationOutput): if len(tensors) != 2: raise ValueError('tensors must be of length 2; ' 'got {}.'.format(len(tensors))) return export_output_lib.ClassificationOutput(*tensors) elif isinstance(export_output, export_output_lib.RegressionOutput): if len(tensors) != 1: raise ValueError('tensors must be of length 1; ' 'got {}'.format(len(tensors))) return export_output_lib.RegressionOutput(*tensors) elif isinstance(export_output, export_output_lib.PredictOutput): return export_output_lib.PredictOutput( dict(zip(export_output.outputs.keys(), tensors))) else: raise ValueError( '`export_output` must be have type `ClassificationOutput`, ' '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" iterations_per_loop_var = _create_or_get_iterations_per_loop() (single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks ) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn) def multi_tpu_eval_steps_on_single_shard(): loop_vars = [_ZERO_LOSS] if model_fn_wrapper._eval_cache_fn is not None: batch_size = ctx.global_batch_size num_shards = ctx._config._tpu_config.num_shards loop_vars += model_fn_wrapper._eval_cache_fn(batch_size // num_shards) return training_loop.repeat( iterations_per_loop_var, single_tpu_eval_step, loop_vars) compile_op, ret = tpu.split_compile_and_shard( multi_tpu_eval_steps_on_single_shard, inputs=[], num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment) loss = ret[0] scaffold = _get_scaffold(captured_scaffold_fn) return compile_op, loss, host_calls, scaffold, captured_eval_hooks.get() def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" iterations_per_loop_var = _create_or_get_iterations_per_loop() (single_tpu_train_step, host_call, captured_scaffold_fn, captured_training_hooks) = ( model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn)) def multi_tpu_train_steps_on_single_shard(): loop_vars = [_INITIAL_LOSS] if model_fn_wrapper._train_cache_fn is not None: batch_size = ctx.global_batch_size num_shards = ctx._config._tpu_config.num_shards loop_vars += model_fn_wrapper._train_cache_fn(batch_size // num_shards) return training_loop.repeat( iterations_per_loop_var, single_tpu_train_step, loop_vars) compile_op, ret = tpu.split_compile_and_shard( multi_tpu_train_steps_on_single_shard, inputs=[], num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment) loss = ret[0] scaffold = _get_scaffold(captured_scaffold_fn) return compile_op, loss, host_call, scaffold, captured_training_hooks.get() def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" (single_tpu_predict_step, host_calls, captured_scaffold_fn, captured_predict_hooks ) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn) def multi_tpu_predict_steps_on_single_shard(): def cond(scalar_stopping_signal): return math_ops.logical_not( _StopSignals.should_stop(scalar_stopping_signal)) inputs = [_StopSignals.NON_STOPPING_SIGNAL] outputs = training_loop.while_loop( cond, single_tpu_predict_step, inputs=inputs, name=b'loop') return outputs (compile_op, dummy_predict_op,) = tpu.split_compile_and_shard( multi_tpu_predict_steps_on_single_shard, inputs=[], 
num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment) dummy_predict_op = dummy_predict_op[0] scaffold = _get_scaffold(captured_scaffold_fn) return (compile_op, dummy_predict_op, host_calls, scaffold, captured_predict_hooks.get()) def _wrap_computation_in_while_loop(device, op_fn): """Wraps the ops generated by `op_fn` in tf.while_loop.""" def computation(i): with ops.control_dependencies(op_fn()): return i + 1 iterations_per_loop_var = _create_or_get_iterations_per_loop() # By setting parallel_iterations=1, the parallel execution in while_loop is # basically turned off. with ops.device(device): iterations = array_ops.identity(iterations_per_loop_var) return control_flow_ops.while_loop( lambda i: i < iterations, computation, [constant_op.constant(0)], parallel_iterations=1) def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn): """Wraps the ops generated by `op_fn` in tf.while_loop.""" def cond(scalar_stopping_signal): return math_ops.logical_not( _StopSignals.should_stop(scalar_stopping_signal)) def computation(unused_scalar_stopping_signal): return_value = op_fn() execute_ops = return_value['ops'] signals = return_value['signals'] with ops.control_dependencies(execute_ops): return _StopSignals.as_scalar_stopping_signal(signals) # By setting parallel_iterations=1, the parallel execution in while_loop is # basically turned off. with ops.device(device): return control_flow_ops.while_loop( cond, computation, [_StopSignals.NON_STOPPING_SIGNAL], parallel_iterations=1) def _validate_tpu_training_graph(): """Validate graph before running distributed training. Raises: ValueError: If the graph seems invalid for running on device """ operations = ops.get_default_graph().get_operations() # Check if there is atleast one CrossReplicaSum operation in the graph # This should be introduced by using the CrossShardOptimizer wrapper cross_replica_sum_ops = [ o for o in operations if o.type == _CROSS_REPLICA_SUM_OP ] if not cross_replica_sum_ops: raise ValueError( 'CrossShardOptimizer must be used for model training on TPUs.') class _CapturedObject(object): """A placeholder to capture an object. This is useful when we need to capture a Python object in the Tensorflow control flow body function and use it outside the control flow. """ def __init__(self): self._object = None self._captured = False def capture(self, o): if self._captured: raise RuntimeError( 'InternalError: Object can capture only once. Please file bug.') self._captured = True self._object = o def get(self): if not self._captured: raise RuntimeError( 'InternalError: Object is not captured properly before `get`. 
' 'Please file bug.') return self._object def _get_scaffold(captured_scaffold_fn): """Retrieves the Scaffold from `captured_scaffold_fn`.""" with _CapturingContext(message='Inside scaffold_fn'): scaffold_fn = captured_scaffold_fn.get() if scaffold_fn: scaffold = scaffold_fn() if scaffold is None: raise ValueError( 'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed') else: scaffold = None if scaffold: wrapped_finalize = scaffold.finalize def _finalize(): with _CapturingContext('Inside Scaffold.finalize'): wrapped_finalize() scaffold.finalize = _finalize return scaffold class _CapturingContext(control_flow_ops.ControlFlowContext): """Tracks references to Tensors defined in TPU replication.""" def __init__(self, message): control_flow_ops.ControlFlowContext.__init__(self) self._message = message def to_control_flow_context_def(self, context_def, export_scope=None): # pylint: disable=useless-super-delegation # NOTE(slebedev): the method is required by `ControlFlowContext`. super(_CapturingContext, self).to_control_flow_context_def( context_def, export_scope) def AddOp(self, op): # pylint: disable=invalid-name for c in op.inputs: if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access raise ValueError('{}: Op {} depends on TPU computation {}, ' 'which is not allowed.'.format(self._message, op, c)) def __enter__(self): # pylint: disable=protected-access self._g = ops.get_default_graph() self._old = self._g._get_control_flow_context() self._g._set_control_flow_context(self) # pylint: enable=protected-access def __exit__(self, _, __, ___): # pylint: disable=invalid-name self._g._set_control_flow_context(self._old) # pylint: disable=protected-access class _Inputs(object): """A data structure representing the input_fn returned values. This also supports the returned value from input_fn as `Dataset`. """ def __init__(self, features=None, labels=None, dataset=None, signals=None): if dataset is not None and (features is not None or labels is not None or signals is not None): raise RuntimeError('Internal Error: Either (features and labels) or ' 'dataset should be provided, not both. Please file ' 'bug') self._features = features self._labels = labels self._signals = signals self._dataset = dataset self._iterator = None @staticmethod def from_input_fn(return_values): """Returns an `_Inputs` instance according to `input_fn` return value.""" if isinstance(return_values, dataset_ops.DatasetV2): dataset = return_values return _Inputs(dataset=dataset) features, labels = _Inputs._parse_inputs(return_values) return _Inputs(features, labels) @staticmethod def _parse_inputs(return_values): if isinstance(return_values, tuple): features, labels = return_values else: features, labels = return_values, None return features, labels @property def is_dataset(self): """Returns True if the return value from input_fn is Dataset.""" return self._dataset is not None def dataset_initializer(self): """Returns the dataset's initializer. The initializer must be run before calling `features_and_labels`. """ self._iterator = dataset_ops.make_initializable_iterator(self._dataset) return self._iterator.initializer def features_and_labels(self): """Gets `features` and `labels`.""" if self.is_dataset: if self._iterator is None: raise RuntimeError('Internal error: Must run dataset_initializer ' 'before calling features_and_labels(). 
Please file ' 'a bug!') return _Inputs._parse_inputs(self._iterator.get_next()) return (self._features, self._labels) def signals(self): return self._signals @property def dataset(self): return self._dataset class _InputsWithStoppingSignals(_Inputs): """Inputs with `_StopSignals` inserted into the dataset.""" def __init__(self, dataset, batch_size, add_padding=False, num_invocations_per_step=1): assert dataset is not None user_provided_dataset = dataset.map( _InputsWithStoppingSignals.insert_stopping_signal( stop=False, batch_size=batch_size, add_padding=add_padding)) if num_invocations_per_step == 1: final_batch_dataset = dataset.take(1).map( _InputsWithStoppingSignals.insert_stopping_signal( stop=True, batch_size=batch_size, add_padding=add_padding)) else: # We append (2 * num_invocations_per_step - 1) batches for exhausting the # user_provided_dataset and stop properly. # For example, if num_invocations_per_step is 2, we append 3 additional # padding batches: b1, b2, b3. # If user_provided_dataset contains two batches: a1, a2 # Step 1: [a1, a2] # Step 2: [b1, b2] -> STOP # If user_provided_dataset contains three batches: a1, a2, a3. # The training loops: # Step 1: [a1, a2] # Step 2: [a3, b1] # Step 3: [b2, b3] -> STOP. final_batch_dataset = dataset.take(1).map( _InputsWithStoppingSignals.insert_stopping_signal( stop=True, batch_size=batch_size, add_padding=add_padding)) final_batch_dataset = final_batch_dataset.repeat( 2 * num_invocations_per_step - 1) def _set_mask(data_dict): signals = data_dict['signals'] signals['padding_mask'] = array_ops.ones_like(signals['padding_mask']) data_dict['signals'] = signals return data_dict # Mask out the extra batch. final_batch_dataset = final_batch_dataset.map(_set_mask) dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2) super(_InputsWithStoppingSignals, self).__init__(dataset=dataset) self._current_inputs = None def features_and_labels(self): if self._current_inputs is not None: raise RuntimeError( 'Internal Error: The previous inputs have not been properly ' 'consumed. First call features_and_labels, then call signals.') inputs_with_signals = self._iterator.get_next() features = inputs_with_signals['features'] labels = inputs_with_signals.get('labels') self._current_inputs = inputs_with_signals return features, labels def signals(self): """Returns the `Signals` from `_Inputs`.""" if self._current_inputs is None: raise RuntimeError( 'Internal Error: The current inputs have not been properly ' 'generated. First call features_and_labels, then call signals.') signals = self._current_inputs['signals'] self._current_inputs = None return signals @staticmethod def insert_stopping_signal(stop, batch_size, add_padding=False): """Inserts stopping_signal into dataset via _map_fn. Here we change the data structure in the dataset, such that the return value is a dictionary now and `features`, `labels`, and `signals` are three distinguished keys in that dict. This provides a better structure, which eases the process to decompose the inputs (see `features_and_labels`). Args: stop: bool, state of current stopping signals. batch_size: int, batch size. add_padding: bool, whether to pad the tensor to full batch size. Returns: A map_fn passed to dataset.map API. """ def _map_fn(*args): """The map fn to insert signals.""" if len(args) == 1: # Unpack the single Tensor/dict argument as features. This is required # for the input_fn returns no labels. 
args = args[0] features, labels = _Inputs._parse_inputs(args) new_input_dict = {} if add_padding: padding_mask, features, labels = ( _PaddingSignals.pad_features_and_labels(features, labels, batch_size)) new_input_dict['features'] = features if labels is not None: new_input_dict['labels'] = labels else: new_input_dict['features'] = features if labels is not None: new_input_dict['labels'] = labels padding_mask = None new_input_dict['signals'] = _StopSignals( stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict() return new_input_dict return _map_fn class _StopSignals(object): """Signals class holding all logic to handle TPU stopping condition.""" NON_STOPPING_SIGNAL = False STOPPING_SIGNAL = True def __init__(self, stop, batch_size, padding_mask=None): self._stop = stop self._batch_size = batch_size self._padding_mask = padding_mask def as_dict(self): """Returns the signals as Python dict.""" shape = [self._batch_size, 1] dtype = dtypes.bool if self._stop: stopping = array_ops.ones(shape=shape, dtype=dtype) else: stopping = array_ops.zeros(shape=shape, dtype=dtype) signals = {'stopping': stopping} if self._padding_mask is not None: signals['padding_mask'] = self._padding_mask return signals @staticmethod def as_scalar_stopping_signal(signals): return array_ops.identity(signals['stopping'][0][0]) @staticmethod def should_stop(scalar_stopping_signal): """Detects whether scalar_stopping_signal indicates stopping.""" if isinstance(scalar_stopping_signal, ops.Tensor): # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF # way to express the bool check whether scalar_stopping_signal is True. return math_ops.logical_and(scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL) else: # For non Tensor case, it is used in SessionRunHook. So, we cannot modify # the graph anymore. Here, we use pure Python. 
return bool(scalar_stopping_signal) class _PaddingSignals(object): """Signals class holding all logic to handle padding.""" @staticmethod def pad_features_and_labels(features, labels, batch_size): """Pads out the batch dimension of features and labels.""" real_batch_size = array_ops.shape( _PaddingSignals._find_any_tensor(features))[0] batch_size_tensor = constant_op.constant(batch_size, dtypes.int32) check_greater = check_ops.assert_greater_equal( batch_size_tensor, real_batch_size, data=(batch_size_tensor, real_batch_size), message='The real batch size should not be greater than batch_size.') with ops.control_dependencies([check_greater]): missing_count = batch_size_tensor - real_batch_size def pad_single_tensor(tensor): """Pads out the batch dimension of a tensor to the complete batch_size.""" rank = len(tensor.shape) assert rank > 0 padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1)) padded_shape = (batch_size,) + tuple(tensor.shape[1:]) padded_tensor = array_ops.pad(tensor, padding) padded_tensor.set_shape(padded_shape) return padded_tensor def nest_pad(tensor_or_dict): return nest.map_structure(pad_single_tensor, tensor_or_dict) features = nest_pad(features) if labels is not None: labels = nest_pad(labels) padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count, batch_size) return padding_mask, features, labels @staticmethod def slice_tensor_or_dict(tensor_or_dict, signals): """Slice the real Tensors according to padding mask in signals.""" padding_mask = signals['padding_mask'] batch_size = array_ops.shape(padding_mask)[0] def verify_batch_size(tensor): check_batch_size = math_ops.equal(batch_size, tensor.shape[0]) with ops.control_dependencies([check_batch_size]): return array_ops.identity(tensor) def slice_single_tensor(tensor): rank = len(tensor.shape) assert rank > 0 real_batch_size = batch_size - math_ops.reduce_sum(padding_mask) return verify_batch_size(tensor)[0:real_batch_size] # As we split the Tensors to all TPU cores and concat them back, it is # important to ensure the real data is placed before padded ones, i.e., # order is preserved. By that, the sliced padding mask should have all 0's. # If this assertion failed, # the slice logic here would not hold. sliced_padding_mask = slice_single_tensor(padding_mask) assert_padding_mask = math_ops.equal( math_ops.reduce_sum(sliced_padding_mask), 0) with ops.control_dependencies([assert_padding_mask]): should_stop = _StopSignals.should_stop( _StopSignals.as_scalar_stopping_signal(signals)) is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0) def slice_fn(tensor): # If the current batch is full batch or part of stopping signals, we do # not need to slice to save performance. 
return control_flow_ops.cond( math_ops.logical_or(should_stop, is_full_batch), (lambda: verify_batch_size(tensor)), (lambda: slice_single_tensor(tensor))) return nest.map_structure(slice_fn, tensor_or_dict) @staticmethod def _find_any_tensor(batch_features): tensors = [ x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor) ] if not tensors: raise ValueError('Cannot find any Tensor in features dict.') return tensors[0] @staticmethod def _padding_mask(real_batch_size, missing_count, batch_size): padding_mask = array_ops.concat([ array_ops.zeros((real_batch_size,), dtype=dtypes.int32), array_ops.ones((missing_count,), dtype=dtypes.int32) ], axis=0) padding_mask.set_shape((batch_size,)) return padding_mask def _verify_cross_hosts_transfer_size(tensor_dict, message): total_size = 0 tensor_structure = {} for key, tensor in tensor_dict.items(): shape = tensor.shape size = np.product(shape) * tensor.dtype.size tensor_structure[key] = shape total_size += size if total_size >= _ONE_GIGABYTE: raise ValueError( '{} The transfer size is larger than the protobuf limit. Please ' 'consider to use Tensors with smaller shapes or reduce batch ' 'size. Given:\n' '{}'.format( message, '\n'.join([ ' -- Key: {}, Shape: {}'.format(k, v) for k, v in tensor_structure.items() ]))) def _add_item_to_params(params, key, value): """Adds a new item into `params`.""" if isinstance(params, hparam.HParams): # For HParams, we need to use special API. if key in params: params.set_hparam(key, value) else: params.add_hparam(key, value) else: # Now params is Python dict. params[key] = value def export_estimator_savedmodel(estimator, export_dir_base, serving_input_receiver_fn, assets_extra=None, as_text=False, checkpoint_path=None, strip_default_attrs=False): """Export `Estimator` trained model for TPU inference. Args: estimator: `Estimator` with which model has been trained. export_dir_base: A string containing a directory in which to create timestamped subdirectories containing exported SavedModels. serving_input_receiver_fn: A function that takes no argument and returns a `ServingInputReceiver` or `TensorServingInputReceiver`. assets_extra: A dict specifying how to populate the assets.extra directory within the exported SavedModel, or `None` if no extra assets are needed. as_text: whether to write the SavedModel proto in text format. checkpoint_path: The checkpoint path to export. If `None` (the default), the most recent checkpoint found within the model directory is chosen. strip_default_attrs: Boolean. If `True`, default-valued attributes will be removed from the NodeDefs. Returns: The string path to the exported directory. """ # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use # `estimator.config`. config = tpu_config.RunConfig(model_dir=estimator.model_dir) est = TPUEstimator( estimator._model_fn, # pylint: disable=protected-access config=config, params=estimator.params, use_tpu=True, train_batch_size=2048, # Does not matter. eval_batch_size=2048, # Does not matter. ) return est.export_savedmodel(export_dir_base, serving_input_receiver_fn, assets_extra, as_text, checkpoint_path, strip_default_attrs)
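# ---------------------------------------------------------------------------
# Editorial usage sketch (appended; not part of the original module). It is a
# minimal, hedged illustration of how the pieces documented above are usually
# wired together through the public tf.contrib.tpu API, which mirrors the
# classes in this file: a model_fn returning TPUEstimatorSpec, an input_fn
# that reads the per-shard batch size from params['batch_size'], and a
# RunConfig carrying a TPUConfig. The TPU address, model_dir, shard count,
# batch sizes and the toy model itself are illustrative assumptions, not
# recommendations.
# ---------------------------------------------------------------------------
import tensorflow as tf


def _sketch_input_fn(params):
  """Toy input_fn; TPUEstimator injects the per-shard batch size via params."""
  batch_size = params['batch_size']
  images = tf.random_uniform([1024, 28, 28, 1])
  labels = tf.random_uniform([1024], maxval=10, dtype=tf.int32)
  dataset = tf.data.Dataset.from_tensor_slices(({'x': images}, labels))
  # TPU requires fully defined batch shapes, hence drop_remainder=True.
  return dataset.repeat().batch(batch_size, drop_remainder=True)


def _sketch_model_fn(features, labels, mode, params):
  """Toy model_fn; CrossShardOptimizer keeps the TPU graph validation happy."""
  del params  # The injected per-shard batch size is not needed here.
  logits = tf.layers.dense(tf.layers.flatten(features['x']), 10)
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.contrib.tpu.CrossShardOptimizer(
        tf.train.GradientDescentOptimizer(learning_rate=0.01))
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss,
                                           train_op=train_op)

  # Evaluation: metric_fn runs on the host CPU over outfed tensors.
  # (PREDICT is omitted from this sketch for brevity.)
  def metric_fn(labels, logits):
    predictions = tf.argmax(logits, axis=1)
    return {'accuracy': tf.metrics.accuracy(labels=labels,
                                            predictions=predictions)}

  return tf.contrib.tpu.TPUEstimatorSpec(
      mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits]))


def _sketch_run():
  """Constructs the estimator and runs a short train/eval cycle."""
  run_config = tf.contrib.tpu.RunConfig(
      master='grpc://10.0.0.1:8470',  # hypothetical TPU worker address
      model_dir='/tmp/tpu_estimator_sketch',
      tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=100,
                                          num_shards=8))
  estimator = tf.contrib.tpu.TPUEstimator(
      model_fn=_sketch_model_fn,
      config=run_config,
      use_tpu=True,
      train_batch_size=1024,  # global batch size, split across the 8 shards
      eval_batch_size=1024)
  estimator.train(input_fn=_sketch_input_fn, max_steps=1000)
  estimator.evaluate(input_fn=_sketch_input_fn, steps=10)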
ymcui/Chinese-XLNet
1650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/xlnet.py
Python
from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import tensorflow as tf import modeling def _get_initializer(FLAGS): """Get variable intializer.""" if FLAGS.init == "uniform": initializer = tf.initializers.random_uniform( minval=-FLAGS.init_range, maxval=FLAGS.init_range, seed=None) elif FLAGS.init == "normal": initializer = tf.initializers.random_normal( stddev=FLAGS.init_std, seed=None) else: raise ValueError("Initializer {} not supported".format(FLAGS.init)) return initializer class XLNetConfig(object): """XLNetConfig contains hyperparameters that are specific to a model checkpoint; i.e., these hyperparameters should be the same between pretraining and finetuning. The following hyperparameters are defined: n_layer: int, the number of layers. d_model: int, the hidden size. n_head: int, the number of attention heads. d_head: int, the dimension size of each attention head. d_inner: int, the hidden size in feed-forward layers. ff_activation: str, "relu" or "gelu". untie_r: bool, whether to untie the biases in attention. n_token: int, the vocab size. """ def __init__(self, FLAGS=None, json_path=None): """Constructing an XLNetConfig. One of FLAGS or json_path should be provided.""" assert FLAGS is not None or json_path is not None self.keys = ["n_layer", "d_model", "n_head", "d_head", "d_inner", "ff_activation", "untie_r", "n_token"] if FLAGS is not None: self.init_from_flags(FLAGS) if json_path is not None: self.init_from_json(json_path) def init_from_flags(self, FLAGS): for key in self.keys: setattr(self, key, getattr(FLAGS, key)) def init_from_json(self, json_path): with tf.gfile.Open(json_path) as f: json_data = json.load(f) for key in self.keys: setattr(self, key, json_data[key]) def to_json(self, json_path): """Save XLNetConfig to a json file.""" json_data = {} for key in self.keys: json_data[key] = getattr(self, key) json_dir = os.path.dirname(json_path) if not tf.gfile.Exists(json_dir): tf.gfile.MakeDirs(json_dir) with tf.gfile.Open(json_path, "w") as f: json.dump(json_data, f, indent=4, sort_keys=True) def create_run_config(is_training, is_finetune, FLAGS): kwargs = dict( is_training=is_training, use_tpu=FLAGS.use_tpu, use_bfloat16=FLAGS.use_bfloat16, dropout=FLAGS.dropout, dropatt=FLAGS.dropatt, init=FLAGS.init, init_range=FLAGS.init_range, init_std=FLAGS.init_std, clamp_len=FLAGS.clamp_len) if not is_finetune: kwargs.update(dict( mem_len=FLAGS.mem_len, reuse_len=FLAGS.reuse_len, bi_data=FLAGS.bi_data, clamp_len=FLAGS.clamp_len, same_length=FLAGS.same_length)) return RunConfig(**kwargs) class RunConfig(object): """RunConfig contains hyperparameters that could be different between pretraining and finetuning. These hyperparameters can also be changed from run to run. We store them separately from XLNetConfig for flexibility. """ def __init__(self, is_training, use_tpu, use_bfloat16, dropout, dropatt, init="normal", init_range=0.1, init_std=0.02, mem_len=None, reuse_len=None, bi_data=False, clamp_len=-1, same_length=False): """ Args: is_training: bool, whether in training mode. use_tpu: bool, whether TPUs are used. use_bfloat16: bool, use bfloat16 instead of float32. dropout: float, dropout rate. dropatt: float, dropout rate on attention probabilities. init: str, the initialization scheme, either "normal" or "uniform". init_range: float, initialize the parameters with a uniform distribution in [-init_range, init_range]. Only effective when init="uniform". 
init_std: float, initialize the parameters with a normal distribution with mean 0 and stddev init_std. Only effective when init="normal". mem_len: int, the number of tokens to cache. reuse_len: int, the number of tokens in the currect batch to be cached and reused in the future. bi_data: bool, whether to use bidirectional input pipeline. Usually set to True during pretraining and False during finetuning. clamp_len: int, clamp all relative distances larger than clamp_len. -1 means no clamping. same_length: bool, whether to use the same attention length for each token. """ self.init = init self.init_range = init_range self.init_std = init_std self.is_training = is_training self.dropout = dropout self.dropatt = dropatt self.use_tpu = use_tpu self.use_bfloat16 = use_bfloat16 self.mem_len = mem_len self.reuse_len = reuse_len self.bi_data = bi_data self.clamp_len = clamp_len self.same_length = same_length class XLNetModel(object): """A wrapper of the XLNet model used during both pretraining and finetuning.""" def __init__(self, xlnet_config, run_config, input_ids, seg_ids, input_mask, mems=None, perm_mask=None, target_mapping=None, inp_q=None, **kwargs): """ Args: xlnet_config: XLNetConfig, run_config: RunConfig, input_ids: int32 Tensor in shape [len, bsz], the input token IDs. seg_ids: int32 Tensor in shape [len, bsz], the input segment IDs. input_mask: float32 Tensor in shape [len, bsz], the input mask. 0 for real tokens and 1 for padding. mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory from previous batches. The length of the list equals n_layer. If None, no memory is used. perm_mask: float32 Tensor in shape [len, len, bsz]. If perm_mask[i, j, k] = 0, i attend to j in batch k; if perm_mask[i, j, k] = 1, i does not attend to j in batch k. If None, each position attends to all the others. target_mapping: float32 Tensor in shape [num_predict, len, bsz]. If target_mapping[i, j, k] = 1, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction. Set to None during finetuning. inp_q: float32 Tensor in shape [len, bsz]. 1 for tokens with losses and 0 for tokens without losses. Only used during pretraining for two-stream attention. Set to None during finetuning. """ initializer = _get_initializer(run_config) tfm_args = dict( n_token=xlnet_config.n_token, initializer=initializer, attn_type="bi", n_layer=xlnet_config.n_layer, d_model=xlnet_config.d_model, n_head=xlnet_config.n_head, d_head=xlnet_config.d_head, d_inner=xlnet_config.d_inner, ff_activation=xlnet_config.ff_activation, untie_r=xlnet_config.untie_r, is_training=run_config.is_training, use_bfloat16=run_config.use_bfloat16, use_tpu=run_config.use_tpu, dropout=run_config.dropout, dropatt=run_config.dropatt, mem_len=run_config.mem_len, reuse_len=run_config.reuse_len, bi_data=run_config.bi_data, clamp_len=run_config.clamp_len, same_length=run_config.same_length ) input_args = dict( inp_k=input_ids, seg_id=seg_ids, input_mask=input_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, inp_q=inp_q) tfm_args.update(input_args) with tf.variable_scope("model", reuse=tf.AUTO_REUSE): (self.output, self.new_mems, self.lookup_table ) = modeling.transformer_xl(**tfm_args) self.input_mask = input_mask self.initializer = initializer self.xlnet_config = xlnet_config self.run_config = run_config def get_pooled_out(self, summary_type, use_summ_proj=True): """ Args: summary_type: str, "last", "first", "mean", or "attn". The method to pool the input to get a vector representation. 
use_summ_proj: bool, whether to use a linear projection during pooling. Returns: float32 Tensor in shape [bsz, d_model], the pooled representation. """ xlnet_config = self.xlnet_config run_config = self.run_config with tf.variable_scope("model", reuse=tf.AUTO_REUSE): summary = modeling.summarize_sequence( summary_type=summary_type, hidden=self.output, d_model=xlnet_config.d_model, n_head=xlnet_config.n_head, d_head=xlnet_config.d_head, dropout=run_config.dropout, dropatt=run_config.dropatt, is_training=run_config.is_training, input_mask=self.input_mask, initializer=self.initializer, use_proj=use_summ_proj) return summary def get_sequence_output(self): """ Returns: float32 Tensor in shape [len, bsz, d_model]. The last layer hidden representation of XLNet. """ return self.output def get_new_memory(self): """ Returns: list of float32 Tensors in shape [mem_len, bsz, d_model], the new memory that concatenates the previous memory with the current input representations. The length of the list equals n_layer. """ return self.new_mems def get_embedding_table(self): """ Returns: float32 Tensor in shape [n_token, d_model]. The embedding lookup table. Used for tying embeddings between input and output layers. """ return self.lookup_table def get_initializer(self): """ Returns: A tf initializer. Used to initialize variables in layers on top of XLNet. """ return self.initializer
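# ---------------------------------------------------------------------------
# Editorial usage sketch (appended; not part of the original module). It is a
# minimal, hedged example of constructing an XLNetModel for finetuning,
# following the argument descriptions above: inputs use the [seq_len, batch]
# layout, input_mask marks real tokens with 0 and padding with 1, and the
# pooled output feeds a small classification head. The json config path, the
# shapes and the extra dense layer are illustrative assumptions only; the
# sketch relies on the `tf` import and the classes defined in this module.
# ---------------------------------------------------------------------------
def _sketch_build_classifier(seq_len=128, batch_size=8, n_class=2):
  """Builds an XLNet encoder plus a toy classification head."""
  xlnet_config = XLNetConfig(json_path='xlnet_config.json')  # hypothetical path
  run_config = RunConfig(
      is_training=True,
      use_tpu=False,
      use_bfloat16=False,
      dropout=0.1,
      dropatt=0.1)

  # Placeholder inputs in the documented [seq_len, batch] layout.
  input_ids = tf.placeholder(tf.int32, [seq_len, batch_size])
  seg_ids = tf.placeholder(tf.int32, [seq_len, batch_size])
  input_mask = tf.placeholder(tf.float32, [seq_len, batch_size])  # 0=real, 1=pad

  model = XLNetModel(
      xlnet_config=xlnet_config,
      run_config=run_config,
      input_ids=input_ids,
      seg_ids=seg_ids,
      input_mask=input_mask)

  # [bsz, d_model] summary vector; reuse the wrapper's initializer so the head
  # follows the same initialization scheme as the encoder.
  summary = model.get_pooled_out(summary_type='last', use_summ_proj=True)
  logits = tf.layers.dense(
      summary, n_class, kernel_initializer=model.get_initializer())
  return logits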
ymcui/Chinese-XLNet
1650
Pre-Trained Chinese XLNet(中文XLNet预训练模型)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/modeling.py
Python
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The main BERT model and related functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import six import tensorflow as tf class BertConfig(object): """Configuration for `BertModel`.""" def __init__(self, vocab_size, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, initializer_range=0.02): """Constructs BertConfig. Args: vocab_size: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The stdev of the truncated_normal_initializer for initializing all weight matrices. 
""" self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size=None) for (key, value) in six.iteritems(json_object): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with tf.gfile.GFile(json_file, "r") as reader: text = reader.read() return cls.from_dict(json.loads(text)) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" class BertModel(object): """BERT model ("Bidirectional Embedding Representations from a Transformer"). Example usage: ```python # Already been converted into WordPiece token ids input_ids = tf.constant([[31, 51, 99], [15, 5, 0]]) input_mask = tf.constant([[1, 1, 1], [1, 1, 0]]) token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]]) config = modeling.BertConfig(vocab_size=32000, hidden_size=512, num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024) model = modeling.BertModel(config=config, is_training=True, input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids) label_embeddings = tf.get_variable(...) pooled_output = model.get_pooled_output() logits = tf.matmul(pooled_output, label_embeddings) ... ``` """ def __init__(self, config, is_training, input_ids, input_mask=None, token_type_ids=None, use_one_hot_embeddings=True, scope=None, embedding_trainable=True): """Constructor for BertModel. Args: config: `BertConfig` instance. is_training: bool. rue for training model, false for eval model. Controls whether dropout will be applied. input_ids: int32 Tensor of shape [batch_size, seq_length]. input_mask: (optional) int32 Tensor of shape [batch_size, seq_length]. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. use_one_hot_embeddings: (optional) bool. Whether to use one-hot word embeddings or tf.embedding_lookup() for the word embeddings. On the TPU, it is must faster if this is True, on the CPU or GPU, it is faster if this is False. scope: (optional) variable scope. Defaults to "bert". # embedding_trainable: indicate if the embedding matrix is trainable (default: True) Raises: ValueError: The config is invalid or one of the input tensor shapes is invalid. """ config = copy.deepcopy(config) if not is_training: config.hidden_dropout_prob = 0.0 config.attention_probs_dropout_prob = 0.0 input_shape = get_shape_list(input_ids, expected_rank=2) batch_size = input_shape[0] seq_length = input_shape[1] if input_mask is None: input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32) if token_type_ids is None: token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32) with tf.variable_scope(scope, default_name="bert"): with tf.variable_scope("embeddings"): # Perform embedding lookup on the word ids. 
(self.embedding_output, self.embedding_table) = embedding_lookup( input_ids=input_ids, vocab_size=config.vocab_size, embedding_size=config.hidden_size, initializer_range=config.initializer_range, word_embedding_name="word_embeddings", use_one_hot_embeddings=use_one_hot_embeddings, trainable=embedding_trainable) # Add positional embeddings and token type embeddings, then layer # normalize and perform dropout. self.embedding_output = embedding_postprocessor( input_tensor=self.embedding_output, use_token_type=True, token_type_ids=token_type_ids, token_type_vocab_size=config.type_vocab_size, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=config.initializer_range, max_position_embeddings=config.max_position_embeddings, dropout_prob=config.hidden_dropout_prob) with tf.variable_scope("encoder"): # This converts a 2D mask of shape [batch_size, seq_length] to a 3D # mask of shape [batch_size, seq_length, seq_length] which is used # for the attention scores. self.attention_mask = create_attention_mask_from_input_mask( input_ids, input_mask) # Run the stacked transformer. # `sequence_output` shape = [batch_size, seq_length, hidden_size]. self.all_encoder_layers = transformer_model( input_tensor=self.embedding_output, attention_mask=self.attention_mask, hidden_size=config.hidden_size, num_hidden_layers=config.num_hidden_layers, num_attention_heads=config.num_attention_heads, intermediate_size=config.intermediate_size, intermediate_act_fn=get_activation(config.hidden_act), hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, initializer_range=config.initializer_range, do_return_all_layers=True) self.sequence_output = self.all_encoder_layers[-1] # The "pooler" converts the encoded sequence tensor of shape # [batch_size, seq_length, hidden_size] to a tensor of shape # [batch_size, hidden_size]. This is necessary for segment-level # (or segment-pair-level) classification tasks where we need a fixed # dimensional representation of the segment. with tf.variable_scope("pooler"): # We "pool" the model by simply taking the hidden state corresponding # to the first token. We assume that this has been pre-trained first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1) self.pooled_output = tf.layers.dense( first_token_tensor, config.hidden_size, activation=tf.tanh, kernel_initializer=create_initializer(config.initializer_range)) def get_attention_mask(self): return self.attention_mask def get_pooled_output(self): return self.pooled_output def get_sequence_output(self): """Gets final hidden layer of encoder. Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the final hidden of the transformer encoder. """ return self.sequence_output def get_all_encoder_layers(self): return self.all_encoder_layers def get_embedding_output(self): """Gets output of the embedding lookup (i.e., input to the transformer). Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the output of the embedding layer, after summing the word embeddings with the positional embeddings and the token type embeddings, then performing layer normalization. This is the input to the transformer. """ return self.embedding_output def get_embedding_table(self): return self.embedding_table def gelu(input_tensor): """Gaussian Error Linear Unit. This is a smoother version of the RELU. 
Original paper: https://arxiv.org/abs/1606.08415 Args: input_tensor: float Tensor to perform activation. Returns: `input_tensor` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0))) return input_tensor * cdf def get_activation(activation_string): """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation. """ # We assume that anything that"s not a string is already an activation # function, so we just return it. if not isinstance(activation_string, six.string_types): return activation_string if not activation_string: return None act = activation_string.lower() if act == "linear": return None elif act == "relu": return tf.nn.relu elif act == "gelu": return gelu elif act == "tanh": return tf.tanh else: raise ValueError("Unsupported activation: %s" % act) def get_assignment_map_from_checkpoint(tvars, init_checkpoint): """Compute the union of the current variables and checkpoint variables.""" assignment_map = {} initialized_variable_names = {} name_to_variable = collections.OrderedDict() for var in tvars: name = var.name m = re.match("^(.*):\\d+$", name) if m is not None: name = m.group(1) name_to_variable[name] = var init_vars = tf.train.list_variables(init_checkpoint) assignment_map = collections.OrderedDict() for x in init_vars: (name, var) = (x[0], x[1]) if name not in name_to_variable: continue assignment_map[name] = name initialized_variable_names[name] = 1 initialized_variable_names[name + ":0"] = 1 return (assignment_map, initialized_variable_names) def dropout(input_tensor, dropout_prob): """Perform dropout. Args: input_tensor: float Tensor. dropout_prob: Python float. The probability of dropping out a value (NOT of *keeping* a dimension as in `tf.nn.dropout`). Returns: A version of `input_tensor` with dropout applied. """ if dropout_prob is None or dropout_prob == 0.0: return input_tensor output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob) return output def layer_norm(input_tensor, name=None): """Run layer normalization on the last dimension of the tensor.""" return tf.contrib.layers.layer_norm( inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name) def layer_norm_and_dropout(input_tensor, dropout_prob, name=None): """Runs layer normalization followed by dropout.""" output_tensor = layer_norm(input_tensor, name) output_tensor = dropout(output_tensor, dropout_prob) return output_tensor def create_initializer(initializer_range=0.02): """Creates a `truncated_normal_initializer` with the given range.""" return tf.truncated_normal_initializer(stddev=initializer_range) def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02, word_embedding_name="word_embeddings", use_one_hot_embeddings=False, trainable=True): """Looks up words embeddings for id tensor. Args: input_ids: int32 Tensor of shape [batch_size, seq_length] containing word ids. vocab_size: int. Size of the embedding vocabulary. embedding_size: int. Width of the word embeddings. initializer_range: float. Embedding initialization range. word_embedding_name: string. Name of the embedding table. use_one_hot_embeddings: bool. 
If True, use one-hot method for word embeddings. If False, use `tf.nn.embedding_lookup()`. One hot is better for TPUs. Returns: float Tensor of shape [batch_size, seq_length, embedding_size]. """ # This function assumes that the input is of shape [batch_size, seq_length, # num_inputs]. # # If the input is a 2D tensor of shape [batch_size, seq_length], we # reshape to [batch_size, seq_length, 1]. if input_ids.shape.ndims == 2: input_ids = tf.expand_dims(input_ids, axis=[-1]) embedding_table = tf.get_variable( name=word_embedding_name, shape=[vocab_size, embedding_size], initializer=create_initializer(initializer_range), trainable=trainable) if use_one_hot_embeddings: flat_input_ids = tf.reshape(input_ids, [-1]) one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size) output = tf.matmul(one_hot_input_ids, embedding_table) else: output = tf.nn.embedding_lookup(embedding_table, input_ids) input_shape = get_shape_list(input_ids) output = tf.reshape(output, input_shape[0:-1] + [input_shape[-1] * embedding_size]) return (output, embedding_table) def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1): """Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. """ input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = input_tensor if use_token_type: if token_type_ids is None: raise ValueError("`token_type_ids` must be specified if" "`use_token_type` is True.") token_type_table = tf.get_variable( name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) # This vocab will be small so we always do one-hot here, since it is always # faster for a small vocabulary. 
flat_token_type_ids = tf.reshape(token_type_ids, [-1]) one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if use_position_embeddings: assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) with tf.control_dependencies([assert_op]): full_position_embeddings = tf.get_variable( name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) # Since the position embedding table is a learned variable, we create it # using a (long) sequence length `max_position_embeddings`. The actual # sequence length might be shorter than this, for faster training of # tasks that do not have long sequences. # # So `full_position_embeddings` is effectively an embedding table # for position [0, 1, 2, ..., max_position_embeddings-1], and the current # sequence has positions [0, 1, 2, ... seq_length-1], so we can just # perform a slice. position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1]) num_dims = len(output.shape.as_list()) # Only the last two dimensions are relevant (`seq_length` and `width`), so # we broadcast among the first dimensions, which is typically just # the batch size. position_broadcast_shape = [] for _ in range(num_dims - 2): position_broadcast_shape.append(1) position_broadcast_shape.extend([seq_length, width]) position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape) output += position_embeddings output = layer_norm_and_dropout(output, dropout_prob) return output def create_attention_mask_from_input_mask(from_tensor, to_mask): """Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length]. """ from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) batch_size = from_shape[0] from_seq_length = from_shape[1] to_shape = get_shape_list(to_mask, expected_rank=2) to_seq_length = to_shape[1] to_mask = tf.cast( tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32) # We don't assume that `from_tensor` is a mask (although it could be). We # don't actually care if we attend *from* padding tokens (only *to* padding) # tokens so we create a tensor of all ones. # # `broadcast_ones` = [batch_size, from_seq_length, 1] broadcast_ones = tf.ones( shape=[batch_size, from_seq_length, 1], dtype=tf.float32) # Here we broadcast along two dimensions to create the mask. mask = broadcast_ones * to_mask return mask def attention_layer(from_tensor, to_tensor, attention_mask=None, num_attention_heads=1, size_per_head=512, query_act=None, key_act=None, value_act=None, attention_probs_dropout_prob=0.0, initializer_range=0.02, do_return_2d_tensor=False, batch_size=None, from_seq_length=None, to_seq_length=None): """Performs multi-headed attention from `from_tensor` to `to_tensor`. This is an implementation of multi-headed attention based on "Attention is all you Need". If `from_tensor` and `to_tensor` are the same, then this is self-attention. Each timestep in `from_tensor` attends to the corresponding sequence in `to_tensor`, and returns a fixed-with vector. This function first projects `from_tensor` into a "query" tensor and `to_tensor` into "key" and "value" tensors. 
These are (effectively) a list of tensors of length `num_attention_heads`, where each tensor is of shape [batch_size, seq_length, size_per_head]. Then, the query and key tensors are dot-producted and scaled. These are softmaxed to obtain attention probabilities. The value tensors are then interpolated by these probabilities, then concatenated back to a single tensor and returned. In practice, the multi-headed attention are done with transposes and reshapes rather than actual separate tensors. Args: from_tensor: float Tensor of shape [batch_size, from_seq_length, from_width]. to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width]. attention_mask: (optional) int32 Tensor of shape [batch_size, from_seq_length, to_seq_length]. The values should be 1 or 0. The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. num_attention_heads: int. Number of attention heads. size_per_head: int. Size of each attention head. query_act: (optional) Activation function for the query transform. key_act: (optional) Activation function for the key transform. value_act: (optional) Activation function for the value transform. attention_probs_dropout_prob: (optional) float. Dropout probability of the attention probabilities. initializer_range: float. Range of the weight initializer. do_return_2d_tensor: bool. If True, the output will be of shape [batch_size * from_seq_length, num_attention_heads * size_per_head]. If False, the output will be of shape [batch_size, from_seq_length, num_attention_heads * size_per_head]. batch_size: (Optional) int. If the input is 2D, this might be the batch size of the 3D version of the `from_tensor` and `to_tensor`. from_seq_length: (Optional) If the input is 2D, this might be the seq length of the 3D version of the `from_tensor`. to_seq_length: (Optional) If the input is 2D, this might be the seq length of the 3D version of the `to_tensor`. Returns: float Tensor of shape [batch_size, from_seq_length, num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is true, this will be of shape [batch_size * from_seq_length, num_attention_heads * size_per_head]). Raises: ValueError: Any of the arguments or tensor shapes are invalid. 
""" def transpose_for_scores(input_tensor, batch_size, num_attention_heads, seq_length, width): output_tensor = tf.reshape( input_tensor, [batch_size, seq_length, num_attention_heads, width]) output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3]) return output_tensor from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) to_shape = get_shape_list(to_tensor, expected_rank=[2, 3]) if len(from_shape) != len(to_shape): raise ValueError( "The rank of `from_tensor` must match the rank of `to_tensor`.") if len(from_shape) == 3: batch_size = from_shape[0] from_seq_length = from_shape[1] to_seq_length = to_shape[1] elif len(from_shape) == 2: if (batch_size is None or from_seq_length is None or to_seq_length is None): raise ValueError( "When passing in rank 2 tensors to attention_layer, the values " "for `batch_size`, `from_seq_length`, and `to_seq_length` " "must all be specified.") # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` from_tensor_2d = reshape_to_matrix(from_tensor) to_tensor_2d = reshape_to_matrix(to_tensor) # `query_layer` = [B*F, N*H] query_layer = tf.layers.dense( from_tensor_2d, num_attention_heads * size_per_head, activation=query_act, name="query", kernel_initializer=create_initializer(initializer_range)) # `key_layer` = [B*T, N*H] key_layer = tf.layers.dense( to_tensor_2d, num_attention_heads * size_per_head, activation=key_act, name="key", kernel_initializer=create_initializer(initializer_range)) # `value_layer` = [B*T, N*H] value_layer = tf.layers.dense( to_tensor_2d, num_attention_heads * size_per_head, activation=value_act, name="value", kernel_initializer=create_initializer(initializer_range)) # `query_layer` = [B, N, F, H] query_layer = transpose_for_scores(query_layer, batch_size, num_attention_heads, from_seq_length, size_per_head) # `key_layer` = [B, N, T, H] key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads, to_seq_length, size_per_head) # Take the dot product between "query" and "key" to get the raw # attention scores. # `attention_scores` = [B, N, F, T] attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(size_per_head))) if attention_mask is not None: # `attention_mask` = [B, 1, F, T] attention_mask = tf.expand_dims(attention_mask, axis=[1]) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0 # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_scores += adder # Normalize the attention scores to probabilities. # `attention_probs` = [B, N, F, T] attention_probs = tf.nn.softmax(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = dropout(attention_probs, attention_probs_dropout_prob) # `value_layer` = [B, T, N, H] value_layer = tf.reshape( value_layer, [batch_size, to_seq_length, num_attention_heads, size_per_head]) # `value_layer` = [B, N, T, H] value_layer = tf.transpose(value_layer, [0, 2, 1, 3]) # `context_layer` = [B, N, F, H] context_layer = tf.matmul(attention_probs, value_layer) # `context_layer` = [B, F, N, H] context_layer = tf.transpose(context_layer, [0, 2, 1, 3]) if do_return_2d_tensor: # `context_layer` = [B*F, N*V] context_layer = tf.reshape( context_layer, [batch_size * from_seq_length, num_attention_heads * size_per_head]) else: # `context_layer` = [B, F, N*V] context_layer = tf.reshape( context_layer, [batch_size, from_seq_length, num_attention_heads * size_per_head]) return context_layer def transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, intermediate_act_fn=gelu, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False): """Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid. """ if hidden_size % num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, num_attention_heads)) attention_head_size = int(hidden_size / num_attention_heads) input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] input_width = input_shape[2] # The Transformer performs sum residuals on all layers so the input needs # to be the same as the hidden size. if input_width != hidden_size: raise ValueError("The width of the input tensor (%d) != hidden size (%d)" % (input_width, hidden_size)) # We keep the representation as a 2D tensor to avoid re-shaping it back and # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on # the GPU/CPU but may not be free on the TPU, so we want to minimize them to # help the optimizer. 
prev_output = reshape_to_matrix(input_tensor) all_layer_outputs = [] for layer_idx in range(num_hidden_layers): with tf.variable_scope("layer_%d" % layer_idx): layer_input = prev_output with tf.variable_scope("attention"): attention_heads = [] with tf.variable_scope("self"): attention_head = attention_layer( from_tensor=layer_input, to_tensor=layer_input, attention_mask=attention_mask, num_attention_heads=num_attention_heads, size_per_head=attention_head_size, attention_probs_dropout_prob=attention_probs_dropout_prob, initializer_range=initializer_range, do_return_2d_tensor=True, batch_size=batch_size, from_seq_length=seq_length, to_seq_length=seq_length) attention_heads.append(attention_head) attention_output = None if len(attention_heads) == 1: attention_output = attention_heads[0] else: # In the case where we have other sequences, we just concatenate # them to the self-attention head before the projection. attention_output = tf.concat(attention_heads, axis=-1) # Run a linear projection of `hidden_size` then add a residual # with `layer_input`. with tf.variable_scope("output"): attention_output = tf.layers.dense( attention_output, hidden_size, kernel_initializer=create_initializer(initializer_range)) attention_output = dropout(attention_output, hidden_dropout_prob) attention_output = layer_norm(attention_output + layer_input) # The activation is only applied to the "intermediate" hidden layer. with tf.variable_scope("intermediate"): intermediate_output = tf.layers.dense( attention_output, intermediate_size, activation=intermediate_act_fn, kernel_initializer=create_initializer(initializer_range)) # Down-project back to `hidden_size` then add the residual. with tf.variable_scope("output"): layer_output = tf.layers.dense( intermediate_output, hidden_size, kernel_initializer=create_initializer(initializer_range)) layer_output = dropout(layer_output, hidden_dropout_prob) layer_output = layer_norm(layer_output + attention_output) prev_output = layer_output all_layer_outputs.append(layer_output) if do_return_all_layers: final_outputs = [] for layer_output in all_layer_outputs: final_output = reshape_from_matrix(layer_output, input_shape) final_outputs.append(final_output) return final_outputs else: final_output = reshape_from_matrix(prev_output, input_shape) return final_output def get_shape_list(tensor, expected_rank=None, name=None): """Returns a list of the shape of tensor, preferring static dimensions. Args: tensor: A tf.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as tf.Tensor scalars. """ if name is None: name = tensor.name if expected_rank is not None: assert_rank(tensor, expected_rank, name) shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = tf.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape def reshape_to_matrix(input_tensor): """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError("Input tensor must have at least rank 2. 
Shape = %s" % (input_tensor.shape)) if ndims == 2: return input_tensor width = input_tensor.shape[-1] output_tensor = tf.reshape(input_tensor, [-1, width]) return output_tensor def reshape_from_matrix(output_tensor, orig_shape_list): """Reshapes a rank 2 tensor back to its original rank >= 2 tensor.""" if len(orig_shape_list) == 2: return output_tensor output_shape = get_shape_list(output_tensor) orig_dims = orig_shape_list[0:-1] width = output_shape[-1] return tf.reshape(output_tensor, orig_dims + [width]) def assert_rank(tensor, expected_rank, name=None): """Raises an exception if the tensor rank is not of the expected rank. Args: tensor: A tf.Tensor to check the rank of. expected_rank: Python integer or list of integers, expected rank. name: Optional name of the tensor for the error message. Raises: ValueError: If the expected shape doesn't match the actual shape. """ if name is None: name = tensor.name expected_rank_dict = {} if isinstance(expected_rank, six.integer_types): expected_rank_dict[expected_rank] = True else: for x in expected_rank: expected_rank_dict[x] = True actual_rank = tensor.shape.ndims if actual_rank not in expected_rank_dict: scope_name = tf.get_variable_scope().name raise ValueError( "For the tensor `%s` in scope `%s`, the actual rank " "`%d` (shape = %s) is not equal to the expected rank `%s`" % (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
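
# --- Editor's note (not part of the original file): a minimal usage sketch. ---
# It mirrors the example given in the BertModel docstring above. The tiny
# hyper-parameters and dummy ids below are illustrative assumptions only; the
# sketch relies on the same TF 1.x graph-mode APIs already used in this file.
if __name__ == "__main__":
  demo_config = BertConfig(
      vocab_size=32000,
      hidden_size=256,
      num_hidden_layers=4,
      num_attention_heads=4,
      intermediate_size=1024)

  # Inputs are WordPiece ids; 0s in the mask mark padding positions.
  demo_input_ids = tf.constant([[31, 51, 99], [15, 5, 0]], dtype=tf.int32)
  demo_input_mask = tf.constant([[1, 1, 1], [1, 1, 0]], dtype=tf.int32)
  demo_segment_ids = tf.constant([[0, 0, 1], [0, 1, 1]], dtype=tf.int32)

  demo_model = BertModel(
      config=demo_config,
      is_training=False,
      input_ids=demo_input_ids,
      input_mask=demo_input_mask,
      token_type_ids=demo_segment_ids,
      use_one_hot_embeddings=False)

  # [batch_size, hidden_size] and [batch_size, seq_length, hidden_size].
  pooled = demo_model.get_pooled_output()
  sequence = demo_model.get_sequence_output()

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    pooled_val, sequence_val = sess.run([pooled, sequence])
    print(pooled_val.shape, sequence_val.shape)  # (2, 256) and (2, 3, 256)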
ymcui/LERT
221
LERT: A Linguistically-motivated Pre-trained Language Model(语言学信息增强的预训练模型LERT)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/optimization.py
Python
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions and classes related to optimization (weight updates).""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import tensorflow as tf def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu): """Creates an optimizer training op.""" global_step = tf.train.get_or_create_global_step() learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32) # Implements linear decay of the learning rate. learning_rate = tf.train.polynomial_decay( learning_rate, global_step, num_train_steps, end_learning_rate=0.0, power=1.0, cycle=False) # Implements linear warmup. I.e., if global_step < num_warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. if num_warmup_steps: global_steps_int = tf.cast(global_step, tf.int32) warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32) global_steps_float = tf.cast(global_steps_int, tf.float32) warmup_steps_float = tf.cast(warmup_steps_int, tf.float32) warmup_percent_done = global_steps_float / warmup_steps_float warmup_learning_rate = init_lr * warmup_percent_done is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32) learning_rate = ( (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate) # It is recommended that you use this optimizer for fine tuning, since this # is how the model was trained (note that the Adam m/v variables are NOT # loaded from init_checkpoint.) optimizer = AdamWeightDecayOptimizer( learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-6, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]) if use_tpu: optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) tvars = tf.trainable_variables() grads = tf.gradients(loss, tvars) # This is how the model was pre-trained. (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0) train_op = optimizer.apply_gradients( zip(grads, tvars), global_step=global_step) # Normally the global step update is done inside of `apply_gradients`. # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use # a different optimizer, you should probably take this line out. 
new_global_step = global_step + 1 train_op = tf.group(train_op, [global_step.assign(new_global_step)]) return train_op class AdamWeightDecayOptimizer(tf.train.Optimizer): """A basic Adam optimizer that includes "correct" L2 weight decay.""" def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-6, exclude_from_weight_decay=None, name="AdamWeightDecayOptimizer"): """Constructs a AdamWeightDecayOptimizer.""" super(AdamWeightDecayOptimizer, self).__init__(False, name) self.learning_rate = learning_rate self.weight_decay_rate = weight_decay_rate self.beta_1 = beta_1 self.beta_2 = beta_2 self.epsilon = epsilon self.exclude_from_weight_decay = exclude_from_weight_decay def apply_gradients(self, grads_and_vars, global_step=None, name=None): """See base class.""" assignments = [] for (grad, param) in grads_and_vars: if grad is None or param is None: continue param_name = self._get_variable_name(param.name) m = tf.get_variable( name=param_name + "/adam_m", shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer()) v = tf.get_variable( name=param_name + "/adam_v", shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer()) # Standard Adam update. next_m = ( tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)) next_v = ( tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))) update = next_m / (tf.sqrt(next_v) + self.epsilon) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want ot decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. if self._do_use_weight_decay(param_name): update += self.weight_decay_rate * param update_with_lr = self.learning_rate * update next_param = param - update_with_lr assignments.extend( [param.assign(next_param), m.assign(next_m), v.assign(next_v)]) return tf.group(*assignments, name=name) def _do_use_weight_decay(self, param_name): """Whether to use L2 weight decay for `param_name`.""" if not self.weight_decay_rate: return False if self.exclude_from_weight_decay: for r in self.exclude_from_weight_decay: if re.search(r, param_name) is not None: return False return True def _get_variable_name(self, param_name): """Get the variable name from the tensor name.""" m = re.match("^(.*):\\d+$", param_name) if m is not None: param_name = m.group(1) return param_name
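
# --- Editor's note (not part of the original file): a small usage sketch. ---
# create_optimizer combines linear warmup (lr = init_lr * step / num_warmup_steps
# while step < num_warmup_steps) with linear polynomial decay to 0.0 at
# num_train_steps, then applies AdamWeightDecayOptimizer with global-norm
# gradient clipping. The toy variable and loss below are illustrative only.
if __name__ == "__main__":
  demo_w = tf.get_variable(
      "demo_w", shape=[8], initializer=tf.zeros_initializer())
  demo_loss = tf.reduce_sum(tf.square(demo_w - 1.0))

  demo_train_op = create_optimizer(
      loss=demo_loss,
      init_lr=1e-4,
      num_train_steps=1000,
      num_warmup_steps=100,
      use_tpu=False)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10):
      sess.run(demo_train_op)
    print(sess.run(demo_loss))  # should have decreased slightly from 8.0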
ymcui/LERT
221
LERT: A Linguistically-motivated Pre-trained Language Model(语言学信息增强的预训练模型LERT)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/run.pretrain.sh
Shell
#!/bin/bash set -ex TPU_NAME="your-tpu-name" TPU_ZONE="your-tpu-zone" DATA_DIR=./your-path-to-tfrecords MODEL_DIR=./your-path-to-model-saving CONFIG_FILE=./your-path-to-config-file # run pretraining python run_pretraining.py \ --input_file=${DATA_DIR}/tf_examples.tfrecord.* \ --output_dir=${MODEL_DIR} \ --do_train=True \ --bert_config_file=${CONFIG_FILE} \ --train_batch_size=1024 \ --eval_batch_size=1024 \ --max_seq_length=512 \ --max_predictions_per_seq=75 \ --num_train_steps=2000000 \ --num_warmup_steps=10000 \ --save_checkpoints_steps=50000 \ --learning_rate=1e-4 \ --do_lower_case=True \ --use_tpu=True \ --tpu_name=${TPU_NAME} \ --tpu_zone=${TPU_ZONE}
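
# --- Editor's note (not part of the original script): an illustrative check. ---
# With the flags above (learning_rate=1e-4, num_warmup_steps=10000,
# num_train_steps=2000000), optimization.create_optimizer yields linear warmup
# followed by linear decay to zero. A rough stand-alone Python sketch of the
# implied schedule (the numbers are taken from this script, nothing else):
#
#   init_lr, warmup, total = 1e-4, 10000, 2000000
#   def lr_at(step):
#       if step < warmup:
#           return init_lr * step / float(warmup)
#       return init_lr * (1.0 - float(step) / total)   # end_learning_rate=0.0
#   print(lr_at(5000), lr_at(10000), lr_at(1000000))   # ~5e-5, ~1e-4, ~5e-5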
ymcui/LERT
221
LERT: A Linguistically-motivated Pre-trained Language Model(语言学信息增强的预训练模型LERT)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/run_pretraining.py
Python
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run masked LM/next sentence masked_lm pre-training for BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import modeling import optimization import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", None, "Input TF example files (can be a glob or comma separated).") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.") flags.DEFINE_integer( "max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence. " "Must match data generation.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.") flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.") flags.DEFINE_integer("save_checkpoints_steps", 50000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.") tf.get_logger().propagate = False # LINGUISTIC FEATURES USED IN THE PAPER POS_LIST = ["POS-n", "POS-v", "POS-wp", "POS-u", "POS-d", "POS-a", "POS-m", "POS-p", "POS-r", "POS-ns", "POS-c", "POS-q", "POS-nt", "POS-nh", "POS-nd", "POS-j", "POS-i", "POS-b", "POS-ni", "POS-nz", "POS-nl", "POS-z", "POS-k", "POS-ws", "POS-o", "POS-h", "POS-e", "POS-%"] NER_LIST = ["NER-O", "NER-S-Ns", "NER-S-Nh", "NER-B-Ni", "NER-E-Ni", "NER-I-Ni", "NER-S-Ni", "NER-B-Ns", "NER-E-Ns", "NER-I-Ns", "NER-B-Nh", "NER-E-Nh", "NER-I-Nh"] DEP_LIST = ["DEP-ATT", "DEP-WP", "DEP-ADV", "DEP-VOB", "DEP-SBV", "DEP-COO", "DEP-RAD", "DEP-HED", "DEP-POB", "DEP-CMP", "DEP-LAD", "DEP-FOB", "DEP-DBL", "DEP-IOB"] def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] class_labels = [features["pos_labels"], features["ner_labels"], features["dep_labels"]] is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, class_labels, masked_lm_weights) total_loss = masked_lm_loss tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights): """Computes the loss and accuracy of the model.""" masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]) masked_lm_predictions = tf.argmax( masked_lm_log_probs, axis=-1, output_type=tf.int32) masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = tf.reshape(masked_lm_ids, [-1]) masked_lm_weights = tf.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = tf.metrics.accuracy( 
labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights) masked_lm_mean_loss = tf.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights) return { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, } eval_metrics = (metric_fn, [ masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights ]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn def get_masked_lm_output(bert_config, input_raw_tensor, output_weights, positions, label_ids, class_label_ids, label_weights): """Get loss and log probs for the masked LM.""" # input_raw_tensor [B, L, H] # input_tensor [B*75, H] input_tensor = gather_indexes(input_raw_tensor, positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # linguistic task head def create_linguistic_layer_and_loss(input_tensor, class_label_ids, label_weights, class_num, name_of_layer): class_logits = tf.layers.dense( input_tensor, units=class_num, activation=None, kernel_initializer=modeling.create_initializer(bert_config.initializer_range), name=name_of_layer) class_log_probs = tf.nn.log_softmax(class_logits, axis=-1) class_label_ids = tf.reshape(class_label_ids, [-1]) class_one_hot_labels = tf.one_hot(class_label_ids, depth=class_num, dtype=tf.float32) return get_loss(class_log_probs, class_one_hot_labels, label_weights) def get_loss(log_probs, one_hot_labels, label_weights): # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return per_example_loss, loss per_mlm_loss, mlm_loss = get_loss(log_probs, one_hot_labels, label_weights) per_pos_loss, pos_loss = create_linguistic_layer_and_loss(input_tensor, class_label_ids[0], label_weights, 28, "output_pos_layer") per_ner_loss, ner_loss = create_linguistic_layer_and_loss(input_tensor, class_label_ids[1], label_weights, 13, "output_ner_layer") per_dep_loss, dep_loss = create_linguistic_layer_and_loss(input_tensor, class_label_ids[2], label_weights, 14, "output_dep_layer") # specify end steps for scaling here. 
end_pos_steps = 333000 end_ner_steps = 666000 end_dep_steps = 1000000 global_steps = tf.cast(tf.train.get_or_create_global_step(), tf.float32) pos_weight = tf.clip_by_value(global_steps / end_pos_steps, 0.0, 1.0) ner_weight = tf.clip_by_value(global_steps / end_ner_steps, 0.0, 1.0) dep_weight = tf.clip_by_value(global_steps / end_dep_steps, 0.0, 1.0) loss = mlm_loss + pos_weight*pos_loss + ner_weight*ner_loss + dep_weight*dep_loss per_example_loss = per_mlm_loss + pos_weight*per_pos_loss + ner_weight*per_ner_loss + dep_weight*per_dep_loss return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "pos_labels": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "ner_labels": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "dep_labels": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( tf.contrib.data.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) else: d = tf.data.TFRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we wont cover # every sample. 
d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True)) return d return input_fn def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def main(_): tf.logging.set_verbosity(tf.logging.INFO) if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info("*** Input Files ***") for input_file in input_files: tf.logging.info(" %s" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, keep_checkpoint_max=10, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size) if FLAGS.do_train: tf.logging.info("***** Running training *****") tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) if FLAGS.do_eval: tf.logging.info("***** Running evaluation *****") tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False) result = estimator.evaluate( input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.gfile.GFile(output_eval_file, "w") as writer: tf.logging.info("***** Eval results *****") for key in sorted(result.keys()): tf.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.app.run()
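
# --- Editor's note (not part of the original file): an illustrative sketch. ---
# The input_fn above expects TFRecords whose features use exactly the keys and
# lengths declared in `name_to_features`, including the three linguistic label
# sequences (pos/ner/dep) aligned with the masked positions. The helper below is
# a hedged sketch of how one such record could be serialized; the all-zero
# values are placeholders, since the real label ids come from LERT's
# data-generation step, which is not part of this file.
def _demo_serialize_example(max_seq_length=128, max_predictions_per_seq=20):
  def int_feature(values):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

  def float_feature(values):
    return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))

  features = {
      "input_ids": int_feature([0] * max_seq_length),
      "input_mask": int_feature([1] * max_seq_length),
      "segment_ids": int_feature([0] * max_seq_length),
      "masked_lm_positions": int_feature([0] * max_predictions_per_seq),
      "masked_lm_ids": int_feature([0] * max_predictions_per_seq),
      "masked_lm_weights": float_feature([0.0] * max_predictions_per_seq),
      "pos_labels": int_feature([0] * max_predictions_per_seq),
      "ner_labels": int_feature([0] * max_predictions_per_seq),
      "dep_labels": int_feature([0] * max_predictions_per_seq),
  }
  return tf.train.Example(
      features=tf.train.Features(feature=features)).SerializeToString()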
ymcui/LERT
221
LERT: A Linguistically-motivated Pre-trained Language Model(语言学信息增强的预训练模型LERT)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
src/tokenization.py
Python
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re import unicodedata import six import tensorflow as tf def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): """Checks whether the casing config is consistent with the checkpoint name.""" # The casing has to be passed in by the user and there is no explicit check # as to whether it matches the checkpoint. The casing information probably # should have been stored in the bert_config.json file, but it's not, so # we have to heuristically detect it to validate. if not init_checkpoint: return m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) if m is None: return model_name = m.group(1) lower_models = [ "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" ] cased_models = [ "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", "multi_cased_L-12_H-768_A-12" ] is_bad_config = False if model_name in lower_models and not do_lower_case: is_bad_config = True actual_flag = "False" case_name = "lowercased" opposite_flag = "True" if model_name in cased_models and do_lower_case: is_bad_config = True actual_flag = "True" case_name = "cased" opposite_flag = "False" if is_bad_config: raise ValueError( "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " "However, `%s` seems to be a %s model, so you " "should pass in `--do_lower_case=%s` so that the fine-tuning matches " "how the model was pre-training. If this error is wrong, please " "just comment out this check." % (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)) def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def printable_text(text): """Returns text encoded in a way suitable for print or `tf.logging`.""" # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. 
if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode("utf-8") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() index = 0 with tf.gfile.GFile(vocab_file, "r") as reader: while True: token = convert_to_unicode(reader.readline()) if not token: break token = token.strip() vocab[token] = index index += 1 return vocab def convert_by_vocab(vocab, items): """Converts a sequence of [tokens|ids] using the vocab.""" output = [] for item in items: output.append(vocab[item]) return output def convert_tokens_to_ids(vocab, tokens): return convert_by_vocab(vocab, tokens) def convert_ids_to_tokens(inv_vocab, ids): return convert_by_vocab(inv_vocab, ids) def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class FullTokenizer(object): """Runs end-to-end tokenziation.""" def __init__(self, vocab_file, do_lower_case=True): self.vocab = load_vocab(vocab_file) self.inv_vocab = {v: k for k, v in self.vocab.items()} self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) def tokenize(self, text): split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens def convert_tokens_to_ids(self, tokens): return convert_by_vocab(self.vocab, tokens) def convert_ids_to_tokens(self, ids): return convert_by_vocab(self.inv_vocab, ids) class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True): """Constructs a BasicTokenizer. Args: do_lower_case: Whether to lower case the input. """ self.do_lower_case = do_lower_case def tokenize(self, text): """Tokenizes a piece of text.""" text = convert_to_unicode(text) text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ((cp >= 0x4E00 and cp <= 0x9FFF) or # (cp >= 0x3400 and cp <= 0x4DBF) or # (cp >= 0x20000 and cp <= 0x2A6DF) or # (cp >= 0x2A700 and cp <= 0x2B73F) or # (cp >= 0x2B740 and cp <= 0x2B81F) or # (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or # (cp >= 0x2F800 and cp <= 0x2FA1F)): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenziation.""" def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer. Returns: A list of wordpiece tokens. 
""" text = convert_to_unicode(text) output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
ymcui/LERT
221
LERT: A Linguistically-motivated Pre-trained Language Model(语言学信息增强的预训练模型LERT)
Python
ymcui
Yiming Cui
Joint Laboratory of HIT and iFLYTEK Research (HFL)
gatsby-config.js
JavaScript
require('dotenv').config()

module.exports = {
  plugins: [
    `gatsby-plugin-sharp`,
    {
      resolve: `gatsby-source-graphcms`,
      options: {
        downloadLocalImages: true,
        endpoint: process.env.GRAPHCMS_ENDPOINT,
        token: process.env.GRAPHCMS_TOKEN,
      },
    },
    `gatsby-transformer-sharp`,
  ],
}
ynnoj/2020-07-17-gatsby-preview-graphcms
1
📹 Preview GraphCMS content with Gatsby Cloud
JavaScript
ynnoj
Jonathan Steele
stripe
src/pages/index.js
JavaScript
import React from 'react'
import { graphql } from 'gatsby'
import Img from 'gatsby-image'

function IndexPage({ data }) {
  const { products } = data

  return products.nodes.map((product) => (
    <React.Fragment key={product.id}>
      <h1>{product.name}</h1>
      {product.images.map((image) => (
        <Img
          key={image.id}
          fixed={image.localFile.childImageSharp.fixed}
          fadeIn={false}
        />
      ))}
    </React.Fragment>
  ))
}

export const query = graphql`
  query IndexPageQuery {
    products: allGraphCmsProduct {
      nodes {
        id
        name
        slug
        images {
          id
          localFile {
            childImageSharp {
              fixed(width: 500) {
                ...GatsbyImageSharpFixed
              }
            }
          }
        }
      }
    }
  }
`

export default IndexPage
ynnoj/2020-07-17-gatsby-preview-graphcms
1
📹 Preview GraphCMS content with Gatsby Cloud
JavaScript
ynnoj
Jonathan Steele
stripe
gatsby-browser.js
JavaScript
import React from 'react'
import { MDXProvider } from '@mdx-js/react'

const wrapRootElement = ({ element }) => {
  return (
    <MDXProvider
      components={{
        h2: (props) => <h2 style={{ color: 'blue' }} {...props} />,
        p: (props) => <p style={{ color: 'red' }} {...props} />,
        CTA: (props) => (
          <div style={{ color: 'red' }}>
            {props.sales
              ? 'Please contact us for pricing'
              : 'Pricing is $255 per month'}
          </div>
        ),
        Test: () => (
          <div style={{ backgroundColor: 'black', color: 'white' }}>
            This is from MDX
          </div>
        ),
      }}
    >
      {element}
    </MDXProvider>
  )
}

export { wrapRootElement }
ynnoj/2020-08-07-working-with-mdx-graphcms
2
📹 Working with MDX and GraphCMS
JavaScript
ynnoj
Jonathan Steele
stripe
gatsby-config.js
JavaScript
require('dotenv').config()

module.exports = {
  plugins: [
    'gatsby-plugin-mdx',
    {
      resolve: 'gatsby-source-graphcms',
      options: {
        endpoint: process.env.GRAPHCMS_ENDPOINT,
        token: process.env.GRAPHCMS_TOKEN,
        buildMarkdownNodes: true,
      },
    },
  ],
}
ynnoj/2020-08-07-working-with-mdx-graphcms
2
📹 Working with MDX and GraphCMS
JavaScript
ynnoj
Jonathan Steele
stripe
src/pages/index.js
JavaScript
import React from 'react'
import { graphql } from 'gatsby'
import { MDXRenderer } from 'gatsby-plugin-mdx'

function IndexPage({ data }) {
  const { posts } = data

  return posts.nodes.map((post) => (
    <div key={post.id}>
      <h1>{post.title}</h1>
      <MDXRenderer>{post.content.markdownNode.childMdx.body}</MDXRenderer>
    </div>
  ))
}

export const pageQuery = graphql`
  {
    posts: allGraphCmsPost {
      nodes {
        id
        title
        content {
          markdownNode {
            childMdx {
              body
            }
          }
        }
      }
    }
  }
`

export default IndexPage
ynnoj/2020-08-07-working-with-mdx-graphcms
2
📹 Working with MDX and GraphCMS
JavaScript
ynnoj
Jonathan Steele
stripe
gatsby-browser.js
JavaScript
import React from 'react'
import {
  ApolloClient,
  ApolloProvider,
  HttpLink,
  InMemoryCache,
} from '@apollo/client'
import { MDXProvider } from '@mdx-js/react'
import fetch from 'isomorphic-fetch'

import './src/styles/index.css'

import Layout from './src/components/layout'

const httpLink = new HttpLink({
  uri: process.env.GATSBY_GRAPHCMS_ENDPOINT,
  headers: {
    Authorization: `Bearer ${process.env.GATSBY_GRAPHCMS_TOKEN}`,
  },
  fetch,
})

const apolloClient = new ApolloClient({
  link: httpLink,
  cache: new InMemoryCache(),
})

const wrapPageElement = ({ element, props }) => (
  <Layout {...props}>{element}</Layout>
)

const wrapRootElement = ({ element }) => (
  <ApolloProvider client={apolloClient}>
    <MDXProvider>{element}</MDXProvider>
  </ApolloProvider>
)

export { wrapPageElement, wrapRootElement }
ynnoj/2020-08-28-dynamic-content-in-gatsby
2
📹 Dynamic content in Gatsby with Apollo Client
JavaScript
ynnoj
Jonathan Steele
stripe
gatsby-config.js
JavaScript
require('dotenv').config()

module.exports = {
  siteMetadata: {
    title: 'GraphCMS Blog',
    description:
      'Gatsby blog starter for GraphCMS! Powered by `gatsby-source-graphcms`, featuring `gatsby-image` and MDX!',
    keywords: 'Headless CMS, GraphCMS, GraphQL CMS, Gatsby',
  },
  plugins: [
    'gatsby-plugin-mdx',
    {
      resolve: 'gatsby-plugin-react-svg',
      options: {
        rule: {
          include: /svg/,
        },
      },
    },
    'gatsby-plugin-react-helmet',
    'gatsby-plugin-sharp',
    'gatsby-plugin-postcss',
    {
      resolve: 'gatsby-source-graphcms',
      options: {
        endpoint: process.env.GATSBY_GRAPHCMS_ENDPOINT,
        token: process.env.GATSBY_GRAPHCMS_TOKEN,
        buildMarkdownNodes: true,
        downloadLocalImages: true,
      },
    },
    'gatsby-transformer-sharp',
  ],
}
ynnoj/2020-08-28-dynamic-content-in-gatsby
2
📹 Dynamic content in Gatsby with Apollo Client
JavaScript
ynnoj
Jonathan Steele
stripe
gatsby-node.js
JavaScript
const path = require('path') exports.createPages = async ({ actions: { createPage }, graphql }) => { const { data } = await graphql( ` { pages: allGraphCmsPage { nodes { id content { markdownNode { childMdx { body } } } seo { description image { url } keywords title } slug subtitle title } } posts: allGraphCmsPost(sort: { fields: date, order: ASC }) { edges { nextPost: next { slug title } page: node { id author { id name title } content { markdownNode { childMdx { body } } } date: formattedDate excerpt remoteId seo { description image { url } keywords title } slug title } previousPost: previous { slug title } } } } ` ) if (data.errors) throw data.errors data.posts.edges.forEach(({ nextPost, page, previousPost }) => { createPage({ component: path.resolve('./src/templates/blog-post.js'), context: { id: page.id, page, previousPost, nextPost, }, path: `/posts/${page.slug}`, }) }) data.pages.nodes.forEach((page) => { createPage({ component: path.resolve('./src/templates/default-page.js'), context: { page, }, path: `/${page.slug}`, }) }) } exports.createResolvers = ({ createResolvers }) => { const resolvers = { GraphCMS_Post: { formattedDate: { type: 'String', resolve: (source) => { const date = new Date(source.date) return new Intl.DateTimeFormat('en-US', { weekday: 'long', year: 'numeric', month: 'long', day: 'numeric', }).format(date) }, }, }, } createResolvers(resolvers) }
ynnoj/2020-08-28-dynamic-content-in-gatsby
2
📹 Dynamic content in Gatsby with Apollo Client
JavaScript
ynnoj
Jonathan Steele
stripe
gatsby-ssr.js
JavaScript
export { wrapPageElement, wrapRootElement } from './gatsby-browser'
ynnoj/2020-08-28-dynamic-content-in-gatsby
2
📹 Dynamic content in Gatsby with Apollo Client
JavaScript
ynnoj
Jonathan Steele
stripe
postcss.config.js
JavaScript
module.exports = {
  plugins: [require('postcss-preset-env'), require('tailwindcss')],
}
ynnoj/2020-08-28-dynamic-content-in-gatsby
2
📹 Dynamic content in Gatsby with Apollo Client
JavaScript
ynnoj
Jonathan Steele
stripe
src/components/footer.js
JavaScript
import React from 'react'

import GitHubSVG from '../svg/github.svg'
import LinkedInSVG from '../svg/linkedin.svg'
import SlackSVG from '../svg/slack.svg'
import TwitterSVG from '../svg/twitter.svg'

const socialLinks = [
  {
    Component: GitHubSVG,
    href: 'https://github.com/graphcms/gatsby-graphcms-ecommerce-starter',
    title: 'GitHub',
  },
  {
    Component: SlackSVG,
    href: 'http://slack.graphcms.com',
    title: 'Slack',
  },
  {
    Component: TwitterSVG,
    href: 'https://twitter.com/graphcms',
    title: 'Twitter',
  },
  {
    Component: LinkedInSVG,
    href: 'https://www.linkedin.com/company/graphcms',
    title: 'LinkedIn',
  },
]

function Footer() {
  return (
    <footer className="bg-gray-800">
      <div className="flex flex-col md:flex-row items-center md:justify-between py-6 max-w-3xl mx-auto px-4 sm:px-6 lg:max-w-5xl space-y-6 md:space-y-0">
        <p className="text-gray-300">Powered by GraphCMS &amp; Gatsby</p>
        <ul className="inline-flex space-x-6">
          {socialLinks.map(({ Component, href, title }, index) => (
            <li key={index}>
              <a
                href={href}
                target="_blank"
                className="block text-gray-300 hover:text-white p-1 text-sm"
                rel="noopener noreferrer"
                title={title}
              >
                <Component className="h-6 w-6" />
              </a>
            </li>
          ))}
        </ul>
      </div>
    </footer>
  )
}

export default Footer
ynnoj/2020-08-28-dynamic-content-in-gatsby
2
📹 Dynamic content in Gatsby with Apollo Client
JavaScript
ynnoj
Jonathan Steele
stripe
src/components/header.js
JavaScript
import React, { useEffect, useState } from 'react' import { graphql, Link, useStaticQuery } from 'gatsby' import { globalHistory, useLocation } from '@reach/router' import cx from 'classnames' import GraphCMSLogo from '../svg/logo.svg' import GraphCMSMark from '../svg/mark.svg' import Transition from './transition' function Header() { const [mobileNavOpen, setMobileNavOpen] = useState(false) const location = useLocation() const { pages } = useStaticQuery(graphql` { pages: allGraphCmsPage { nodes { id slug title } } } `) useEffect( () => globalHistory.listen(({ action }) => { if (action === 'PUSH') setMobileNavOpen(false) }), [setMobileNavOpen] ) const toggleMobileNavOpen = () => setMobileNavOpen((open) => !open) return ( <header className="py-10 relative"> <nav className="relative flex items-center justify-between sm:h-10 lg:justify-start"> <div className="flex items-center flex-grow flex-shrink-0 lg:flex-grow-0"> <div className="flex items-center justify-between w-full md:w-auto"> <Link to="/" aria-label="GraphCMS Gatsby Blog Starter"> <GraphCMSLogo className="hidden sm:block h-10" /> <GraphCMSMark className="h-10 sm:hidden" /> </Link> <div className="-mr-2 flex items-center md:hidden"> <button onClick={() => toggleMobileNavOpen()} type="button" className="inline-flex items-center justify-center p-2 rounded-md text-gray-400 hover:text-gray-500 hover:bg-gray-100 focus:outline-none focus:bg-gray-100 focus:text-gray-500 transition duration-150 ease-in-out" id="main-menu" aria-label="Main menu" aria-haspopup="true" > <svg className="h-6 w-6" stroke="currentColor" fill="none" viewBox="0 0 24 24" > <path strokeLinecap="round" strokeLinejoin="round" strokeWidth="2" d="M4 6h16M4 12h16M4 18h16" /> </svg> </button> </div> </div> </div> <div className="hidden md:flex md:ml-10 md:pr-4 space-x-8"> {pages.nodes.map((page) => { const isActive = location.pathname.startsWith(`/${page.slug}`) return ( <Link key={page.id} to={`/${page.slug}`} className={cx( 'inline-flex items-center px-1 pt-1 border-b-2 text-lg font-medium leading-5 focus:outline-none transition duration-150 ease-in-out', { 'border-purple-500 text-gray-900 focus:border-purple-600': isActive, 'border-transparent text-gray-500 hover:text-gray-600 hover:border-gray-300 focus:text-gray-600 focus:border-grey-600': !isActive, } )} > {page.title} </Link> ) })} </div> </nav> <Transition show={mobileNavOpen} enter="duration-150 ease-out" enterFrom="opacity-0 scale-95" enterTo="opacity-100 scale-100" leave="duration-100 ease-in" leaveFrom="opacity-100 scale-100" leaveTo="opacity-0 scale-95" > <div className="absolute top-0 inset-x-0 py-2 -mx-2 transition transform origin-top-right md:hidden"> <div className="rounded-lg shadow-md"> <div className="rounded-lg bg-white shadow-xs overflow-hidden" role="menu" aria-orientation="vertical" aria-labelledby="main-menu" > <div className="px-2 pt-8 flex items-center justify-between"> <div> <GraphCMSMark className="h-10" /> </div> <div className="-mr-2"> <button onClick={() => toggleMobileNavOpen()} type="button" className="inline-flex items-center justify-center p-2 rounded-md text-gray-400 hover:text-gray-500 hover:bg-gray-100 focus:outline-none focus:bg-gray-100 focus:text-gray-500 transition duration-150 ease-in-out" aria-label="Close menu" > <svg className="h-6 w-6" stroke="currentColor" fill="none" viewBox="0 0 24 24" > <path strokeLinecap="round" strokeLinejoin="round" strokeWidth="2" d="M6 18L18 6M6 6l12 12" /> </svg> </button> </div> </div> <div className="mt-1 px-2 pt-2 pb-3 space-y-1"> 
{pages.nodes.map((page) => { const isActive = location.pathname.startsWith(`/${page.slug}`) return ( <Link key={page.id} to={`/${page.slug}`} className={cx( 'block pl-3 pr-4 py-2 border-l-4 font-medium focus:outline-none transition duration-150 ease-in-out', { 'border-purple-500 text-purple-500 bg-purple-50 focus:text-purple-600 focus:bg-purple-100 focus:border-purple-600': isActive, 'border-transparent text-gray-500 hover:text-gray-600 hover:bg-gray-50 hover:border-gray-300 focus:text-gray-600 focus:bg-gray-50 focus:border-gray-300': !isActive, } )} role="menuitem" > {page.title} </Link> ) })} </div> </div> </div> </div> </Transition> </header> ) } export default Header
ynnoj/2020-08-28-dynamic-content-in-gatsby
2
📹 Dynamic content in Gatsby with Apollo Client
JavaScript
ynnoj
Jonathan Steele
stripe
src/components/layout.js
JavaScript
import React from 'react'

import Footer from './footer'
import Header from './header'
import SEO from './seo'

function Layout({ children, pageContext: { page } }) {
  return (
    <React.Fragment>
      <SEO {...page} />
      <div className="flex flex-col min-h-screen">
        <div className="flex-grow max-w-3xl mx-auto px-4 sm:px-6 lg:max-w-5xl w-full">
          <Header />
          <main className="flex-grow mb-8">{children}</main>
        </div>
        <Footer />
      </div>
    </React.Fragment>
  )
}

export default Layout
ynnoj/2020-08-28-dynamic-content-in-gatsby
2
📹 Dynamic content in Gatsby with Apollo Client
JavaScript
ynnoj
Jonathan Steele
stripe
src/components/seo.js
JavaScript
import React from 'react'
import { graphql, useStaticQuery } from 'gatsby'
import { Helmet } from 'react-helmet'

function SEO({ title, seo }) {
  const {
    site: { siteMetadata },
  } = useStaticQuery(graphql`
    {
      site {
        siteMetadata {
          description
          keywords
          title
        }
      }
    }
  `)

  const defaultTitle = siteMetadata.title
  const pageDescription = seo?.description || siteMetadata.description
  const pageKeywords = seo?.keywords || siteMetadata.keywords
  const pageTitle = seo?.title || title || 'Home'

  return (
    <Helmet
      htmlAttributes={{ lang: 'en' }}
      defaultTitle={defaultTitle}
      titleTemplate={`%s | ${defaultTitle}`}
    >
      <title>{pageTitle}</title>
      <meta name="description" content={pageDescription} />
      <meta name="keywords" content={pageKeywords} />
      {seo?.image && <meta property="image" content={seo.image.url} />}
      <meta property="og:title" content={pageTitle} />
      <meta property="og:description" content={pageDescription} />
      <meta property="og:site_name" content={defaultTitle} />
      {seo?.image && <meta property="og:image" content={seo.image.url} />}
      <meta name="og:type" content="website" />
      <meta name="twitter:site" content="@GraphCMS" />
      <meta name="twitter:title" content={`${pageTitle} | ${defaultTitle}`} />
      <meta name="twitter:card" content="summary_large_image" />
      {seo?.image && <meta name="twitter:image:src" content={seo.image.url} />}
    </Helmet>
  )
}

export default SEO
ynnoj/2020-08-28-dynamic-content-in-gatsby
2
📹 Dynamic content in Gatsby with Apollo Client
JavaScript
ynnoj
Jonathan Steele
stripe
src/components/transition.js
JavaScript
import React, { useRef, useEffect, useContext } from 'react' import { CSSTransition as ReactCSSTransition } from 'react-transition-group' const TransitionContext = React.createContext({ parent: {}, }) function useIsInitialRender() { const isInitialRender = useRef(true) useEffect(() => { isInitialRender.current = false }, []) return isInitialRender.current } function CSSTransition({ show, enter = '', enterFrom = '', enterTo = '', leave = '', leaveFrom = '', leaveTo = '', appear, children, }) { const enterClasses = enter.split(' ').filter((s) => s.length) const enterFromClasses = enterFrom.split(' ').filter((s) => s.length) const enterToClasses = enterTo.split(' ').filter((s) => s.length) const leaveClasses = leave.split(' ').filter((s) => s.length) const leaveFromClasses = leaveFrom.split(' ').filter((s) => s.length) const leaveToClasses = leaveTo.split(' ').filter((s) => s.length) function addClasses(node, classes) { classes.length && node.classList.add(...classes) } function removeClasses(node, classes) { classes.length && node.classList.remove(...classes) } return ( <ReactCSSTransition appear={appear} unmountOnExit in={show} addEndListener={(node, done) => { node.addEventListener('transitionend', done, false) }} onEnter={(node) => { addClasses(node, [...enterClasses, ...enterFromClasses]) }} onEntering={(node) => { removeClasses(node, enterFromClasses) addClasses(node, enterToClasses) }} onEntered={(node) => { removeClasses(node, [...enterToClasses, ...enterClasses]) }} onExit={(node) => { addClasses(node, [...leaveClasses, ...leaveFromClasses]) }} onExiting={(node) => { removeClasses(node, leaveFromClasses) addClasses(node, leaveToClasses) }} onExited={(node) => { removeClasses(node, [...leaveToClasses, ...leaveClasses]) }} > {children} </ReactCSSTransition> ) } function Transition({ show, appear, ...rest }) { const { parent } = useContext(TransitionContext) const isInitialRender = useIsInitialRender() const isChild = show === undefined if (isChild) { return ( <CSSTransition appear={parent.appear || !parent.isInitialRender} show={parent.show} {...rest} /> ) } return ( <TransitionContext.Provider value={{ parent: { show, isInitialRender, appear, }, }} > <CSSTransition appear={appear} show={show} {...rest} /> </TransitionContext.Provider> ) } export default Transition
ynnoj/2020-08-28-dynamic-content-in-gatsby
2
📹 Dynamic content in Gatsby with Apollo Client
JavaScript
ynnoj
Jonathan Steele
stripe