id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
20,594 | import argparse
import json
import os
import random
import time
import shortuuid
import torch
from tqdm import tqdm
from fastchat.llm_judge.common import load_questions, temperature_config
from fastchat.model import load_model, get_conversation_template
from fastchat.utils import str_to_torch_dtype
def get_model_answers(
model_path,
model_id,
questions,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
dtype,
revision,
):
def load_questions(question_file: str, begin: Optional[int], end: Optional[int]):
def run_eval(
    model_path,
    model_id,
    question_file,
    question_begin,
    question_end,
    answer_file,
    max_new_token,
    num_choices,
    num_gpus_per_model,
    num_gpus_total,
    max_gpu_memory,
    dtype,
    revision,
):
    """Generate model answers for a slice of questions, optionally sharded over Ray.

    Splits the questions into ``num_gpus_total // num_gpus_per_model`` chunks
    and runs ``get_model_answers`` on each chunk — remotely as Ray tasks when
    more than one model replica fits on the available GPUs, otherwise inline.
    """
    questions = load_questions(question_file, question_begin, question_end)
    # random shuffle the questions to balance the loading
    random.shuffle(questions)

    # Split the question file into `num_gpus` files
    assert num_gpus_total % num_gpus_per_model == 0
    use_ray = num_gpus_total // num_gpus_per_model > 1

    if use_ray:
        # NOTE(review): `ray` is not imported in this chunk — presumably it
        # is imported elsewhere in the full script; confirm.
        get_answers_func = ray.remote(num_gpus=num_gpus_per_model)(
            get_model_answers
        ).remote
    else:
        get_answers_func = get_model_answers

    # NOTE(review): chunk_size is 0 when there are fewer questions than model
    # replicas, which would make range() raise ValueError — confirm callers
    # always pass enough questions.
    chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model)
    ans_handles = []
    for i in range(0, len(questions), chunk_size):
        ans_handles.append(
            get_answers_func(
                model_path,
                model_id,
                questions[i : i + chunk_size],
                answer_file,
                max_new_token,
                num_choices,
                num_gpus_per_model,
                max_gpu_memory,
                dtype=dtype,
                revision=revision,
            )
        )

    if use_ray:
        # Block until all remote shards have finished writing their answers.
        ray.get(ans_handles)
20,595 | import argparse
import json
import os
import random
import time
import shortuuid
import torch
from tqdm import tqdm
from fastchat.llm_judge.common import load_questions, temperature_config
from fastchat.model import load_model, get_conversation_template
from fastchat.utils import str_to_torch_dtype
The provided code snippet includes necessary dependencies for implementing the `reorg_answer_file` function. Write a Python function `def reorg_answer_file(answer_file)` to solve the following problem:
Sort by question id and de-duplication
Here is the function:
def reorg_answer_file(answer_file):
    """Sort by question id and de-duplication"""
    latest_by_qid = {}
    with open(answer_file, "r") as fin:
        for raw_line in fin:
            question_id = json.loads(raw_line)["question_id"]
            # Later lines overwrite earlier ones, de-duplicating repeats.
            latest_by_qid[question_id] = raw_line
    with open(answer_file, "w") as fout:
        fout.writelines(latest_by_qid[qid] for qid in sorted(latest_by_qid))
20,596 | import argparse
import json
import os
import time
import concurrent.futures
import openai
import shortuuid
import tqdm
from fastchat.llm_judge.common import (
load_questions,
temperature_config,
chat_completion_openai,
chat_completion_anthropic,
chat_completion_palm,
)
from fastchat.llm_judge.gen_model_answer import reorg_answer_file
from fastchat.model.model_adapter import get_conversation_template, ANTHROPIC_MODEL_LIST
# Default sampling temperature per question category: 0.0 (deterministic)
# for categories with a single correct answer, higher for open-ended ones.
temperature_config = {
    "writing": 0.7,
    "roleplay": 0.7,
    "extraction": 0.0,
    "math": 0.0,
    "coding": 0.0,
    "reasoning": 0.0,
    "stem": 0.1,
    "humanities": 0.1,
    "arena-hard-200": 0.0,
}
def chat_completion_openai(model, conv, temperature, max_tokens, api_dict=None):
    """Query an OpenAI chat model with retries.

    Retries up to API_MAX_RETRY times, sleeping API_RETRY_SLEEP seconds
    between attempts.  Returns the first choice's message content, or the
    API_ERROR_OUTPUT sentinel if every attempt failed.
    """
    if api_dict is not None:
        # Route to a custom endpoint (e.g. a proxy) with its own key.
        openai.api_base = api_dict["api_base"]
        openai.api_key = api_dict["api_key"]
    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            messages = conv.to_openai_api_messages()
            response = openai.ChatCompletion.create(
                model=model,
                messages=messages,
                n=1,
                temperature=temperature,
                max_tokens=max_tokens,
            )
            output = response["choices"][0]["message"]["content"]
            break
        except openai.error.OpenAIError as e:
            print(type(e), e)
            time.sleep(API_RETRY_SLEEP)
    return output
def chat_completion_anthropic(model, conv, temperature, max_tokens, api_dict=None):
    """Query an Anthropic completion model with retries.

    Returns the completion text, stripped; if every retry failed, returns the
    stripped API_ERROR_OUTPUT sentinel.

    NOTE(review): `anthropic` is not among this chunk's imports — presumably
    imported at the top of the full module; confirm.
    """
    if api_dict is not None and "api_key" in api_dict:
        api_key = api_dict["api_key"]
    else:
        # Falls back to the environment; raises KeyError if unset.
        api_key = os.environ["ANTHROPIC_API_KEY"]
    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            c = anthropic.Anthropic(api_key=api_key)
            prompt = conv.get_prompt()
            response = c.completions.create(
                model=model,
                prompt=prompt,
                stop_sequences=[anthropic.HUMAN_PROMPT],
                max_tokens_to_sample=max_tokens,
                temperature=temperature,
            )
            output = response.completion
            break
        except anthropic.APIError as e:
            print(type(e), e)
            time.sleep(API_RETRY_SLEEP)
    return output.strip()
def chat_completion_palm(chat_state, model, conv, temperature, max_tokens):
    """Query the PaLM 2 chat model with retries.

    Returns ``(chat_state, output)`` so the caller can reuse the chat session
    across turns.  On total failure, output is the API_ERROR_OUTPUT sentinel.
    """
    from fastchat.serve.api_provider import init_palm_chat

    assert model == "palm-2-chat-bison-001"
    if chat_state is None:
        # Lazily create the chat session on first use.
        chat_state = init_palm_chat("chat-bison@001")
    parameters = {
        "temperature": temperature,
        "top_p": 0.8,
        "top_k": 40,
        "max_output_tokens": max_tokens,
    }
    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            # conv.messages[-2][1] — presumably the latest user message (the
            # last entry being the empty assistant placeholder); confirm
            # against the Conversation type.
            response = chat_state.send_message(conv.messages[-2][1], **parameters)
            output = response.text
            break
        except Exception as e:
            print(type(e), e)
            time.sleep(API_RETRY_SLEEP)
    return chat_state, output
# Model names that are routed through the Anthropic completion API
# (see the `model in ANTHROPIC_MODEL_LIST` dispatch in get_answer).
ANTHROPIC_MODEL_LIST = (
    "claude-1",
    "claude-2",
    "claude-2.0",
    "claude-2.1",
    "claude-instant-1",
    "claude-instant-1.2",
)
def get_conversation_template(model_path: str) -> Conversation:
    """Get the default conversation template.

    Looks up the model adapter registered for *model_path* and returns that
    adapter's default Conversation template.
    """
    adapter = get_model_adapter(model_path)
    return adapter.get_default_conv_template(model_path)
def get_answer(
    question: dict, model: str, num_choices: int, max_tokens: int, answer_file: str
):
    """Generate answers for one question via an API model and append them
    as a JSON line to *answer_file*.

    NOTE(review): reads the module-level `args` namespace
    (args.force_temperature), defined by the argparse entry point elsewhere
    in the script — confirm.
    """
    # Forbid combining a globally forced temperature with a question that
    # carries its own required temperature.
    assert (
        args.force_temperature is not None and "required_temperature" in question.keys()
    ) == False
    # Temperature priority: CLI override > per-question requirement >
    # per-category default > 0.7 fallback.
    if args.force_temperature is not None:
        temperature = args.force_temperature
    elif "required_temperature" in question.keys():
        temperature = question["required_temperature"]
    elif question["category"] in temperature_config:
        temperature = temperature_config[question["category"]]
    else:
        temperature = 0.7

    choices = []
    chat_state = None  # for palm-2 model
    for i in range(num_choices):
        conv = get_conversation_template(model)

        turns = []
        for j in range(len(question["turns"])):
            conv.append_message(conv.roles[0], question["turns"][j])
            # Empty assistant slot to be filled by the model's reply.
            conv.append_message(conv.roles[1], None)

            # Dispatch to the right provider API for this model name.
            if model in ANTHROPIC_MODEL_LIST:
                output = chat_completion_anthropic(model, conv, temperature, max_tokens)
            elif model == "palm-2-chat-bison-001":
                chat_state, output = chat_completion_palm(
                    chat_state, model, conv, temperature, max_tokens
                )
            else:
                output = chat_completion_openai(model, conv, temperature, max_tokens)

            conv.update_last_message(output)
            turns.append(output)
        choices.append({"index": i, "turns": turns})

    # Dump answers
    ans = {
        "question_id": question["question_id"],
        "answer_id": shortuuid.uuid(),
        "model_id": model,
        "choices": choices,
        "tstamp": time.time(),
    }

    os.makedirs(os.path.dirname(answer_file), exist_ok=True)
    with open(answer_file, "a") as fout:
        fout.write(json.dumps(ans) + "\n")
20,597 | import argparse
import json
import os
import numpy as np
def get_mt_bench_votes_data(raw_votes):
    """Group MT-bench votes by turn and (question, model-pair), keyed by judge.

    Returns a two-element list (turn 1, turn 2); each element maps
    (question_id, model_lo, model_hi) -> {judge_name: [winner, ...]} with the
    model names in sorted order, flipping the winner label when needed.
    """
    data = [{}, {}]
    for judge_votes in raw_votes:
        for vote in judge_votes:
            turn_idx = vote["turn"] - 1
            model_a, model_b = vote["model_a"], vote["model_b"]
            if model_a < model_b:
                pair_key = (vote["question_id"], model_a, model_b)
                outcome = vote["winner"]
            else:
                # Canonicalize the pair order and flip the winner label.
                pair_key = (vote["question_id"], model_b, model_a)
                outcome = revert(vote["winner"])
            judge_name = get_judge_name(vote["judge"])
            turn_bucket = data[turn_idx]
            turn_bucket.setdefault(pair_key, {}).setdefault(judge_name, []).append(
                outcome
            )
    return data
def get_mt_bench_agreement(data, judge1, judge2, ban):
    """Count how often two judges agree on the same matches.

    Args:
        data: one turn's dict from get_mt_bench_votes_data
            (match key -> {judge_name: [votes]}).
        judge1, judge2: judge names; supported pairs are (gpt4*, "human")
            and ("human", "human").
        ban: vote labels to exclude from the comparison (e.g. ["tie"]).

    Returns:
        (number of agreements, number of comparisons).

    Raises:
        Exception: for unsupported judge pairs.

    NOTE(review): convertvote/equalvote are helpers defined elsewhere in the
    script, not visible in this chunk.
    """
    if judge1.startswith("gpt4") and judge2 == "human":
        stats = [0, 0]  # [agreements, comparisons]
        for votes in data.values():
            if judge1 not in votes or judge2 not in votes:
                continue
            # The GPT-4 judge votes at most once per match.
            assert len(votes[judge1]) == 1
            if convertvote(votes[judge1][0]) in ban:
                continue
            for v in votes[judge2]:
                if convertvote(v) in ban:
                    continue
                stats[1] += 1
                stats[0] += equalvote(votes[judge1][0], v)
        return stats[0], stats[1]
    elif judge1 == "human" and judge2 == "human":
        stats = [0, 0]
        for votes in data.values():
            if "human" not in votes:
                continue
            # Compare every unordered pair of human votes on the same match.
            for i in range(len(votes["human"]) - 1):
                for j in range(i + 1, len(votes["human"])):
                    if (
                        convertvote(votes["human"][i]) in ban
                        or convertvote(votes["human"][j]) in ban
                    ):
                        continue
                    stats[1] += 1
                    stats[0] += equalvote(votes["human"][i], votes["human"][j])
        return stats[0], stats[1]
    else:
        raise Exception("Unsupported judges.")
def run_mt_bench_agreement(judges, votefiles):
    """Load vote files and print agreement statistics between two judges,
    for both turns, with and without ties."""
    # votes[i]: List of votes
    votes = []
    for filename in votefiles:
        with open(filename, "r") as f:
            data = json.load(f)
            votes.append(data)
    data = get_mt_bench_votes_data(votes)

    agree, total = get_mt_bench_agreement(data[0], judges[0], judges[1], ban=[])
    print(
        f"turn 1 with tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
    )
    agree, total = get_mt_bench_agreement(data[0], judges[0], judges[1], ban=["tie"])
    print(
        f"turn 1 without tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
    )
    agree, total = get_mt_bench_agreement(data[1], judges[0], judges[1], ban=[])
    print(
        f"turn 2 with tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
    )
    agree, total = get_mt_bench_agreement(data[1], judges[0], judges[1], ban=["tie"])
    print(
        f"turn 2 without tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
    )
20,598 | import argparse
import pandas as pd
def display_result_single(args):
    """Print per-model average single-answer scores: first turn, and for
    mt_bench also second turn and the overall average."""
    input_file = args.input_file
    if input_file is None:
        input_file = (
            f"data/{args.bench_name}/model_judgment/{args.judge_model}_single.jsonl"
        )

    print(f"Input file: {input_file}")
    judgments = pd.read_json(input_file, lines=True)
    scores = judgments[["model", "score", "turn"]]
    # Drop failed judgments (score -1) and optionally restrict the models.
    scores = scores[scores["score"] != -1]
    if args.model_list is not None:
        scores = scores[scores["model"].isin(args.model_list)]

    print("\n########## First turn ##########")
    first_turn = scores[scores["turn"] == 1].groupby(["model", "turn"]).mean()
    print(first_turn.sort_values(by="score", ascending=False))

    if args.bench_name == "mt_bench":
        print("\n########## Second turn ##########")
        second_turn = scores[scores["turn"] == 2].groupby(["model", "turn"]).mean()
        print(second_turn.sort_values(by="score", ascending=False))

        print("\n########## Average ##########")
        overall = scores[["model", "score"]].groupby(["model"]).mean()
        print(overall.sort_values(by="score", ascending=False))
20,599 | import argparse
import pandas as pd
def display_result_pairwise(args):
    """Print per-model win/loss/tie counts and win rates from pairwise
    judgments, sorted by tie-adjusted win rate."""
    input_file = args.input_file
    if args.input_file is None:
        input_file = (
            f"data/{args.bench_name}/model_judgment/{args.judge_model}_pair.jsonl"
        )
    else:
        input_file = args.input_file

    print(f"Input file: {input_file}")
    df_all = pd.read_json(input_file, lines=True)
    # Drop matches where either presentation order produced an error.
    df_all = df_all[(df_all["g1_winner"] != "error") & (df_all["g2_winner"] != "error")]

    model_list = (
        df_all["model_1"].unique().tolist() + df_all["model_2"].unique().tolist()
    )
    model_list = list(set(model_list))

    list_res = []
    # traverse df row by row
    for index, row in df_all.iterrows():
        if args.model_list is not None and row["model_1"] not in args.model_list:
            continue
        if args.baseline_model is not None:
            if args.baseline_model not in [row["model_1"], row["model_2"]]:
                continue
        # A declared tie, or disagreement between the two presentation
        # orders, counts as a tie for both models.
        if row["g1_winner"] == "tie" or row["g1_winner"] != row["g2_winner"]:
            list_res.append({"model": row["model_1"], "win": 0, "loss": 0, "tie": 1})
            list_res.append({"model": row["model_2"], "win": 0, "loss": 0, "tie": 1})
        else:
            if row["g1_winner"] == "model_1":
                winner = row["model_1"]
                loser = row["model_2"]
            else:
                winner = row["model_2"]
                loser = row["model_1"]
            list_res.append({"model": winner, "win": 1, "loss": 0, "tie": 0})
            list_res.append({"model": loser, "win": 0, "loss": 1, "tie": 0})

    df = pd.DataFrame(list_res)
    df = df.groupby(["model"]).sum()

    # remove baseline model
    if args.baseline_model is not None:
        df = df[df.index != args.baseline_model]
    # add win rate
    df["win_rate"] = df["win"] / (df["win"] + df["loss"] + df["tie"])
    df["loss_rate"] = df["loss"] / (df["win"] + df["loss"] + df["tie"])
    # each tie counts as 0.5 win + 0.5 loss
    df["win_rate_adjusted"] = (df["win"] + 0.5 * df["tie"]) / (
        df["win"] + df["loss"] + df["tie"]
    )
    # print(df.sort_values(by="win_rate", ascending=False))
    # print(df.sort_values(by="loss_rate", ascending=True))
    print(df.sort_values(by="win_rate_adjusted", ascending=False))
20,600 | import argparse
from concurrent.futures import ThreadPoolExecutor
import json
import numpy as np
from tqdm import tqdm
from fastchat.llm_judge.common import (
load_questions,
load_model_answers,
load_judge_prompts,
check_data,
play_a_match_pair,
play_a_match_single,
get_model_list,
Judge,
MatchPair,
MatchSingle,
NEED_REF_CATS,
)
class MatchPair:
    """One pairwise judging task: a question plus two models' answers.

    Mirrors fastchat.llm_judge.common.MatchPair.  The original body was a
    bare annotated field list with no @dataclasses.dataclass decorator, so
    the positional construction used in make_match would have failed; an
    explicit constructor with the same field order is provided instead.
    """

    # Class-level defaults for the optional fields.
    ref_answer = None
    multi_turn = False

    def __init__(
        self,
        question,
        model_1,
        model_2,
        answer_1,
        answer_2,
        judge,
        ref_answer=None,
        multi_turn=False,
    ):
        self.question = question      # benchmark question record
        self.model_1 = model_1        # name of the first model
        self.model_2 = model_2        # name of the second model
        self.answer_1 = answer_1      # answer record of model_1
        self.answer_2 = answer_2      # answer record of model_2
        self.judge = judge            # Judge configuration object
        self.ref_answer = ref_answer  # optional reference answer record
        self.multi_turn = multi_turn  # judge both turns when True
def make_match(
    questions,
    models,
    model_answers,
    judge,
    baseline_model,
    ref_answers=None,
    multi_turn=False,
):
    """Create MatchPair tasks comparing each model against the baseline."""
    matches = []
    for question in questions:
        # Multi-turn judging only applies to questions with exactly two turns.
        if multi_turn and len(question["turns"]) != 2:
            continue
        q_id = question["question_id"]
        for candidate in models:
            # A model is never matched against itself.
            if candidate == baseline_model:
                continue
            candidate_answer = model_answers[candidate][q_id]
            baseline_answer = model_answers[baseline_model][q_id]
            optional = {}
            if ref_answers is not None:
                # Reference answers are keyed by the judge model's name.
                optional["ref_answer"] = ref_answers[judge.model_name][q_id]
            matches.append(
                MatchPair(
                    dict(question),
                    candidate,
                    baseline_model,
                    candidate_answer,
                    baseline_answer,
                    judge,
                    multi_turn=multi_turn,
                    **optional,
                )
            )
    return matches
20,601 | import argparse
from concurrent.futures import ThreadPoolExecutor
import json
import numpy as np
from tqdm import tqdm
from fastchat.llm_judge.common import (
load_questions,
load_model_answers,
load_judge_prompts,
check_data,
play_a_match_pair,
play_a_match_single,
get_model_list,
Judge,
MatchPair,
MatchSingle,
NEED_REF_CATS,
)
class MatchPair:
    """One pairwise judging task: a question plus two models' answers.

    NOTE(review): bare annotated fields with no @dataclasses.dataclass
    decorator, yet make_match_all_pairs below constructs MatchPair
    positionally — presumably the upstream class is a dataclass; confirm
    against fastchat.llm_judge.common.
    """

    question: dict  # benchmark question record
    model_1: str  # name of the first model
    model_2: str  # name of the second model
    answer_1: dict  # answer record of model_1
    answer_2: dict  # answer record of model_2
    judge: Judge  # judge configuration
    ref_answer: dict = None  # optional reference answer record
    multi_turn: bool = False  # judge both turns when True
def make_match_all_pairs(
    questions,
    models,
    model_answers,
    judge,
    baseline_model=None,
    ref_answers=None,
    multi_turn=False,
):
    """Create MatchPair tasks for every unordered pair of models."""
    matches = []
    for question in questions:
        # Multi-turn judging only applies to questions with exactly two turns.
        if multi_turn and len(question["turns"]) != 2:
            continue
        q_id = question["question_id"]
        for idx, model_a in enumerate(models):
            for model_b in models[idx + 1 :]:
                answer_a = model_answers[model_a][q_id]
                answer_b = model_answers[model_b][q_id]
                optional = {}
                if ref_answers is not None:
                    # Reference answers are keyed by the judge model's name.
                    optional["ref_answer"] = ref_answers[judge.model_name][q_id]
                matches.append(
                    MatchPair(
                        dict(question),
                        model_a,
                        model_b,
                        answer_a,
                        answer_b,
                        judge,
                        multi_turn=multi_turn,
                        **optional,
                    )
                )
    return matches
20,602 | import argparse
from concurrent.futures import ThreadPoolExecutor
import json
import numpy as np
from tqdm import tqdm
from fastchat.llm_judge.common import (
load_questions,
load_model_answers,
load_judge_prompts,
check_data,
play_a_match_pair,
play_a_match_single,
get_model_list,
Judge,
MatchPair,
MatchSingle,
NEED_REF_CATS,
)
class MatchSingle:
    """One single-answer grading task: a question plus one model's answer.

    Mirrors fastchat.llm_judge.common.MatchSingle.  The original body was a
    bare annotated field list with no @dataclasses.dataclass decorator, so
    the positional construction used in make_match_single would have failed;
    an explicit constructor with the same field order is provided instead.
    """

    # Class-level defaults for the optional fields.
    ref_answer = None
    multi_turn = False

    def __init__(self, question, model, answer, judge, ref_answer=None, multi_turn=False):
        self.question = question      # benchmark question record
        self.model = model            # name of the model being graded
        self.answer = answer          # the model's answer record
        self.judge = judge            # Judge configuration object
        self.ref_answer = ref_answer  # optional reference answer record
        self.multi_turn = multi_turn  # grade both turns when True
def make_match_single(
    questions,
    models,
    model_answers,
    judge,
    baseline_model=None,
    ref_answers=None,
    multi_turn=False,
):
    """Create one MatchSingle grading task per (question, model)."""
    matches = []
    for question in questions:
        # Multi-turn judging only applies to questions with exactly two turns.
        if multi_turn and len(question["turns"]) != 2:
            continue
        q_id = question["question_id"]
        for model_name in models:
            answer = model_answers[model_name][q_id]
            optional = {}
            if ref_answers is not None:
                # Reference answers are keyed by the judge model's name.
                optional["ref_answer"] = ref_answers[judge.model_name][q_id]
            matches.append(
                MatchSingle(
                    dict(question),
                    model_name,
                    answer,
                    judge,
                    multi_turn=multi_turn,
                    **optional,
                )
            )
    return matches
20,603 | import argparse
from concurrent.futures import ThreadPoolExecutor
import json
import numpy as np
from tqdm import tqdm
from fastchat.llm_judge.common import (
load_questions,
load_model_answers,
load_judge_prompts,
check_data,
play_a_match_pair,
play_a_match_single,
get_model_list,
Judge,
MatchPair,
MatchSingle,
NEED_REF_CATS,
)
class Judge:
    """Judge configuration: which model judges, and with which prompt.

    Mirrors fastchat.llm_judge.common.Judge.  The original body was a bare
    annotated field list with no @dataclasses.dataclass decorator, so the
    positional/keyword construction used in make_judge_pairwise would have
    failed; an explicit constructor with the same field order is provided.
    """

    # Class-level defaults for the optional fields.
    ref_based = False
    multi_turn = False

    def __init__(self, model_name, prompt_template, ref_based=False, multi_turn=False):
        self.model_name = model_name          # name of the judge model
        self.prompt_template = prompt_template  # prompt record from the prompt file
        self.ref_based = ref_based            # grading needs a reference answer
        self.multi_turn = multi_turn          # grading covers both turns
def make_judge_pairwise(judge_model, judge_prompts):
    """Build the pairwise Judge configs, keyed by question kind."""
    specs = {
        "default": ("pair-v2", {}),
        "math": ("pair-math-v1", {"ref_based": True}),
        "default-mt": ("pair-v2-multi-turn", {"multi_turn": True}),
        "math-mt": ("pair-math-v1-multi-turn", {"ref_based": True, "multi_turn": True}),
    }
    return {
        name: Judge(judge_model, judge_prompts[prompt_key], **extra)
        for name, (prompt_key, extra) in specs.items()
    }
20,604 | import argparse
from concurrent.futures import ThreadPoolExecutor
import json
import numpy as np
from tqdm import tqdm
from fastchat.llm_judge.common import (
load_questions,
load_model_answers,
load_judge_prompts,
check_data,
play_a_match_pair,
play_a_match_single,
get_model_list,
Judge,
MatchPair,
MatchSingle,
NEED_REF_CATS,
)
class Judge:
    """Judge configuration: which model judges, and with which prompt.

    NOTE(review): bare annotated fields with no @dataclasses.dataclass
    decorator, yet make_judge_single below constructs Judge positionally —
    presumably the upstream class is a dataclass; confirm against
    fastchat.llm_judge.common.
    """

    model_name: str  # name of the judge model
    prompt_template: dict  # prompt record loaded from the judge prompt file
    ref_based: bool = False  # grading needs a reference answer
    multi_turn: bool = False  # grading covers both turns
def make_judge_single(judge_model, judge_prompts):
    """Build the single-answer Judge configs, keyed by question kind."""
    specs = {
        "default": ("single-v1", {}),
        "math": ("single-math-v1", {"ref_based": True}),
        "default-mt": ("single-v1-multi-turn", {"multi_turn": True}),
        "math-mt": (
            "single-math-v1-multi-turn",
            {"ref_based": True, "multi_turn": True},
        ),
    }
    return {
        name: Judge(judge_model, judge_prompts[prompt_key], **extra)
        for name, (prompt_key, extra) in specs.items()
    }
20,605 | import argparse
from concurrent.futures import ThreadPoolExecutor
import json
import numpy as np
from tqdm import tqdm
from fastchat.llm_judge.common import (
load_questions,
load_model_answers,
load_judge_prompts,
check_data,
play_a_match_pair,
play_a_match_single,
get_model_list,
Judge,
MatchPair,
MatchSingle,
NEED_REF_CATS,
)
def play_a_match_wrapper(match):
    # Thin single-argument adapter for executor.map / tqdm: binds the
    # module-level play_a_match_func and output_file (chosen by the CLI
    # entry point elsewhere in the script; not visible in this chunk).
    play_a_match_func(match, output_file=output_file)
20,606 | import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import (
get_conversation_template,
ANTHROPIC_MODEL_LIST,
OPENAI_MODEL_LIST,
)
The provided code snippet includes necessary dependencies for implementing the `load_model_answers` function. Write a Python function `def load_model_answers(answer_dir: str)` to solve the following problem:
Load model answers. The return value is a python dict of type: Dict[model_name: str -> Dict[question_id: int -> answer: dict]]
Here is the function:
def load_model_answers(answer_dir: str):
    """Load model answers.

    The return value is a python dict of type:
    Dict[model_name: str -> Dict[question_id: int -> answer: dict]]

    The model name is the *.jsonl file name with its extension removed;
    files are processed in sorted order.
    """
    model_answers = {}
    for filename in sorted(glob.glob(os.path.join(answer_dir, "*.jsonl"))):
        per_question = {}
        with open(filename) as fin:
            for raw in fin:
                record = json.loads(raw)
                per_question[record["question_id"]] = record
        # Strip the ".jsonl" suffix (6 characters) to get the model name.
        model_answers[os.path.basename(filename)[:-6]] = per_question
    return model_answers
20,607 | import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import (
get_conversation_template,
ANTHROPIC_MODEL_LIST,
OPENAI_MODEL_LIST,
)
The provided code snippet includes necessary dependencies for implementing the `load_judge_prompts` function. Write a Python function `def load_judge_prompts(prompt_file: str)` to solve the following problem:
Load judge prompts. The return value is a python dict of type: Dict[judge_name: str -> dict]
Here is the function:
def load_judge_prompts(prompt_file: str):
    """Load judge prompts.

    The return value is a python dict of type:
    Dict[judge_name: str -> dict]

    Each line of the file is a JSON record keyed by its "name" field;
    a repeated name keeps the last record, as when inserting line by line.
    """
    with open(prompt_file) as fin:
        records = [json.loads(raw) for raw in fin]
    return {record["name"]: record for record in records}
20,608 | import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import (
get_conversation_template,
ANTHROPIC_MODEL_LIST,
OPENAI_MODEL_LIST,
)
class MatchPair:
    """One pairwise judging task: a question plus two models' answers.

    NOTE(review): bare annotated fields with no @dataclasses.dataclass
    decorator — presumably the upstream class is a dataclass; confirm
    against fastchat.llm_judge.common.
    """

    question: dict  # benchmark question record
    model_1: str  # name of the first model
    model_2: str  # name of the second model
    answer_1: dict  # answer record of model_1
    answer_2: dict  # answer record of model_2
    judge: Judge  # judge configuration
    ref_answer: dict = None  # optional reference answer record
    multi_turn: bool = False  # judge both turns when True
def run_judge_single(question, answer, judge, ref_answer, multi_turn=False):
    """Grade one model answer with an LLM judge.

    Args:
        question: benchmark question record with a "turns" list.
        answer: the model's answer record ("choices"[0]["turns"]).
        judge: Judge config carrying model_name and prompt_template.
        ref_answer: reference answer record for ref-based prompts, or None.
        multi_turn: grade both turns of a two-turn conversation when True.

    Returns:
        (rating, user_prompt, judgment); rating is -1 when no score could
        be parsed from the judgment text.

    Raises:
        ValueError: for an unsupported judge model or output format.
    """
    kwargs = {}
    model = judge.model_name
    if ref_answer is not None:
        kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
        if multi_turn:
            kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]

    # Fill the judge's prompt template with the question and answer text.
    if multi_turn:
        user_prompt = judge.prompt_template["prompt_template"].format(
            question_1=question["turns"][0],
            question_2=question["turns"][1],
            answer_1=answer["choices"][0]["turns"][0],
            answer_2=answer["choices"][0]["turns"][1],
            **kwargs,
        )
    else:
        user_prompt = judge.prompt_template["prompt_template"].format(
            question=question["turns"][0],
            answer=answer["choices"][0]["turns"][0],
            **kwargs,
        )

    rating = -1

    system_prompt = judge.prompt_template["system_prompt"]
    conv = get_conversation_template(model)
    conv.set_system_message(system_prompt)
    conv.append_message(conv.roles[0], user_prompt)
    conv.append_message(conv.roles[1], None)

    # Dispatch to the provider matching the judge model.
    if model in OPENAI_MODEL_LIST:
        judgment = chat_completion_openai(model, conv, temperature=0, max_tokens=2048)
    elif model in ANTHROPIC_MODEL_LIST:
        judgment = chat_completion_anthropic(
            model, conv, temperature=0, max_tokens=1024
        )
    else:
        raise ValueError(f"Invalid judge model name: {model}")

    if judge.prompt_template["output_format"] == "[[rating]]":
        # one_score_pattern / one_score_pattern_backup are module-level
        # regexes (not visible in this chunk) that extract the bracketed
        # score from the judgment text.
        match = re.search(one_score_pattern, judgment)
        if not match:
            match = re.search(one_score_pattern_backup, judgment)
        if match:
            rating = ast.literal_eval(match.groups()[0])
        else:
            rating = -1
    else:
        raise ValueError(
            f"invalid output format: {judge.prompt_template['output_format']}"
        )

    return rating, user_prompt, judgment
def play_a_match_single(match: MatchSingle, output_file: str):
    """Run one single-answer grading match and optionally record the result.

    Args:
        match: the grading task; reads .question, .model, .answer, .judge,
            .ref_answer and .multi_turn (annotation fixed: the original said
            MatchPair, but these are MatchSingle's fields).
        output_file: JSONL path to append the result to; skipped if falsy.

    Returns:
        The result record (question_id, model, judge, user_prompt, judgment,
        score, turn, tstamp).

    Raises:
        ValueError: if the judge's prompt template type is not "single".
    """
    question, model, answer, judge, ref_answer, multi_turn = (
        match.question,
        match.model,
        match.answer,
        match.judge,
        match.ref_answer,
        match.multi_turn,
    )

    if judge.prompt_template["type"] == "single":
        score, user_prompt, judgment = run_judge_single(
            question, answer, judge, ref_answer, multi_turn=multi_turn
        )

        question_id = question["question_id"]
        turn = 1 if not multi_turn else 2
        result = {
            "question_id": question_id,
            "model": model,
            "judge": (judge.model_name, judge.prompt_template["name"]),
            "user_prompt": user_prompt,
            "judgment": judgment,
            "score": score,
            "turn": turn,
            "tstamp": time.time(),
        }
        print(
            f"question: {question_id}, turn: {turn}, model: {model}, "
            f"score: {score}, "
            f"judge: {(judge.model_name, judge.prompt_template['name'])}"
        )
    else:
        # Bug fix: `judge` is a Judge object, not a dict — subscripting it
        # (judge['type']) raised TypeError instead of the intended ValueError.
        raise ValueError(f"invalid judge type: {judge.prompt_template['type']}")

    if output_file:
        os.makedirs(os.path.dirname(output_file), exist_ok=True)
        with open(output_file, "a") as fout:
            fout.write(json.dumps(result) + "\n")

    return result
20,609 | import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import (
get_conversation_template,
ANTHROPIC_MODEL_LIST,
OPENAI_MODEL_LIST,
)
# Two single-answer scores within this delta of each other count as a tie.
TIE_DELTA = 0.1
class MatchPair:
    """One pairwise judging task: a question plus two models' answers.

    NOTE(review): bare annotated fields with no @dataclasses.dataclass
    decorator — presumably the upstream class is a dataclass; confirm
    against fastchat.llm_judge.common.
    """

    question: dict  # benchmark question record
    model_1: str  # name of the first model
    model_2: str  # name of the second model
    answer_1: dict  # answer record of model_1
    answer_2: dict  # answer record of model_2
    judge: Judge  # judge configuration
    ref_answer: dict = None  # optional reference answer record
    multi_turn: bool = False  # judge both turns when True
def run_judge_single(question, answer, judge, ref_answer, multi_turn=False):
    """Grade one model answer with an LLM judge.

    Returns (rating, user_prompt, judgment); rating is -1 when no score
    could be parsed from the judgment text.  Raises ValueError for an
    unsupported judge model or output format.
    """
    kwargs = {}
    model = judge.model_name
    if ref_answer is not None:
        kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
        if multi_turn:
            kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]

    # Fill the judge's prompt template with the question and answer text.
    if multi_turn:
        user_prompt = judge.prompt_template["prompt_template"].format(
            question_1=question["turns"][0],
            question_2=question["turns"][1],
            answer_1=answer["choices"][0]["turns"][0],
            answer_2=answer["choices"][0]["turns"][1],
            **kwargs,
        )
    else:
        user_prompt = judge.prompt_template["prompt_template"].format(
            question=question["turns"][0],
            answer=answer["choices"][0]["turns"][0],
            **kwargs,
        )

    rating = -1

    system_prompt = judge.prompt_template["system_prompt"]
    conv = get_conversation_template(model)
    conv.set_system_message(system_prompt)
    conv.append_message(conv.roles[0], user_prompt)
    conv.append_message(conv.roles[1], None)

    # Dispatch to the provider matching the judge model.
    if model in OPENAI_MODEL_LIST:
        judgment = chat_completion_openai(model, conv, temperature=0, max_tokens=2048)
    elif model in ANTHROPIC_MODEL_LIST:
        judgment = chat_completion_anthropic(
            model, conv, temperature=0, max_tokens=1024
        )
    else:
        raise ValueError(f"Invalid judge model name: {model}")

    if judge.prompt_template["output_format"] == "[[rating]]":
        # one_score_pattern / one_score_pattern_backup are module-level
        # regexes (not visible in this chunk) that extract the bracketed
        # score from the judgment text.
        match = re.search(one_score_pattern, judgment)
        if not match:
            match = re.search(one_score_pattern_backup, judgment)
        if match:
            rating = ast.literal_eval(match.groups()[0])
        else:
            rating = -1
    else:
        raise ValueError(
            f"invalid output format: {judge.prompt_template['output_format']}"
        )

    return rating, user_prompt, judgment
def run_judge_pair(question, answer_a, answer_b, judge, ref_answer, multi_turn=False):
    """Compare two model answers with an LLM judge.

    Args:
        question: benchmark question record with a "turns" list.
        answer_a, answer_b: answer records in presentation order A/B.
        judge: Judge config carrying model_name and prompt_template.
        ref_answer: reference answer record for ref-based prompts, or None.
        multi_turn: judge both turns of a two-turn conversation when True.

    Returns:
        (winner, user_prompt, judgment); winner is "A", "B", "tie", or
        "error" when the verdict could not be parsed.

    Raises:
        ValueError: for an unsupported judge model or output format.
    """
    kwargs = {}
    model = judge.model_name
    if ref_answer is not None:
        kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
        if multi_turn:
            kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]

    # Fill the judge's prompt template with both answers (A/B order matters;
    # the caller swaps the order for the second game).
    if multi_turn:
        system_prompt = judge.prompt_template["system_prompt"]
        user_prompt = judge.prompt_template["prompt_template"].format(
            question_1=question["turns"][0],
            question_2=question["turns"][1],
            answer_a_1=answer_a["choices"][0]["turns"][0],
            answer_b_1=answer_b["choices"][0]["turns"][0],
            answer_a_2=answer_a["choices"][0]["turns"][1],
            answer_b_2=answer_b["choices"][0]["turns"][1],
            **kwargs,
        )
    else:
        system_prompt = judge.prompt_template["system_prompt"]
        user_prompt = judge.prompt_template["prompt_template"].format(
            question=question["turns"][0],
            answer_a=answer_a["choices"][0]["turns"][0],
            answer_b=answer_b["choices"][0]["turns"][0],
            **kwargs,
        )

    winner = "error"

    conv = get_conversation_template(model)
    conv.append_message(conv.roles[0], user_prompt)
    conv.append_message(conv.roles[1], None)

    if model in OPENAI_MODEL_LIST:
        conv.set_system_message(system_prompt)
        judgment = chat_completion_openai(model, conv, temperature=0, max_tokens=2048)
    elif model in ANTHROPIC_MODEL_LIST:
        # Anthropic completions have no system slot here: fold a non-default
        # system prompt into the user message instead.
        if system_prompt != "You are a helpful assistant.":
            user_prompt = "[Instruction]\n" + system_prompt + "\n\n" + user_prompt
            conv.messages[0][1] = user_prompt
        judgment = chat_completion_anthropic(
            model, conv, temperature=0, max_tokens=1024
        )
    else:
        raise ValueError(f"Invalid judge model name: {model}")

    if judge.prompt_template["output_format"] == "[[A]]":
        # Verdict is a literal [[A]]/[[B]]/[[C]] marker in the judgment text.
        if "[[A]]" in judgment:
            winner = "A"
        elif "[[B]]" in judgment:
            winner = "B"
        elif "[[C]]" in judgment:
            winner = "tie"
        else:
            winner = "error"
    elif judge.prompt_template["output_format"] == "[[rating_a,rating_b]]":
        # two_score_pattern / two_score_pattern_backup are module-level
        # regexes (not visible in this chunk) extracting both scores.
        match = re.search(two_score_pattern, judgment)
        if not match:
            match = re.search(two_score_pattern_backup, judgment)
        if match:
            scores = [ast.literal_eval(s.strip()) for s in match.groups()]
            if abs(scores[0] - scores[1]) <= TIE_DELTA:
                winner = "tie"
            elif scores[0] > scores[1]:
                winner = "A"
            else:
                winner = "B"
        else:
            winner = "error"
    else:
        raise ValueError(
            f"invalid output format: {judge.prompt_template['output_format']}"
        )

    return winner, user_prompt, judgment
def play_a_match_pair(match: MatchPair, output_file: str):
    """Run one pairwise match and optionally append the result as JSONL.

    For a "pairwise" judge, both presentation orders are played and mapped
    back to model_1/model_2.  For a "single" judge, both answers are graded
    independently and the winner is decided by score difference (ties within
    TIE_DELTA).

    Returns:
        The result record written to *output_file* (when it is truthy).

    Raises:
        ValueError: if the judge's prompt template type is unsupported.
    """
    question, model_1, model_2, answer_1, answer_2, judge, ref_answer, multi_turn = (
        match.question,
        match.model_1,
        match.model_2,
        match.answer_1,
        match.answer_2,
        match.judge,
        match.ref_answer,
        match.multi_turn,
    )

    if judge.prompt_template["type"] == "pairwise":
        # Play both A/B orders to cancel out position bias.
        g1_winner, g1_user_prompt, g1_judgment = run_judge_pair(
            question, answer_1, answer_2, judge, ref_answer, multi_turn=multi_turn
        )
        g2_winner, g2_user_prompt, g2_judgment = run_judge_pair(
            question, answer_2, answer_1, judge, ref_answer, multi_turn=multi_turn
        )

        g1_map = {"A": "model_1", "B": "model_2"}
        g2_map = {"A": "model_2", "B": "model_1"}
        g1_winner = g1_map.get(g1_winner, g1_winner)
        g2_winner = g2_map.get(g2_winner, g2_winner)
        question_id = question["question_id"]
        turn = 1 if not multi_turn else 2

        result = {
            "question_id": question_id,
            "model_1": model_1,
            "model_2": model_2,
            "g1_winner": g1_winner,
            "g2_winner": g2_winner,
            "judge": (judge.model_name, judge.prompt_template["name"]),
            "g1_user_prompt": g1_user_prompt,
            "g1_judgment": g1_judgment,
            "g2_user_prompt": g2_user_prompt,
            "g2_judgment": g2_judgment,
            "turn": turn,
            "tstamp": time.time(),
        }

        print(
            f"question: {question_id}, turn: {turn}, model_1: {model_1}, model_2: {model_2}, "
            f"g1_winner: {g1_winner}, g2_winner: {g2_winner}, "
            f"judge: {(judge.model_name, judge.prompt_template['name'])}"
        )
    elif judge.prompt_template["type"] == "single":
        # Bug fix: run_judge_single requires ref_answer (no default); the
        # original calls omitted it and multi_turn, raising TypeError.
        m1_score, m1_user_prompt, m1_judgment = run_judge_single(
            question, answer_1, judge, ref_answer, multi_turn=multi_turn
        )
        m2_score, m2_user_prompt, m2_judgment = run_judge_single(
            question, answer_2, judge, ref_answer, multi_turn=multi_turn
        )

        if abs(m1_score - m2_score) <= TIE_DELTA:
            winner = "tie"
        elif m1_score > m2_score:
            winner = "model_1"
        else:
            winner = "model_2"

        question_id = question["question_id"]
        result = {
            "question_id": question_id,
            "model_1": model_1,
            "model_2": model_2,
            "g1_winner": winner,
            "g2_winner": winner,
            "judge": (judge.model_name, judge.prompt_template["name"]),
            "g1_user_prompt": m1_user_prompt,
            "g1_judgment": m1_judgment,
            "g2_user_prompt": m2_user_prompt,
            "g2_judgment": m2_judgment,
            "m1_score": m1_score,
            "m2_score": m2_score,
            "tstamp": time.time(),
        }
        print(
            f"question: {question_id}, model_1: {model_1}, model_2: {model_2}, "
            f"winner: {winner}, m1_score: {m1_score}, m2_score: {m2_score}, "
            f"judge: {(judge.model_name, judge.prompt_template['name'])}"
        )
    else:
        # Bug fix: `judge` is a Judge object, not a dict — subscripting it
        # (judge['type']) raised TypeError instead of the intended ValueError.
        raise ValueError(f"invalid judge type: {judge.prompt_template['type']}")

    if output_file:
        os.makedirs(os.path.dirname(output_file), exist_ok=True)
        with open(output_file, "a") as fout:
            fout.write(json.dumps(result) + "\n")

    return result
20,610 | import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import (
get_conversation_template,
ANTHROPIC_MODEL_LIST,
OPENAI_MODEL_LIST,
)
# Retry policy shared by all provider API wrappers.
API_MAX_RETRY = 16  # attempts before giving up
API_RETRY_SLEEP = 10  # seconds to sleep between attempts
API_ERROR_OUTPUT = "$ERROR$"  # sentinel returned when every attempt failed
def chat_completion_openai_azure(model, conv, temperature, max_tokens, api_dict=None):
    """Query an Azure OpenAI chat deployment with retries.

    Returns the first choice's message content, or the API_ERROR_OUTPUT
    sentinel when every attempt failed or the request was invalid.
    """
    openai.api_type = "azure"
    openai.api_version = "2023-07-01-preview"
    if api_dict is not None:
        openai.api_base = api_dict["api_base"]
        openai.api_key = api_dict["api_key"]
    else:
        # Fall back to the environment; raises KeyError if unset.
        openai.api_base = os.environ["AZURE_OPENAI_ENDPOINT"]
        openai.api_key = os.environ["AZURE_OPENAI_KEY"]

    # Azure deployments are addressed without the "azure-" prefix.
    # Bug fix: the original used `"azure-" in model` + `model[6:]`, which
    # mis-trimmed names containing "azure-" in the middle.
    if model.startswith("azure-"):
        model = model[len("azure-") :]

    output = API_ERROR_OUTPUT
    for _ in range(API_MAX_RETRY):
        try:
            messages = conv.to_openai_api_messages()
            response = openai.ChatCompletion.create(
                engine=model,
                messages=messages,
                n=1,
                temperature=temperature,
                max_tokens=max_tokens,
            )
            output = response["choices"][0]["message"]["content"]
            break
        except openai.error.InvalidRequestError as e:
            # Bug fix: InvalidRequestError subclasses OpenAIError, so this
            # clause was unreachable when listed after the generic handler.
            # Invalid requests are not retryable — give up immediately.
            print(type(e), e)
            break
        except openai.error.OpenAIError as e:
            # Transient failure: log and retry after a short sleep.
            print(type(e), e)
            time.sleep(API_RETRY_SLEEP)
        except KeyError:
            # Unexpected response shape: log the raw response and give up.
            print(response)
            break

    return output
20,611 | import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import (
get_conversation_template,
ANTHROPIC_MODEL_LIST,
OPENAI_MODEL_LIST,
)
def normalize_game_key_dict(judgment_dict):
    """Make the model names sorted in the game keys."""
    return dict(
        normalize_game_key_single(game_key, game_result)
        for game_key, game_result in judgment_dict.items()
    )
The provided code snippet includes necessary dependencies for implementing the `load_pairwise_model_judgments` function. Write a Python function `def load_pairwise_model_judgments(filename: str)` to solve the following problem:
Load model judgments. The return value is a dict of type: Dict[judge: Tuple -> Dict[game_key: tuple -> game_result: dict]]
Here is the function:
def load_pairwise_model_judgments(filename: str):
    """Load pairwise model judgments from a JSONL file.

    The return value is a dict of type:
    Dict[judge: Tuple -> Dict[game_key: tuple -> game_result: dict]]

    Each record must contain either a single "winner" or both "g1_winner"
    and "g2_winner" (the two orderings of the model pair); disagreeing
    orderings are recorded as "inconsistent".

    Raises:
        ValueError: If a record has none of the expected winner keys.
    """
    judge_dict = {}

    # Use a context manager so the file handle is always closed
    # (the original left it to the garbage collector).
    with open(filename) as fin:
        for line in fin:
            obj = json.loads(line)
            judge = tuple(obj["judge"])
            qid, model_1, model_2 = obj["question_id"], obj["model_1"], obj["model_2"]

            if judge not in judge_dict:
                judge_dict[judge] = {}

            if "winner" in obj:
                winner = obj["winner"]
            elif "g1_winner" in obj and "g2_winner" in obj:
                g1_winner, g2_winner = obj["g1_winner"], obj["g2_winner"]
                if g1_winner == g2_winner:
                    winner = g1_winner
                else:
                    winner = "inconsistent"
            else:
                raise ValueError(f"Invalid keys: {list(obj.keys())}")

            gamekey = (qid, model_1, model_2)
            winners = (winner,)

            judge_dict[judge][gamekey] = {
                "winners": winners,
                "g1_judgment": obj["g1_judgment"],
                "g2_judgment": obj["g2_judgment"],
            }

    # Make the model names sorted in the game keys
    normalized = {}
    for judge, value in judge_dict.items():
        normalized[judge] = normalize_game_key_dict(value)
    return normalized
20,612 | import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import (
get_conversation_template,
ANTHROPIC_MODEL_LIST,
OPENAI_MODEL_LIST,
)
The provided code snippet includes necessary dependencies for implementing the `load_single_model_judgments` function. Write a Python function `def load_single_model_judgments(filename: str)` to solve the following problem:
Load model judgments. The return value is a dict of type: Dict[judge: Tuple -> Dict[game_key: tuple -> game_result: dict]]
Here is the function:
def load_single_model_judgments(filename: str):
    """Load single-answer model judgments from a JSONL file.

    The return value is a dict of type:
    Dict[judge: Tuple -> Dict[game_key: tuple -> game_result: dict]]

    Each game key is ``(question_id, model)`` and maps to the judge's
    numeric "score" plus the free-text "judgment".
    """
    judge_dict = {}

    # Use a context manager so the file handle is always closed
    # (the original left it to the garbage collector).
    with open(filename) as fin:
        for line in fin:
            obj = json.loads(line)
            judge = tuple(obj["judge"])
            qid, model = obj["question_id"], obj["model"]

            if judge not in judge_dict:
                judge_dict[judge] = {}

            gamekey = (qid, model)

            judge_dict[judge][gamekey] = {
                "score": obj["score"],
                "judgment": obj["judgment"],
            }
    return judge_dict
20,613 | import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import (
get_conversation_template,
ANTHROPIC_MODEL_LIST,
OPENAI_MODEL_LIST,
)
NEED_REF_CATS = ["math", "reasoning", "coding", "arena-hard-200"]


def check_data(questions, model_answers, ref_answers, models, judges):
    """Assert that every question has the answers the judges will need."""
    # Every requested model must have answered every question.
    for model in models:
        assert model in model_answers, f"Missing model answer for {model}"
        answers = model_answers[model]
        for question in questions:
            qid = question["question_id"]
            assert (
                qid in answers
            ), f"Missing model {model}'s answer to Question {qid}"

    # Reference-based judges additionally need a reference answer for every
    # question in a reference-requiring category.
    for judge in judges.values():
        if not judge.ref_based:
            continue
        for question in questions:
            if question["category"] not in NEED_REF_CATS:
                continue
            qid = question["question_id"]
            assert (
                qid in ref_answers[judge.model_name]
            ), f"Missing reference answer to Question {qid} for judge {judge.model_name}"
20,614 | import ast
import dataclasses
import glob
import json
import os
import re
import time
from typing import Optional
import openai
import anthropic
from fastchat.model.model_adapter import (
get_conversation_template,
ANTHROPIC_MODEL_LIST,
OPENAI_MODEL_LIST,
)
def get_model_list(answer_dir):
file_paths = glob.glob(f"{answer_dir}/*.jsonl")
file_names = [os.path.splitext(os.path.basename(f))[0] for f in file_paths]
return file_names | null |
20,615 | import argparse
from collections import defaultdict
import re
import gradio as gr
from fastchat.llm_judge.common import (
load_questions,
load_model_answers,
load_single_model_judgments,
load_pairwise_model_judgments,
resolve_single_judgment_dict,
resolve_pairwise_judgment_dict,
get_single_judge_explanation,
get_pairwise_judge_explanation,
)
def build_question_selector_map():
    """Populate the module-level question/category selector maps.

    Builds a short text preview for every question (its id plus the first
    128 characters of the first turn) and indexes it both globally and per
    category. Reads the module-level ``questions`` list and mutates the two
    selector maps, all of which are defined elsewhere in this module.
    """
    global question_selector_map, category_selector_map
    # Build question selector map
    for q in questions:
        preview = f"{q['question_id']}: " + q["turns"][0][:128] + "..."
        question_selector_map[preview] = q
        # category_selector_map presumably defaults missing keys to a list
        # (e.g. a defaultdict) — confirm at its definition site.
        category_selector_map[q["category"]].append(preview)
def build_pairwise_browser_tab():
    """Build the Gradio tab for browsing pairwise (model A vs. B) judgments.

    Lays out category/question dropdowns, two model selectors, the two-turn
    conversation views, the reference answer, and the judge explanations,
    then wires the change callbacks. Returns a 1-tuple with the category
    selector so the demo can initialize it on page load.
    """
    global question_selector_map, category_selector_map

    models = list(model_answers.keys())
    num_sides = 2
    num_turns = 2
    side_names = ["A", "B"]

    question_selector_choices = list(question_selector_map.keys())
    category_selector_choices = list(category_selector_map.keys())

    # Selectors
    with gr.Row():
        with gr.Column(scale=1, min_width=200):
            category_selector = gr.Dropdown(
                choices=category_selector_choices, label="Category", container=False
            )
        with gr.Column(scale=100):
            question_selector = gr.Dropdown(
                choices=question_selector_choices, label="Question", container=False
            )

    model_selectors = [None] * num_sides
    with gr.Row():
        for i in range(num_sides):
            with gr.Column():
                # Side A defaults to the first model, side B to gpt-3.5-turbo.
                if i == 0:
                    value = models[0]
                else:
                    value = "gpt-3.5-turbo"
                model_selectors[i] = gr.Dropdown(
                    choices=models,
                    value=value,
                    label=f"Model {side_names[i]}",
                    container=False,
                )

    # Conversation: one user-question banner per turn, then one Markdown
    # panel per side (plus a thin spacer column between the sides).
    chat_mds = []
    for i in range(num_turns):
        chat_mds.append(gr.Markdown(elem_id=f"user_question_{i+1}"))
        with gr.Row():
            for j in range(num_sides):
                with gr.Column(scale=100):
                    chat_mds.append(gr.Markdown())
                if j == 0:
                    with gr.Column(scale=1, min_width=8):
                        gr.Markdown()
    reference = gr.Markdown(elem_id=f"reference")
    chat_mds.append(reference)

    model_explanation = gr.Markdown(elem_id="model_explanation")
    model_explanation2 = gr.Markdown(elem_id="model_explanation")

    # Callbacks: display_question / display_pairwise_answer are defined
    # elsewhere in this module.
    category_selector.change(display_question, [category_selector], [question_selector])
    question_selector.change(
        display_pairwise_answer,
        [question_selector] + model_selectors,
        chat_mds + [model_explanation] + [model_explanation2],
    )

    for i in range(num_sides):
        model_selectors[i].change(
            display_pairwise_answer,
            [question_selector] + model_selectors,
            chat_mds + [model_explanation] + [model_explanation2],
        )

    return (category_selector,)
def build_single_answer_browser_tab():
    """Build the Gradio tab for browsing single-answer grading results.

    Mirrors the pairwise tab but shows only one model column. Returns a
    1-tuple with the category selector so the demo can initialize it on
    page load.
    """
    global question_selector_map, category_selector_map

    models = list(model_answers.keys())
    num_sides = 1
    num_turns = 2
    side_names = ["A"]

    question_selector_choices = list(question_selector_map.keys())
    category_selector_choices = list(category_selector_map.keys())

    # Selectors
    with gr.Row():
        with gr.Column(scale=1, min_width=200):
            category_selector = gr.Dropdown(
                choices=category_selector_choices, label="Category", container=False
            )
        with gr.Column(scale=100):
            question_selector = gr.Dropdown(
                choices=question_selector_choices, label="Question", container=False
            )

    model_selectors = [None] * num_sides
    with gr.Row():
        for i in range(num_sides):
            with gr.Column():
                model_selectors[i] = gr.Dropdown(
                    choices=models,
                    value=models[i] if len(models) > i else "",
                    label=f"Model {side_names[i]}",
                    container=False,
                )

    # Conversation: one user-question banner per turn, then the answer panel.
    chat_mds = []
    for i in range(num_turns):
        chat_mds.append(gr.Markdown(elem_id=f"user_question_{i+1}"))
        with gr.Row():
            for j in range(num_sides):
                with gr.Column(scale=100):
                    chat_mds.append(gr.Markdown())
                if j == 0:
                    with gr.Column(scale=1, min_width=8):
                        gr.Markdown()
    reference = gr.Markdown(elem_id=f"reference")
    chat_mds.append(reference)

    model_explanation = gr.Markdown(elem_id="model_explanation")
    model_explanation2 = gr.Markdown(elem_id="model_explanation")

    # Callbacks: display_question / display_single_answer are defined
    # elsewhere in this module.
    category_selector.change(display_question, [category_selector], [question_selector])
    question_selector.change(
        display_single_answer,
        [question_selector] + model_selectors,
        chat_mds + [model_explanation] + [model_explanation2],
    )

    for i in range(num_sides):
        model_selectors[i].change(
            display_single_answer,
            [question_selector] + model_selectors,
            chat_mds + [model_explanation] + [model_explanation2],
        )

    return (category_selector,)
block_css = """
#user_question_1 {
background-color: #DEEBF7;
}
#user_question_2 {
background-color: #E2F0D9;
}
#reference {
background-color: #FFF2CC;
}
#model_explanation {
background-color: #FBE5D6;
}
"""
def load_demo():
    """On page load, select the first category in both browser tabs."""
    # The same update object is returned twice: once per tab's dropdown.
    dropdown_update = gr.Dropdown.update(value=list(category_selector_map.keys())[0])
    return dropdown_update, dropdown_update
def build_demo():
    """Assemble the full MT-Bench browser Gradio app and return it."""
    # Index questions/categories before any dropdown choices are read.
    build_question_selector_map()
    with gr.Blocks(
        title="MT-Bench Browser",
        theme=gr.themes.Base(text_size=gr.themes.sizes.text_lg),
        css=block_css,
    ) as demo:
        gr.Markdown(
            """
# MT-Bench Browser
The code to generate answers and judgments is at [fastchat.llm_judge](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge).
"""
        )
        with gr.Tab("Single Answer Grading"):
            (category_selector,) = build_single_answer_browser_tab()
        with gr.Tab("Pairwise Comparison"):
            (category_selector2,) = build_pairwise_browser_tab()
        # Initialize both tabs' category dropdowns on page load.
        demo.load(load_demo, [], [category_selector, category_selector2])
    return demo
20,616 | from dataclasses import dataclass, field
import json
import math
import jsonlines
import pathlib
from multiprocessing import Pool
from typing import Dict, Optional, Sequence
import numpy as np
import torch
from torch.utils.data import Dataset
import transformers
from transformers import Trainer
from transformers.trainer_pt_utils import LabelSmoother
from fastchat.conversation import SeparatorStyle
from fastchat.model.model_adapter import get_conversation_template
def apply_prompt_template(sources, systems=None):
    """Render each multi-turn source into a Vicuna-style prompt string.

    Returns the list of rendered prompts and the (reused) conversation
    template object.
    """
    conv = get_conversation_template("vicuna")
    role_map = {"human": conv.roles[0], "gpt": conv.roles[1]}
    prompts = []
    for idx, source in enumerate(sources):
        # Drop a leading turn that does not come from the human side.
        if role_map[source[0]["from"]] != conv.roles[0]:
            source = source[1:]
        conv.messages = []
        for turn_idx, turn in enumerate(source):
            speaker = role_map[turn["from"]]
            # Turns must strictly alternate human / assistant.
            assert speaker == conv.roles[turn_idx % 2], f"{idx}"
            conv.append_message(speaker, turn["value"])
        if systems and systems[idx]:
            conv.set_system_message(systems[idx])
        prompts.append(conv.get_prompt())
    return prompts, conv
def tokenize_conversations(conversations, tokenizer):
    """Tokenize prompts to fixed-length id tensors; targets start as a copy."""
    encoded = tokenizer(
        conversations,
        return_tensors="pt",
        padding="max_length",
        max_length=tokenizer.model_max_length,
        truncation=True,
    )
    input_ids = encoded.input_ids
    # Labels are masked later (see mask_targets); start from a full copy.
    return input_ids, input_ids.clone()
def mask_targets(conversations, targets, tokenizer, conv):
    """Mask user instructions in-place so loss is only computed on replies.

    For each conversation/target pair, every token that belongs to the user
    side of a turn (everything up to the assistant separator) is overwritten
    with IGNORE_TOKEN_ID; only assistant responses keep their labels.
    Returns the (mutated) targets tensor.
    """
    # Separator marking the start of the assistant's reply within a turn.
    sep = conv.sep + conv.roles[1] + ": "
    for conversation, target in zip(conversations, targets):
        # Count of non-padding tokens, used for the consistency check below.
        total_len = int(target.ne(tokenizer.pad_token_id).sum())
        turns = conversation.split(conv.sep2)
        cur_len = 0
        # NOTE(review): with cur_len == 0 this slice is empty, so the
        # statement is a no-op kept for symmetry with the tail masking.
        target[:cur_len] = IGNORE_TOKEN_ID
        for i, turn in enumerate(turns):
            if turn == "":
                break
            turn_len = len(tokenizer(turn + conv.sep2).input_ids)
            parts = turn.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep
            # "-1" compensates for a special token added by the tokenizer —
            # TODO confirm against the tokenizer actually used.
            instruction_len = len(tokenizer(parts[0]).input_ids) - 1
            # Ignore the user instruction part of this turn.
            target[cur_len : cur_len + instruction_len] = IGNORE_TOKEN_ID
            cur_len += turn_len
        # Everything after the last processed turn (padding etc.) is ignored.
        target[cur_len:] = IGNORE_TOKEN_ID
        if False:  # Inspect and check the correctness of masking
            z = target.clone()
            z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z)
            rank0_print(tokenizer.decode(z))
        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Tokenization drifted from the rendered text: drop the whole
                # example from the loss rather than train on bad labels.
                target[:] = IGNORE_TOKEN_ID
                rank0_print(
                    f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
                    f" (ignored)"
                )
    return targets
def preprocess(sources, tokenizer: transformers.PreTrainedTokenizer, **kwargs) -> Dict:
    """Render, tokenize, and mask conversations for supervised fine-tuning.

    Args:
        sources: List of conversations (lists of {"from", "value"} turns).
        tokenizer: Tokenizer whose model_max_length bounds each example.
        **kwargs: Optional "systems" — per-conversation system messages.

    Returns:
        Dict with "input_ids", "labels" (user tokens masked), and
        "attention_mask" (non-padding positions).
    """
    systems = None if not kwargs else kwargs.get("systems", None)

    # If the data volume is small, process it directly in the main thread
    if len(sources) <= 1000:
        conversations, conv = apply_prompt_template(sources, systems)
        input_ids, targets = tokenize_conversations(conversations, tokenizer)
        targets = mask_targets(conversations, targets, tokenizer, conv)
    else:  # If the data volume is large, use multithreading for processing
        # NOTE(review): each apply_async(...).get() blocks before the next
        # stage is submitted, so the three stages still run sequentially in
        # a single worker process — this mainly moves the work off the main
        # process rather than parallelizing it.
        with Pool() as p:
            conversations, conv = p.apply_async(
                apply_prompt_template, (sources, systems)
            ).get()
            input_ids, targets = p.apply_async(
                tokenize_conversations, (conversations, tokenizer)
            ).get()
            targets = p.apply_async(
                mask_targets, (conversations, targets, tokenizer, conv)
            ).get()
            p.close()
            p.join()

    return dict(
        input_ids=input_ids,
        labels=targets,
        attention_mask=input_ids.ne(tokenizer.pad_token_id),
    )
20,617 | from dataclasses import dataclass, field
import json
import math
import jsonlines
import pathlib
from multiprocessing import Pool
from typing import Dict, Optional, Sequence
import numpy as np
import torch
from torch.utils.data import Dataset
import transformers
from transformers import Trainer
from transformers.trainer_pt_utils import LabelSmoother
from fastchat.conversation import SeparatorStyle
from fastchat.model.model_adapter import get_conversation_template
class ModelArguments:
    # NOTE(review): in the upstream file this is a @dataclass; the decorator
    # (not visible here) is required for field(...) defaults — confirm.
    # Hub id or local path of the base model to fine-tune.
    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
class DataArguments:
    # Path to the training data file (.json or .jsonl).
    data_path: str = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    # When True, tokenize examples lazily instead of up front.
    lazy_preprocess: bool = False
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments plus cache/optimizer/sequence-length knobs."""
    # Where to cache downloaded models/tokenizers.
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    model_max_length: int = field(
        default=512,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
local_rank = None  # set by train() from TrainingArguments; gates rank-0-only logging
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
    """Collect the model state dict on CPU and dump it to disk (save rank only)."""
    state_dict = trainer.model.state_dict()
    if not trainer.args.should_save:
        return
    # Move every tensor to CPU before serialization, then free the originals.
    cpu_state_dict = {name: tensor.cpu() for name, tensor in state_dict.items()}
    del state_dict
    trainer._save(output_dir, state_dict=cpu_state_dict)  # noqa
def make_supervised_data_module(
    tokenizer: transformers.PreTrainedTokenizer, data_args, train_ratio=0.98
) -> Dict:
    """Make dataset and collator for supervised fine-tuning.

    Args:
        tokenizer: Tokenizer passed through to the dataset class.
        data_args: Holds ``data_path`` (.json or .jsonl) and ``lazy_preprocess``.
        train_ratio: Fraction of the data used for training (capped at 1.0).

    Returns:
        Dict with "train_dataset" and "eval_dataset".

    Raises:
        ValueError: If ``data_path`` has an unsupported extension (the
            original code fell through and crashed with a NameError).
    """
    train_ratio = min(train_ratio, 1.0)
    dataset_cls = (
        LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset
    )
    rank0_print("Loading data...")
    data_path = data_args.data_path
    if data_path.endswith(".json"):
        # Close the file deterministically instead of leaking the handle.
        with open(data_path, "r") as fin:
            raw_data = json.load(fin)
    elif data_path.endswith(".jsonl"):
        with jsonlines.open(data_path, mode="r") as reader:
            raw_data = [item for item in reader]
    else:
        raise ValueError(f"Unsupported data format: {data_path}")

    # Split train/test with a fixed seed so the split is reproducible.
    np.random.seed(0)
    perm = np.random.permutation(len(raw_data))
    split = int(len(perm) * train_ratio)
    train_indices = perm[:split]
    if train_ratio < 1:
        eval_indices = perm[split:]
    else:
        # if train_ratio==1, we use 5% of data as eval data, make sure
        # trainer will not throw error when eval data is empty
        eval_indices = perm[-int(len(perm) * 0.05) :]
    train_raw_data = [raw_data[i] for i in train_indices]
    eval_raw_data = [raw_data[i] for i in eval_indices]
    rank0_print(f"#train {len(train_raw_data)}, #eval {len(eval_raw_data)}")

    train_dataset = dataset_cls(train_raw_data, tokenizer=tokenizer)
    eval_dataset = dataset_cls(eval_raw_data, tokenizer=tokenizer)
    return dict(train_dataset=train_dataset, eval_dataset=eval_dataset)
def train():
    """Entry point: parse CLI args, build model/tokenizer/data, and train.

    Resumes from the latest checkpoint in ``output_dir`` when one exists,
    then saves the trainer state and a CPU copy of the final weights.
    """
    global local_rank

    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments)
    )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    local_rank = training_args.local_rank
    config = transformers.AutoConfig.from_pretrained(
        model_args.model_name_or_path,
        trust_remote_code=True,
        cache_dir=training_args.cache_dir,
    )
    # Set RoPE scaling factor: linearly stretch positions when the requested
    # sequence length exceeds the model's native context window.
    orig_ctx_len = getattr(config, "max_position_embeddings", None)
    if orig_ctx_len and training_args.model_max_length > orig_ctx_len:
        scaling_factor = float(math.ceil(training_args.model_max_length / orig_ctx_len))
        config.rope_scaling = {"type": "linear", "factor": scaling_factor}
    config.use_cache = False
    model = transformers.AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        config=config,
        trust_remote_code=True,
        cache_dir=training_args.cache_dir,
    )
    # Tie the weights
    model.tie_weights()

    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.model_name_or_path,
        config=config,
        trust_remote_code=True,
        cache_dir=training_args.cache_dir,
        model_max_length=training_args.model_max_length,
        padding_side="right",
        use_fast=False,
    )
    # NOTE: token ids beyond vocab_size break training, so after changing the
    # pad token we resize the embedding table to match the tokenizer.
    tokenizer.pad_token = tokenizer.unk_token
    print(f"tokens len: {len(tokenizer)}")
    model.resize_token_embeddings(len(tokenizer))

    data_module = make_supervised_data_module(
        tokenizer=tokenizer, train_ratio=0.98, data_args=data_args
    )
    trainer = Trainer(
        model=model, tokenizer=tokenizer, args=training_args, **data_module
    )

    # Resume if a checkpoint already exists in the output directory.
    if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()
    trainer.save_state()
    safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
20,618 | from dataclasses import dataclass, field
import json
import math
import pathlib
from typing import Dict, Optional, Sequence
import numpy as np
import torch
from torch.utils.data import Dataset
import transformers
from transformers import Trainer
from transformers.trainer_pt_utils import LabelSmoother
from fastchat.conversation import SeparatorStyle
from fastchat.model.model_adapter import get_conversation_template
IGNORE_TOKEN_ID = LabelSmoother.ignore_index  # label id excluded from loss computation
def rank0_print(*args):
    """Print only on the rank-0 process to avoid duplicated log lines."""
    if local_rank != 0:
        return
    print(*args)
class SeparatorStyle(IntEnum):
    """Separator styles."""

    # Each member names a prompt-formatting convention used when a
    # conversation is rendered to a single string; the exact rendering for
    # each style lives in the Conversation class (defined elsewhere).
    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    NO_COLON_TWO = auto()
    ADD_NEW_LINE_SINGLE = auto()
    LLAMA2 = auto()
    MINIMA = auto()
    CHATGLM = auto()
    CHATML = auto()
    CHATMLQANY = auto()
    CHATINTERN = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    ROBIN = auto()
    FALCON_CHAT = auto()
    CHATGLM3 = auto()
    DEEPSEEK_CHAT = auto()
    METAMATH = auto()
def get_conversation_template(model_path: str) -> Conversation:
    """Get the default conversation template."""
    # get_model_adapter (defined elsewhere) picks the adapter matching this
    # model path; the adapter supplies the model's conversation template.
    adapter = get_model_adapter(model_path)
    return adapter.get_default_conv_template(model_path)
def preprocess(
    sources,
    tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
    """Render conversations with the Vicuna template, tokenize, and mask.

    Produces fixed-length input ids plus labels where every user-instruction
    token is replaced with IGNORE_TOKEN_ID so the loss covers only assistant
    replies. The offset arithmetic below is tuned to the Llama tokenizer.

    Returns:
        Dict with "input_ids", "labels", and "attention_mask".
    """
    conv = get_conversation_template("vicuna")
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]
        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations
    input_ids = tokenizer(
        conversations,
        return_tensors="pt",
        padding="max_length",
        max_length=tokenizer.model_max_length,
        truncation=True,
    ).input_ids
    targets = input_ids.clone()

    # The masking below assumes the two-separator colon style.
    assert conv.sep_style == SeparatorStyle.ADD_COLON_TWO

    # Mask targets. Only compute loss on the assistant outputs.
    sep = conv.sep + conv.roles[1] + ": "
    for conversation, target in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())
        turns = conversation.split(conv.sep2)
        # Position 0 is the BOS token, always ignored.
        cur_len = 1
        target[:cur_len] = IGNORE_TOKEN_ID
        for i, turn in enumerate(turns):
            if turn == "":
                break
            turn_len = len(tokenizer(turn).input_ids)
            parts = turn.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep
            # "-2" is hardcoded for the Llama tokenizer to make the offset correct.
            instruction_len = len(tokenizer(parts[0]).input_ids) - 2
            if i != 0 and not tokenizer.legacy:
                # The legacy and non-legacy modes handle special tokens differently
                instruction_len -= 1
            # Ignore the user instructions
            target[cur_len : cur_len + instruction_len] = IGNORE_TOKEN_ID
            cur_len += turn_len
            if i != 0 and not tokenizer.legacy:
                # The legacy and non-legacy modes handle special tokens differently
                cur_len -= 1
        # Ignore everything after the last complete turn (padding etc.).
        target[cur_len:] = IGNORE_TOKEN_ID

        if False:  # Inspect and check the correctness of masking
            z = target.clone()
            z = torch.where(z == IGNORE_TOKEN_ID, tokenizer.unk_token_id, z)
            rank0_print(tokenizer.decode(z))
            exit()

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                # Tokenization drifted from the rendered text: drop the whole
                # example from the loss rather than train on bad labels.
                target[:] = IGNORE_TOKEN_ID
                rank0_print(
                    f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
                    f" #turn = {len(turns) - 1}. (ignored)"
                )

    return dict(
        input_ids=input_ids,
        labels=targets,
        attention_mask=input_ids.ne(tokenizer.pad_token_id),
    )
20,619 | from dataclasses import dataclass, field
import json
import math
import pathlib
from typing import Dict, Optional, Sequence
import numpy as np
import torch
from torch.utils.data import Dataset
import transformers
from transformers import Trainer
from transformers.trainer_pt_utils import LabelSmoother
from fastchat.conversation import SeparatorStyle
from fastchat.model.model_adapter import get_conversation_template
class ModelArguments:
    # Hub id or local path of the base model to fine-tune.
    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": "Whether or not to allow for custom models defined on the Hub in their own modeling files"
        },
    )
    # Which side the tokenizer pads on ("right" or "left").
    padding_side: str = field(
        default="right", metadata={"help": "The padding side in tokenizer"}
    )
class DataArguments:
    # Path to the training data JSON file.
    data_path: str = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    # Optional held-out evaluation data JSON file.
    eval_data_path: str = field(
        default=None, metadata={"help": "Path to the evaluation data."}
    )
    # When True, tokenize examples lazily instead of up front.
    lazy_preprocess: bool = False
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments plus cache/optimizer/sequence-length knobs."""
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    model_max_length: int = field(
        default=512,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
local_rank = None  # set by train() from TrainingArguments.local_rank
def trainer_save_model_safe(trainer: transformers.Trainer):
    """Save an FSDP-wrapped model by gathering the full state dict safely."""
    from torch.distributed.fsdp import (
        FullyShardedDataParallel as FSDP,
        StateDictType,
        FullStateDictConfig,
    )

    # Offload gathered parameters to CPU and write only from rank 0.
    policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FSDP.state_dict_type(trainer.model, StateDictType.FULL_STATE_DICT, policy):
        trainer.save_model()
def make_supervised_data_module(
    tokenizer: transformers.PreTrainedTokenizer, data_args
) -> Dict:
    """Make dataset and collator for supervised fine-tuning.

    Args:
        tokenizer: Tokenizer passed through to the dataset class.
        data_args: Holds ``data_path``, optional ``eval_data_path``, and
            ``lazy_preprocess``.

    Returns:
        Dict with "train_dataset" and "eval_dataset" (None when no eval
        data path is given).
    """
    dataset_cls = (
        LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset
    )
    rank0_print("Loading data...")

    # Use context managers so the JSON files are closed deterministically
    # (the original leaked both file handles).
    with open(data_args.data_path, "r") as fin:
        train_json = json.load(fin)
    train_dataset = dataset_cls(train_json, tokenizer=tokenizer)

    if data_args.eval_data_path:
        with open(data_args.eval_data_path, "r") as fin:
            eval_json = json.load(fin)
        eval_dataset = dataset_cls(eval_json, tokenizer=tokenizer)
    else:
        eval_dataset = None

    return dict(train_dataset=train_dataset, eval_dataset=eval_dataset)
def train():
    """Entry point: parse CLI args, build model/tokenizer/data, and train.

    Resumes from the latest checkpoint in ``output_dir`` when one exists,
    then saves the trainer state and the final model (via DeepSpeed's own
    path or an FSDP-safe full-state-dict save).
    """
    global local_rank

    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments)
    )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    local_rank = training_args.local_rank

    # Set RoPE scaling factor: linearly stretch positions when the requested
    # sequence length exceeds the model's native context window.
    config = transformers.AutoConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        trust_remote_code=model_args.trust_remote_code,
    )
    orig_ctx_len = getattr(config, "max_position_embeddings", None)
    if orig_ctx_len and training_args.model_max_length > orig_ctx_len:
        scaling_factor = float(math.ceil(training_args.model_max_length / orig_ctx_len))
        config.rope_scaling = {"type": "linear", "factor": scaling_factor}
    config.use_cache = False

    # Load model and tokenizer
    model = transformers.AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        config=config,
        cache_dir=training_args.cache_dir,
        trust_remote_code=model_args.trust_remote_code,
    )
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        model_max_length=training_args.model_max_length,
        padding_side=model_args.padding_side,
        use_fast=False,
        trust_remote_code=model_args.trust_remote_code,
    )

    # Pad with the unk token so no new embedding rows are required.
    if tokenizer.pad_token != tokenizer.unk_token:
        tokenizer.pad_token = tokenizer.unk_token

    # Load data
    data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)

    # Start trainer
    trainer = Trainer(
        model=model, tokenizer=tokenizer, args=training_args, **data_module
    )
    if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()

    # Save model
    model.config.use_cache = True
    trainer.save_state()
    if trainer.is_deepspeed_enabled:
        trainer.save_model()
    else:
        trainer_save_model_safe(trainer)
20,620 | import logging
import math
from typing import Optional, Tuple
import torch
import transformers.models.llama.modeling_llama
from torch import nn
def xformers_forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Drop-in replacement for LlamaAttention.forward using xformers.

    When attention weights are not requested, the memory-efficient xformers
    kernel is used; otherwise it falls back to the reference matmul-softmax
    path so the full attention matrix can be returned. Supports the usual
    KV cache via ``past_key_value``/``use_cache``.
    """
    # pylint: disable=duplicate-code
    bsz, q_len, _ = hidden_states.size()

    # Project to per-head Q/K/V of shape [bsz, num_heads, q_len, head_dim].
    query_states = (
        self.q_proj(hidden_states)
        .view(bsz, q_len, self.num_heads, self.head_dim)
        .transpose(1, 2)
    )
    key_states = (
        self.k_proj(hidden_states)
        .view(bsz, q_len, self.num_heads, self.head_dim)
        .transpose(1, 2)
    )
    value_states = (
        self.v_proj(hidden_states)
        .view(bsz, q_len, self.num_heads, self.head_dim)
        .transpose(1, 2)
    )

    # Rotary embeddings are computed over the full (cached + new) length.
    kv_seq_len = key_states.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]
    cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
    (
        query_states,
        key_states,
    ) = transformers.models.llama.modeling_llama.apply_rotary_pos_emb(
        query_states, key_states, cos, sin, position_ids
    )
    # [bsz, nh, t, hd]

    if past_key_value is not None:
        # reuse k, v, self_attention
        key_states = torch.cat([past_key_value[0], key_states], dim=2)
        value_states = torch.cat([past_key_value[1], value_states], dim=2)

    past_key_value = (key_states, value_states) if use_cache else None

    # We only apply xformers optimizations if we don't need to output the whole attention matrix
    if not output_attentions:
        # xformers expects (bsz, seq, num_heads, head_dim).
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        # This is a nasty hack. We know attention_mask in transformers is either LowerTriangular or all Zeros.
        # We therefore check if one element in the upper triangular portion is zero. If it is, then the mask is all zeros.
        if attention_mask is None or attention_mask[0, 0, 0, 1] == 0:
            # input and output should be of form (bsz, q_len, num_heads, head_dim)
            attn_output = xformers.ops.memory_efficient_attention(
                query_states, key_states, value_states, attn_bias=None
            )
        else:
            # input and output should be of form (bsz, q_len, num_heads, head_dim)
            attn_output = xformers.ops.memory_efficient_attention(
                query_states,
                key_states,
                value_states,
                attn_bias=xformers.ops.LowerTriangularMask(),
            )
        attn_weights = None
    else:
        # Reference path: explicit scaled dot-product attention so the
        # attention weights can be returned to the caller.
        attn_weights = torch.matmul(
            query_states, key_states.transpose(2, 3)
        ) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask
            # Clamp masked logits to the dtype minimum to avoid -inf overflow.
            attn_weights = torch.max(
                attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)
            )

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(
            attn_weights, dim=-1, dtype=torch.float32
        ).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2)

    attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
    attn_output = self.o_proj(attn_output)
    return attn_output, attn_weights, past_key_value
def replace_llama_attn_with_xformers_attn():
    """Monkey-patch LlamaAttention.forward with the xformers implementation."""
    transformers.models.llama.modeling_llama.LlamaAttention.forward = xformers_forward
20,621 | from collections import defaultdict
import copy
import os
from dataclasses import dataclass, field
import random
import json
import logging
import pathlib
from typing import Dict, Optional, Sequence, List
import torch
import torch.distributed as dist
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training, TaskType
import transformers
from torch.utils.data import Dataset
from transformers import Trainer, AddedToken, BitsAndBytesConfig, deepspeed
from fastchat.train.train_flant5 import (
smart_tokenizer_and_embedding_resize,
make_supervised_data_module,
)
from fastchat.train.train_lora import get_peft_state_maybe_zero_3
from fastchat.model.model_adapter import get_conversation_template
DEFAULT_PAD_TOKEN = "[PAD]"  # pad token added when the tokenizer lacks one
class LoraArguments:
    # LoRA rank, scaling, and dropout hyperparameters.
    lora_r: int = 8
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    # T5-style attention projection module names to adapt.
    lora_target_modules: List[str] = field(default_factory=lambda: ["q", "v"])
    # Optional path to pre-trained LoRA weights to resume from.
    lora_weight_path: str = ""
    # Which bias terms to train: "none", "all", or "lora_only".
    lora_bias: str = "none"
    # When True, load the base model 4-bit quantized (QLoRA).
    q_lora: bool = False
class ModelArguments:
    # Hub id or local path of the base model to fine-tune.
    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
class DataArguments:
    # Path to the raw training data file.
    data_path: str = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    lazy_preprocess: bool = False
    # Cap on the number of examples to use; -1 means all.
    num_data: int = -1
    preprocessed_path: str = field(
        default=None, metadata={"help": "Path to the preprocessed training data."}
    )
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments plus cache/optimizer/sequence-length knobs."""
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    model_max_length: int = field(
        default=2048,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
def safe_save_model_for_hf_trainer(
    trainer: transformers.Trainer, output_dir: str, state_dict: dict
):
    """Collect the given state dict on CPU and dump it to disk (save rank only)."""
    if not trainer.args.should_save:
        return
    # Move every tensor to CPU before serialization, then drop the originals.
    cpu_state_dict = {name: tensor.cpu() for name, tensor in state_dict.items()}
    del state_dict
    trainer._save(output_dir, state_dict=cpu_state_dict)  # noqa
def smart_tokenizer_and_embedding_resize(
    special_tokens_dict: Dict,
    other_tokens,
    tokenizer: transformers.PreTrainedTokenizer,
    model: transformers.PreTrainedModel,
):
    """Resize tokenizer and embedding.

    Adds the given special tokens and extra tokens to the tokenizer, resizes
    the model's embedding tables to match, and initializes each new row with
    the mean of the pre-existing embeddings (a common warm-start heuristic).

    Note: This is the unoptimized version that may make your embedding size
    not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    for new_token in other_tokens:
        # normalized=False keeps the token text verbatim in the vocabulary.
        num_new_tokens += tokenizer.add_tokens(AddedToken(new_token, normalized=False))

    model.resize_token_embeddings(len(tokenizer))

    if num_new_tokens > 0:
        input_embeddings = model.get_input_embeddings().weight.data
        output_embeddings = model.get_output_embeddings().weight.data

        # New rows are appended at the end; seed them with the average of
        # the original rows so training starts from a sensible point.
        input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
            dim=0, keepdim=True
        )
        output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
            dim=0, keepdim=True
        )

        input_embeddings[-num_new_tokens:] = input_embeddings_avg
        output_embeddings[-num_new_tokens:] = output_embeddings_avg
def make_supervised_data_module(
    tokenizer: transformers.PreTrainedTokenizer, data_args
) -> Dict:
    """Make dataset and collator for supervised fine-tuning."""
    # This variant always uses the eager SupervisedDataset; there is no
    # evaluation split, only a train dataset plus its collator.
    train_dataset = SupervisedDataset(
        tokenizer=tokenizer,
        data_path=data_args.data_path,
        preprocessed_path=data_args.preprocessed_path,
        num_data=data_args.num_data,
    )
    collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
    return dict(
        train_dataset=train_dataset, eval_dataset=None, data_collator=collator
    )
def get_peft_state_maybe_zero_3(named_params, bias):
    """Collect LoRA parameters (plus requested bias terms) from named_params.

    bias selects which bias tensors are kept:
      - "none":      only "lora_" parameters
      - "all":       "lora_" parameters plus every bias
      - "lora_only": "lora_" parameters plus biases belonging to LoRA layers

    Every collected value is passed through maybe_zero_3 (defined elsewhere)
    to gather ZeRO-3 partitioned tensors.  Raises NotImplementedError for an
    unknown bias mode.
    """
    if bias == "none":
        to_return = {k: t for k, t in named_params if "lora_" in k}
    elif bias == "all":
        to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
    elif bias == "lora_only":
        to_return = {}
        maybe_lora_bias = {}
        lora_bias_names = set()
        for k, t in named_params:
            if "lora_" in k:
                to_return[k] = t
                bias_name = k.split("lora_")[0] + "bias"
                lora_bias_names.add(bias_name)
            elif "bias" in k:
                maybe_lora_bias[k] = t
        # Fix: iterate the dict's items (iterating the dict itself yields only
        # keys, so `for k, t in maybe_lora_bias` mis-unpacked), and match each
        # bias by its own name `k` rather than the stale `bias_name` left over
        # from the previous loop.
        for k, t in maybe_lora_bias.items():
            if k in lora_bias_names:
                to_return[k] = t
    else:
        raise NotImplementedError
    to_return = {k: maybe_zero_3(v) for k, v in to_return.items()}
    return to_return
def train():
    """Entry point: fine-tune a seq2seq (T5) model with (Q)LoRA adapters."""
    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments, LoraArguments)
    )
    (
        model_args,
        data_args,
        training_args,
        lora_args,
    ) = parser.parse_args_into_dataclasses()

    device_map = None
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    ddp = world_size != 1  # more than one process => distributed data parallel
    if lora_args.q_lora:
        # Pin the quantized model to this rank's device under DDP.
        device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)} if ddp else None
        if len(training_args.fsdp) > 0 or deepspeed.is_deepspeed_zero3_enabled():
            logging.warning(
                "FSDP and ZeRO3 are both currently incompatible with QLoRA."
            )

    compute_dtype = (
        torch.float16
        if training_args.fp16
        else (torch.bfloat16 if training_args.bf16 else torch.float32)
    )

    # Load the base model; 4-bit NF4 quantized when QLoRA is requested.
    model = transformers.AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        device_map=device_map,
        quantization_config=BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=compute_dtype,
        )
        if lora_args.q_lora
        else None,
    )
    lora_config = LoraConfig(
        r=lora_args.lora_r,
        lora_alpha=lora_args.lora_alpha,
        target_modules=lora_args.lora_target_modules,
        lora_dropout=lora_args.lora_dropout,
        bias=lora_args.lora_bias,
        task_type=TaskType.SEQ_2_SEQ_LM,
    )

    if lora_args.q_lora:
        model = prepare_model_for_kbit_training(
            model, use_gradient_checkpointing=training_args.gradient_checkpointing
        )
        if not ddp and torch.cuda.device_count() > 1:
            # keeps Trainer from trying its own DataParallelism when more than 1 gpu is available
            model.is_parallelizable = True
            model.model_parallel = True

    model = get_peft_model(model, lora_config)
    if training_args.deepspeed is not None and training_args.local_rank == 0:
        model.print_trainable_parameters()

    if training_args.gradient_checkpointing:
        model.enable_input_require_grads()

    # Dacheng: Note we can only use T5Tokenizer, otherwise it will prepend
    # a space before special tokens.
    tokenizer = transformers.T5Tokenizer.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        model_max_length=training_args.model_max_length,
        padding_side="right",
        use_fast=False,
    )
    # Add a PAD token plus literal characters, resizing embeddings to match.
    smart_tokenizer_and_embedding_resize(
        special_tokens_dict=dict(pad_token=DEFAULT_PAD_TOKEN),
        other_tokens=["<", "{", "\n", "}", "`", " ", "\\", "^", "\t"],
        tokenizer=tokenizer,
        model=model,
    )

    data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
    trainer = Trainer(
        model=model, tokenizer=tokenizer, args=training_args, **data_module
    )

    # Resume when a checkpoint already exists in the output directory.
    if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()
    trainer.save_state()

    # check if zero3 mode enabled
    if deepspeed.is_deepspeed_zero3_enabled():
        # use deepspeed engine internal function to gather state dict
        # state_dict_zero3 contains whole parameters of base and lora adapters
        # we will not extract lora parameters since peft save_pretrained will do that
        # https://github.com/huggingface/peft/blob/3714aa2fff158fdfa637b2b65952580801d890b2/src/peft/peft_model.py#L125
        # https://github.com/huggingface/peft/blob/3714aa2fff158fdfa637b2b65952580801d890b2/src/peft/utils/save_and_load.py#L19
        state_dict_zero3 = trainer.model_wrapped._zero3_consolidated_16bit_state_dict()
        if training_args.local_rank == 0:
            state_dict = state_dict_zero3
    else:
        # in other mode we use original code from fastchat team, to make sure our change is minimum
        state_dict = get_peft_state_maybe_zero_3(
            model.named_parameters(), lora_args.lora_bias
        )
    if training_args.local_rank == 0:
        safe_save_model_for_hf_trainer(
            trainer=trainer, output_dir=training_args.output_dir, state_dict=state_dict
        )
20,622 | from dataclasses import dataclass, field
import logging
import pathlib
import typing
import os
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
import transformers
from transformers import Trainer, BitsAndBytesConfig, deepspeed
import torch
from fastchat.train.train import (
DataArguments,
ModelArguments,
make_supervised_data_module,
)
from fastchat.train.llama_flash_attn_monkey_patch import (
replace_llama_attn_with_flash_attn,
)
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments extended with cache, sequence-length and flash-attn knobs."""

    # Directory for cached model downloads.
    cache_dir: typing.Optional[str] = field(default=None)
    # Optimizer name forwarded to transformers.
    optim: str = field(default="adamw_torch")
    model_max_length: int = field(
        default=512,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
    # Whether to monkey-patch LLaMA attention with flash attention.
    flash_attn: bool = False
class LoraArguments:
    """Hyper-parameters controlling the LoRA / QLoRA adapters."""

    # LoRA rank (dimension of the low-rank update matrices).
    lora_r: int = 8
    # Scaling factor applied to the LoRA update.
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    # Module names to wrap with LoRA adapters.
    lora_target_modules: typing.List[str] = field(
        default_factory=lambda: ["q_proj", "v_proj"]
    )
    # Optional path to pretrained LoRA weights.
    lora_weight_path: str = ""
    # Which bias terms to train: "none", "all", or "lora_only".
    lora_bias: str = "none"
    # Enable QLoRA (4-bit quantized base model).
    q_lora: bool = False
def get_peft_state_maybe_zero_3(named_params, bias):
    """Collect LoRA parameters (plus requested bias terms) from named_params.

    bias selects which bias tensors are kept:
      - "none":      only "lora_" parameters
      - "all":       "lora_" parameters plus every bias
      - "lora_only": "lora_" parameters plus biases belonging to LoRA layers

    Every collected value is passed through maybe_zero_3 (defined elsewhere)
    to gather ZeRO-3 partitioned tensors.  Raises NotImplementedError for an
    unknown bias mode.
    """
    if bias == "none":
        to_return = {k: t for k, t in named_params if "lora_" in k}
    elif bias == "all":
        to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
    elif bias == "lora_only":
        to_return = {}
        maybe_lora_bias = {}
        lora_bias_names = set()
        for k, t in named_params:
            if "lora_" in k:
                to_return[k] = t
                bias_name = k.split("lora_")[0] + "bias"
                lora_bias_names.add(bias_name)
            elif "bias" in k:
                maybe_lora_bias[k] = t
        # Fix: iterate the dict's items (iterating the dict itself yields only
        # keys, so `for k, t in maybe_lora_bias` mis-unpacked), and match each
        # bias by its own name `k` rather than the stale `bias_name` left over
        # from the previous loop.
        for k, t in maybe_lora_bias.items():
            if k in lora_bias_names:
                to_return[k] = t
    else:
        raise NotImplementedError
    to_return = {k: maybe_zero_3(v) for k, v in to_return.items()}
    return to_return
class ModelArguments:
    """Which pretrained model to fine-tune and how to load its tokenizer."""

    # Fix: this module imports `typing` (not `Optional` directly), so the bare
    # `Optional` name raised NameError at class-definition time.
    model_name_or_path: typing.Optional[str] = field(default="facebook/opt-125m")
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": "Whether or not to allow for custom models defined on the Hub in their own modeling files"
        },
    )
    padding_side: str = field(
        default="right", metadata={"help": "The padding side in tokenizer"}
    )
class DataArguments:
    """Paths and options for the fine-tuning data."""

    data_path: str = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    eval_data_path: str = field(
        default=None, metadata={"help": "Path to the evaluation data."}
    )
    # Tokenize examples on demand instead of up front.
    lazy_preprocess: bool = False
def make_supervised_data_module(
    tokenizer: transformers.PreTrainedTokenizer, data_args
) -> Dict:
    """Make dataset and collator for supervised fine-tuning.

    Chooses the lazy or eager dataset class per ``data_args.lazy_preprocess``
    and builds an optional evaluation split when ``eval_data_path`` is set.
    """
    dataset_cls = (
        LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset
    )
    rank0_print("Loading data...")

    # Fix: use context managers so the JSON files are closed deterministically
    # (the previous `json.load(open(...))` leaked the file handles).
    with open(data_args.data_path, "r") as f:
        train_json = json.load(f)
    train_dataset = dataset_cls(train_json, tokenizer=tokenizer)

    if data_args.eval_data_path:
        with open(data_args.eval_data_path, "r") as f:
            eval_json = json.load(f)
        eval_dataset = dataset_cls(eval_json, tokenizer=tokenizer)
    else:
        eval_dataset = None

    return dict(train_dataset=train_dataset, eval_dataset=eval_dataset)
def replace_llama_attn_with_flash_attn():
    """Monkey-patch transformers' LLaMA attention with flash-attention versions.

    Warns when the GPU lacks compute capability 8.x, since the flash kernels
    only support training on A100/H100-class hardware.
    """
    # Fix: `warnings` is not imported at this module's top level, so the
    # capability warning below raised NameError; import it locally.
    import warnings

    cuda_major, cuda_minor = torch.cuda.get_device_capability()
    if cuda_major < 8:
        warnings.warn(
            "Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward."
            "ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593"
        )
    # The patched implementations are defined elsewhere in this module.
    transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = (
        _prepare_decoder_attention_mask
    )
    transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
def train():
    """Entry point: fine-tune a causal LM (LLaMA-style) with (Q)LoRA adapters."""
    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments, LoraArguments)
    )
    (
        model_args,
        data_args,
        training_args,
        lora_args,
    ) = parser.parse_args_into_dataclasses()

    if training_args.flash_attn:
        replace_llama_attn_with_flash_attn()

    device_map = None
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    ddp = world_size != 1  # more than one process => distributed data parallel
    if lora_args.q_lora:
        # Pin the quantized model to this rank's device under DDP.
        device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)} if ddp else None
        if len(training_args.fsdp) > 0 or deepspeed.is_deepspeed_zero3_enabled():
            logging.warning(
                "FSDP and ZeRO3 are both currently incompatible with QLoRA."
            )

    compute_dtype = (
        torch.float16
        if training_args.fp16
        else (torch.bfloat16 if training_args.bf16 else torch.float32)
    )

    # Load the base model; 4-bit NF4 quantized when QLoRA is requested.
    model = transformers.AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        device_map=device_map,
        quantization_config=BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=compute_dtype,
        )
        if lora_args.q_lora
        else None,
    )
    lora_config = LoraConfig(
        r=lora_args.lora_r,
        lora_alpha=lora_args.lora_alpha,
        target_modules=lora_args.lora_target_modules,
        lora_dropout=lora_args.lora_dropout,
        bias=lora_args.lora_bias,
        task_type="CAUSAL_LM",
    )

    if lora_args.q_lora:
        model = prepare_model_for_kbit_training(
            model, use_gradient_checkpointing=training_args.gradient_checkpointing
        )
        if not ddp and torch.cuda.device_count() > 1:
            # keeps Trainer from trying its own DataParallelism when more than 1 gpu is available
            model.is_parallelizable = True
            model.model_parallel = True

    model = get_peft_model(model, lora_config)
    if training_args.flash_attn:
        # Cast norm / lm_head / embedding modules to the compute dtype.
        for name, module in model.named_modules():
            if "norm" in name:
                module = module.to(compute_dtype)
            if "lm_head" in name or "embed_tokens" in name:
                if hasattr(module, "weight"):
                    module = module.to(compute_dtype)
    if training_args.deepspeed is not None and training_args.local_rank == 0:
        model.print_trainable_parameters()

    if training_args.gradient_checkpointing:
        model.enable_input_require_grads()

    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        model_max_length=training_args.model_max_length,
        padding_side="right",
        use_fast=False,
    )
    # Reuse UNK as the padding token.
    tokenizer.pad_token = tokenizer.unk_token

    data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
    trainer = Trainer(
        model=model, tokenizer=tokenizer, args=training_args, **data_module
    )

    model.config.use_cache = False  # no KV caching during training

    # Resume when a checkpoint already exists in the output directory.
    if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()
    trainer.save_state()

    # check if zero3 mode enabled
    if deepspeed.is_deepspeed_zero3_enabled():
        # use deepspeed engine internal function to gather state dict
        # state_dict_zero3 contains whole parameters of base and lora adapters
        # we will not extract lora parameters since peft save_pretrained will do that
        # https://github.com/huggingface/peft/blob/3714aa2fff158fdfa637b2b65952580801d890b2/src/peft/peft_model.py#L125
        # https://github.com/huggingface/peft/blob/3714aa2fff158fdfa637b2b65952580801d890b2/src/peft/utils/save_and_load.py#L19
        state_dict_zero3 = trainer.model_wrapped._zero3_consolidated_16bit_state_dict()
        if training_args.local_rank == 0:
            state_dict = state_dict_zero3
    else:
        # in other mode we use original code from fastchat team, to make sure our change is minimum
        state_dict = get_peft_state_maybe_zero_3(
            model.named_parameters(), lora_args.lora_bias
        )
    if training_args.local_rank == 0:
        model.save_pretrained(training_args.output_dir, state_dict=state_dict)
20,623 | from collections import defaultdict
import copy
import os
from dataclasses import dataclass, field
import random
import json
import logging
import pathlib
from typing import Dict, Optional, Sequence
import torch
import torch.distributed as dist
import transformers
from torch.utils.data import Dataset
from transformers import Trainer, AddedToken
from fastchat.model.model_adapter import get_conversation_template
default_conversation = get_conversation_template("t5")
def _tokenize_fn(
    strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer
) -> Dict:
    """Tokenize a list of strings, returning ids and non-pad lengths."""
    input_ids = []
    lengths = []
    for text in strings:
        encoded = tokenizer(
            text,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        )
        input_ids.append(encoded.input_ids[0])
        # Count only real (non-pad) tokens.
        lengths.append(
            encoded.input_ids.ne(tokenizer.pad_token_id).sum().item()
        )
    # Labels alias the input ids, matching the causal-LM convention here.
    return dict(
        input_ids=input_ids,
        labels=input_ids,
        input_ids_lens=lengths,
        labels_lens=lengths,
    )
def _form_qa(
q_list,
a_list,
tokenized_conversation,
tokenized_lens,
speakers,
header_len,
max_length,
eos_id,
):
cur_idx = header_len
conv_len = len(tokenized_conversation)
for tokenized_len, speaker in zip(tokenized_lens, speakers):
if cur_idx >= conv_len:
break
if speaker == "gpt":
# truncate answer if it is too long
content_a = None
if tokenized_len > max_length:
content_a = tokenized_conversation[cur_idx : cur_idx + max_length]
else:
content_a = tokenized_conversation[cur_idx : cur_idx + tokenized_len]
content_a.append(eos_id)
a_list.append(content_a)
content_q = None
if cur_idx >= max_length:
content_q = tokenized_conversation[cur_idx - max_length : cur_idx]
else:
content_q = tokenized_conversation[:cur_idx]
content_q.append(eos_id)
q_list.append(content_q)
# asser the last token is actually a EOS for an answer
assert a_list[-1][-1] == eos_id, "Last Token is not EOS!"
cur_idx += tokenized_len
def _add_speaker_and_signal(header, source, get_conversation=True):
    """Add speaker and start/end signal on each round.

    Mutates each sentence's "value" in place and, when get_conversation is
    truthy, returns the header followed by the decorated sentences.
    """
    BEGIN_SIGNAL = "### "
    END_SIGNAL = "\n"
    unknown_role = "unknown"  # use default unknown role
    roles = {
        "human": default_conversation.roles[0],  # human role
        "gpt": default_conversation.roles[1],  # gpt role
    }
    conversation = header
    last_idx = len(source) - 1
    for idx, sentence in enumerate(source):
        speaker = sentence["from"].lower()
        if speaker == "human":
            if idx != last_idx:
                # Wrap the human turn and open the next speaker's prompt.
                nxt = source[idx + 1]
                own_prefix = BEGIN_SIGNAL + roles.get(speaker, unknown_role) + ": "
                next_prefix = (
                    BEGIN_SIGNAL
                    + roles.get(nxt["from"].lower(), unknown_role)
                    + ": "
                )
                sentence["value"] = (
                    own_prefix + sentence["value"] + END_SIGNAL + next_prefix
                )
            # A trailing human turn contributes no answer, so leave it as-is.
        else:
            sentence["value"] = sentence["value"] + END_SIGNAL
        if get_conversation:
            conversation += sentence["value"]
    return conversation
The provided code snippet includes necessary dependencies for implementing the `preprocess` function. Write a Python function `def preprocess( sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, ) -> Dict` to solve the following problem:
Given a list of sources, each of which is a conversation list, this transform: 1. Adds the signal '### ' at the beginning of each sentence, with the end signal '\n'; 2. Concatenates the conversations together; 3. Tokenizes the concatenated conversation; 4. Makes a deepcopy as the target, masking human words with IGNORE_INDEX.
Here is the function:
def preprocess(
    sources: Sequence[str],
    tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
    """
    Given a list of sources, each a conversation list, this transform:
    1. Adds the signal '### ' at the beginning of each sentence, with end signal '\n';
    2. Concatenates each conversation's sentences together;
    3. Tokenizes the concatenated conversations;
    4. Splits every conversation into (question, answer) id pairs — one per
       assistant turn, truncated to the tokenizer's model_max_length.

    Returns dict(input_ids=questions, labels=answers).
    """
    # add end signal and concatenate together
    conversations = []
    header = f"{default_conversation.system_message}\n\n"
    for source in sources:
        # NOTE(review): the third argument of _add_speaker_and_signal is
        # `get_conversation`; passing the tokenizer here only acts as a
        # truthy flag — confirm this is intentional.
        conversation = _add_speaker_and_signal(header, source, tokenizer)
        conversations.append(conversation)
    # Assume long conversations: don't pad and don't return tensors.
    tokenized_conversations = tokenizer(conversations, max_length=None)["input_ids"]
    q_list = []
    a_list = []
    # Header length minus one for the EOS token the tokenizer appends.
    header_len = _tokenize_fn([header], tokenizer)["input_ids_lens"][0] - 1
    from tqdm import tqdm

    for tokenized_conversation, source in tqdm(zip(tokenized_conversations, sources)):
        tokenized_sentence = _tokenize_fn([s["value"] for s in source], tokenizer)
        # Per-sentence lengths, each minus one for its EOS token.
        tokenized_lens = [l - 1 for l in tokenized_sentence["input_ids_lens"]]
        speakers = [sentence["from"] for sentence in source]
        _form_qa(
            q_list,
            a_list,
            tokenized_conversation,
            tokenized_lens,
            speakers,
            header_len,
            tokenizer.model_max_length,
            tokenizer.eos_token_id,
        )
    return dict(input_ids=q_list, labels=a_list)
20,624 | from collections import defaultdict
import copy
import os
from dataclasses import dataclass, field
import random
import json
import logging
import pathlib
from typing import Dict, Optional, Sequence
import torch
import torch.distributed as dist
import transformers
from torch.utils.data import Dataset
from transformers import Trainer, AddedToken
from fastchat.model.model_adapter import get_conversation_template
DEFAULT_PAD_TOKEN = "[PAD]"
class ModelArguments:
    """Which pretrained model to fine-tune."""

    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
class DataArguments:
    """Data paths and preprocessing options for T5 fine-tuning."""

    data_path: str = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    # Tokenize examples on demand instead of up front.
    lazy_preprocess: bool = False
    # Cap on the number of training examples; -1 means use all.
    num_data: int = -1
    preprocessed_path: str = field(
        default=None, metadata={"help": "Path to the preprocessed training data."}
    )
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments extended with cache and sequence-length knobs."""

    # Directory for cached model downloads.
    cache_dir: Optional[str] = field(default=None)
    # Optimizer name forwarded to transformers.
    optim: str = field(default="adamw_torch")
    model_max_length: int = field(
        default=2048,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
    """Collect the trainer's model state dict on CPU and dump it to disk.

    Only the process for which ``trainer.args.should_save`` is true writes.
    """
    state_dict = trainer.model.state_dict()
    if not trainer.args.should_save:
        return
    # Detach every tensor to CPU so the checkpoint holds no device references.
    cpu_state_dict = {name: param.cpu() for name, param in state_dict.items()}
    del state_dict
    trainer._save(output_dir, state_dict=cpu_state_dict)  # noqa
def smart_tokenizer_and_embedding_resize(
    special_tokens_dict: Dict,
    other_tokens,
    tokenizer: transformers.PreTrainedTokenizer,
    model: transformers.PreTrainedModel,
):
    """Resize tokenizer and embedding.

    Adds the special tokens and extra literal tokens, resizes the model's
    embedding matrices, and initializes each new row with the mean of the
    pre-existing embeddings.

    Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    for token in other_tokens:
        num_new_tokens += tokenizer.add_tokens(AddedToken(token, normalized=False))

    model.resize_token_embeddings(len(tokenizer))
    if num_new_tokens <= 0:
        return

    # Seed every appended row with the mean of the original rows; the input
    # and output embedding matrices are distinct tensors, so handling them
    # one after the other is equivalent to updating both at once.
    for embeddings in (
        model.get_input_embeddings().weight.data,
        model.get_output_embeddings().weight.data,
    ):
        avg = embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
        embeddings[-num_new_tokens:] = avg
def make_supervised_data_module(
    tokenizer: transformers.PreTrainedTokenizer, data_args
) -> Dict:
    """Make dataset and collator for supervised fine-tuning."""
    dataset = SupervisedDataset(
        tokenizer=tokenizer,
        data_path=data_args.data_path,
        preprocessed_path=data_args.preprocessed_path,
        num_data=data_args.num_data,
    )
    # No evaluation split is built for this training recipe.
    return dict(
        train_dataset=dataset,
        eval_dataset=None,
        data_collator=DataCollatorForSupervisedDataset(tokenizer=tokenizer),
    )
def train():
    """Entry point: full (non-LoRA) fine-tuning of a seq2seq (T5) model."""
    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments)
    )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    model = transformers.AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
    )
    # Dacheng: Note we can only use T5Tokenizer, otherwise it will prepend
    # a space before special tokens.
    tokenizer = transformers.T5Tokenizer.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        model_max_length=training_args.model_max_length,
        padding_side="right",
        use_fast=False,
    )
    # Add a PAD token plus literal characters, resizing embeddings to match.
    smart_tokenizer_and_embedding_resize(
        special_tokens_dict=dict(pad_token=DEFAULT_PAD_TOKEN),
        other_tokens=["<", "{", "\n", "}", "`", " ", "\\", "^", "\t"],
        tokenizer=tokenizer,
        model=model,
    )

    data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
    trainer = Trainer(
        model=model, tokenizer=tokenizer, args=training_args, **data_module
    )

    # Resume when a checkpoint already exists in the output directory.
    if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()
    trainer.save_state()
    safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
20,625 | import warnings
from typing import Optional, Tuple
import torch
from flash_attn import __version__ as flash_attn_version
from flash_attn.bert_padding import pad_input, unpad_input
from flash_attn.flash_attn_interface import (
flash_attn_func,
flash_attn_varlen_kvpacked_func,
)
from transformers.models.llama.modeling_llama import (
LlamaAttention,
LlamaModel,
rotate_half,
)
def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
    padding_mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Flash-attention replacement for LlamaAttention.forward.

    Returns (attn_output, None, past_key_value); attention weights are never
    materialized by the flash kernels, so the second element is always None.
    """
    if output_attentions:
        warnings.warn(
            "Output attentions is not supported for patched `LlamaAttention`, returning `None` instead."
        )

    bsz, q_len, _ = hidden_states.size()
    # Grouped-query attention: k/v may use fewer heads than q.
    kv_heads = getattr(self, "num_key_value_heads", self.num_heads)

    q, k, v = (
        op(hidden_states).view(bsz, q_len, nh, self.head_dim)
        for op, nh in (
            (self.q_proj, self.num_heads),
            (self.k_proj, kv_heads),
            (self.v_proj, kv_heads),
        )
    )
    # shape: (b, s, num_heads, head_dim)

    kv_seq_len = k.shape[1]
    past_kv_len = 0
    if past_key_value is not None:
        past_kv_len = past_key_value[0].shape[2]
        kv_seq_len += past_kv_len

    cos_sin = self.rotary_emb(v, seq_len=kv_seq_len)
    q, k = apply_rotary_pos_emb(q, k, cos_sin, position_ids)

    if past_key_value is not None:
        assert (
            flash_attn_version >= "2.1.0"
        ), "past_key_value support requires flash-attn >= 2.1.0"
        # reuse k, v — the cache stores (b, num_heads, s, head_dim), so
        # transpose before concatenating with our (b, s, num_heads, head_dim).
        k = torch.cat([past_key_value[0].transpose(1, 2), k], dim=1)
        v = torch.cat([past_key_value[1].transpose(1, 2), v], dim=1)

    past_key_value = (k.transpose(1, 2), v.transpose(1, 2)) if use_cache else None

    if attention_mask is None:
        # No padding: use the dense flash-attention kernel directly.
        output = flash_attn_func(q, k, v, 0.0, softmax_scale=None, causal=True).view(
            bsz, q_len, -1
        )
    else:
        # Variable-length path: strip padded positions before the kernel.
        q, indices, cu_q_lens, max_s = unpad_input(q, attention_mask[:, -q_len:])
        # We can skip concat and call unpad twice but seems better to call unpad only once.
        kv, _, cu_k_lens, max_k = unpad_input(
            torch.stack((k, v), dim=2), attention_mask
        )
        output_unpad = flash_attn_varlen_kvpacked_func(
            q,
            kv,
            cu_q_lens,
            cu_k_lens,
            max_s,
            max_k,
            0.0,
            softmax_scale=None,
            causal=True,
        )
        output_unpad = output_unpad.reshape(-1, self.num_heads * self.head_dim)
        output = pad_input(output_unpad, indices, bsz, q_len)

    return self.o_proj(output), None, past_key_value
def _prepare_decoder_attention_mask(
self, attention_mask, input_shape, inputs_embeds, past_key_values_length
):
# [bsz, seq_len]
if past_key_values_length > 0 and attention_mask is not None:
attention_mask = torch.cat(
(
torch.full(
(input_shape[0], past_key_values_length),
True,
dtype=attention_mask.dtype,
device=attention_mask.device,
),
attention_mask,
),
dim=-1,
)
if attention_mask is not None and torch.all(attention_mask):
return None # This uses the faster call when training with full samples
return attention_mask
def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Flash-attention (qkv-packed) replacement for LlamaAttention.forward.

    Returns (attn_output, None, past_key_value); attention weights are not
    materialized, so the second element is always None.

    NOTE(review): `apply_rotary_pos_emb` and `flash_attn_varlen_qkvpacked_func`
    are not among this module's visible imports — confirm they are provided
    elsewhere in the file.
    """
    if output_attentions:
        warnings.warn(
            "Output attentions is not supported for patched `LlamaAttention`, returning `None` instead."
        )

    bsz, q_len, _ = hidden_states.size()

    query_states = (
        self.q_proj(hidden_states)
        .view(bsz, q_len, self.num_heads, self.head_dim)
        .transpose(1, 2)
    )
    key_states = (
        self.k_proj(hidden_states)
        .view(bsz, q_len, self.num_heads, self.head_dim)
        .transpose(1, 2)
    )
    value_states = (
        self.v_proj(hidden_states)
        .view(bsz, q_len, self.num_heads, self.head_dim)
        .transpose(1, 2)
    )  # shape: (b, num_heads, s, head_dim)

    kv_seq_len = key_states.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]

    cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
    query_states, key_states = apply_rotary_pos_emb(
        query_states, key_states, cos, sin, position_ids
    )

    if past_key_value is not None:
        # reuse k, v
        key_states = torch.cat([past_key_value[0], key_states], dim=2)
        value_states = torch.cat([past_key_value[1], value_states], dim=2)

    past_key_value = (key_states, value_states) if use_cache else None

    # Transform the data into the format required by flash attention
    qkv = torch.stack([query_states, key_states, value_states], dim=2)
    qkv = qkv.transpose(1, 3)  # shape: [b, s, 3, num_heads, head_dim]
    key_padding_mask = attention_mask

    if key_padding_mask is None:
        # No padding: flatten batch*seq and use uniform cumulative lengths.
        qkv = qkv.reshape(-1, 3, self.num_heads, self.head_dim)
        cu_q_lens = torch.arange(
            0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device
        )
        max_s = q_len
        output = flash_attn_varlen_qkvpacked_func(
            qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
        )
        output = output.view(bsz, q_len, -1)
    else:
        # Drop padded positions before the kernel, restore them afterwards.
        qkv = qkv.reshape(bsz, q_len, -1)
        qkv, indices, cu_q_lens, max_s = unpad_input(qkv, key_padding_mask)
        qkv = qkv.view(-1, 3, self.num_heads, self.head_dim)
        output_unpad = flash_attn_varlen_qkvpacked_func(
            qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
        )
        output_unpad = output_unpad.reshape(-1, self.num_heads * self.head_dim)
        output = pad_input(output_unpad, indices, bsz, q_len)

    return self.o_proj(output), None, past_key_value
def replace_llama_attn_with_flash_attn():
    """Install the flash-attention forward pass onto transformers' LLaMA classes."""
    major, _minor = torch.cuda.get_device_capability()
    if major < 8:
        # Pre-Ampere GPUs cannot run these kernels for training.
        warnings.warn(
            "Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward."
            "ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593"
        )
    LlamaModel._prepare_decoder_attention_mask = _prepare_decoder_attention_mask
    LlamaAttention.forward = forward
20,626 | import argparse
import tempfile
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
def upload_hub(model_path, hub_repo_id, component, private):
    """Push a local model and/or tokenizer to the Hugging Face Hub.

    component is "model", "tokenizer", or "all".  Each artifact is serialized
    into a temporary directory with push_to_hub=True, so save_pretrained
    uploads it to hub_repo_id (optionally as a private repo).
    """
    if component == "all":
        components = ["model", "tokenizer"]
    else:
        components = [component]

    # Fix: use the `private` parameter; the previous code referenced the
    # undefined global `args.private`, which raises NameError when this
    # function is called as a library routine.
    kwargs = {"push_to_hub": True, "repo_id": hub_repo_id, "private": private}

    if "model" in components:
        model = AutoModelForCausalLM.from_pretrained(
            model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
        )
        with tempfile.TemporaryDirectory() as tmp_path:
            model.save_pretrained(tmp_path, **kwargs)

    if "tokenizer" in components:
        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
        with tempfile.TemporaryDirectory() as tmp_path:
            tokenizer.save_pretrained(tmp_path, **kwargs)
20,627 | import math
import os
import re
import sys
from typing import Dict, List, Optional
import warnings
import psutil
import torch
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
LlamaTokenizer,
LlamaForCausalLM,
T5Tokenizer,
)
from fastchat.constants import CPU_ISA
from fastchat.conversation import Conversation, get_conv_template
from fastchat.model.compression import load_compress_model
from fastchat.model.llama_condense_monkey_patch import replace_llama_with_condense
from fastchat.model.model_chatglm import generate_stream_chatglm
from fastchat.model.model_codet5p import generate_stream_codet5p
from fastchat.model.model_falcon import generate_stream_falcon
from fastchat.model.model_exllama import generate_stream_exllama
from fastchat.model.model_xfastertransformer import generate_stream_xft
from fastchat.model.monkey_patch_non_inplace import (
replace_llama_attn_with_non_inplace_operations,
)
from fastchat.modules.awq import AWQConfig, load_awq_quantized
from fastchat.modules.exllama import ExllamaConfig, load_exllama_model
from fastchat.modules.xfastertransformer import load_xft_model, XftConfig
from fastchat.modules.gptq import GptqConfig, load_gptq_quantized
from fastchat.utils import get_gpu_memory
model_adapters: List[BaseModelAdapter] = []
The provided code snippet includes necessary dependencies for implementing the `register_model_adapter` function. Write a Python function `def register_model_adapter(cls)` to solve the following problem:
Register a model adapter.
Here is the function:
def register_model_adapter(cls):
    """Register a model adapter: instantiate *cls* and append it to the registry."""
    adapter = cls()
    model_adapters.append(adapter)
20,628 | import math
import os
import re
import sys
from typing import Dict, List, Optional
import warnings
import psutil
import torch
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
LlamaTokenizer,
LlamaForCausalLM,
T5Tokenizer,
)
from fastchat.constants import CPU_ISA
from fastchat.conversation import Conversation, get_conv_template
from fastchat.model.compression import load_compress_model
from fastchat.model.llama_condense_monkey_patch import replace_llama_with_condense
from fastchat.model.model_chatglm import generate_stream_chatglm
from fastchat.model.model_codet5p import generate_stream_codet5p
from fastchat.model.model_falcon import generate_stream_falcon
from fastchat.model.model_exllama import generate_stream_exllama
from fastchat.model.model_xfastertransformer import generate_stream_xft
from fastchat.model.monkey_patch_non_inplace import (
replace_llama_attn_with_non_inplace_operations,
)
from fastchat.modules.awq import AWQConfig, load_awq_quantized
from fastchat.modules.exllama import ExllamaConfig, load_exllama_model
from fastchat.modules.xfastertransformer import load_xft_model, XftConfig
from fastchat.modules.gptq import GptqConfig, load_gptq_quantized
from fastchat.utils import get_gpu_memory
The provided code snippet includes necessary dependencies for implementing the `remove_parent_directory_name` function. Write a Python function `def remove_parent_directory_name(model_path)` to solve the following problem:
Remove parent directory name.
Here is the function:
def remove_parent_directory_name(model_path):
    """Return the final path component of model_path, ignoring one trailing '/'."""
    if model_path[-1] == "/":
        model_path = model_path[:-1]
    # Only the segment after the last separator is needed.
    return model_path.rsplit("/", 1)[-1]
20,629 | import dataclasses
import gc
import glob
import os
from accelerate import init_empty_weights
from accelerate.utils import set_module_tensor_to_device
from huggingface_hub import snapshot_download
import torch
from torch import Tensor
from torch.nn import functional as F
import torch.nn as nn
from tqdm import tqdm
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
AutoModel,
AutoModelForSeq2SeqLM,
)
class CLinear(nn.Module):
def __init__(self, weight=None, bias=None, device=None):
def forward(self, input: Tensor) -> Tensor:
def compress_module(module, target_device):
    """Recursively replace every nn.Linear attribute in *module* with a CLinear.

    Direct attributes are swapped in place via setattr; children are then
    visited recursively so the whole module tree is compressed.
    """
    for attr_name in dir(module):
        attr = getattr(module, attr_name)
        if type(attr) == torch.nn.Linear:
            replacement = CLinear(attr.weight, attr.bias, target_device)
            setattr(module, attr_name, replacement)
    for _name, submodule in module.named_children():
        compress_module(submodule, target_device)
20,630 | import dataclasses
import gc
import glob
import os
from accelerate import init_empty_weights
from accelerate.utils import set_module_tensor_to_device
from huggingface_hub import snapshot_download
import torch
from torch import Tensor
from torch.nn import functional as F
import torch.nn as nn
from tqdm import tqdm
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
AutoModel,
AutoModelForSeq2SeqLM,
)
The provided code snippet includes necessary dependencies for implementing the `decompress` function. Write a Python function `def decompress(packed_data, config)` to solve the following problem:
Simulate group-wise dequantization.
Here is the function:
def decompress(packed_data, config):
    """Simulate group-wise dequantization (the inverse of compression).

    Returns ``packed_data`` untouched when compression is disabled;
    otherwise undoes the affine quantization and strips any padding that
    was added along ``config.group_dim``.
    """
    if not config.enabled:
        return packed_data

    group_size = config.group_size
    group_dim = config.group_dim

    # Undo the (a)symmetric affine quantization.
    if config.symmetric:
        quantized, scale, original_shape = packed_data
        values = quantized / scale
    else:
        quantized, mn, scale, original_shape = packed_data
        values = quantized / scale + mn

    # Amount of padding appended along group_dim during compression.
    pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
    if not pad_len:
        return values.view(original_shape)

    padded_shape = (
        original_shape[:group_dim]
        + (original_shape[group_dim] + pad_len,)
        + original_shape[group_dim + 1 :]
    )
    values = values.reshape(padded_shape)
    # Slice every dimension back to its original extent.
    trim = tuple(slice(0, extent) for extent in original_shape)
    return values[trim].contiguous()
20,631 | from functools import partial
import torch
import transformers
import transformers.models.llama.modeling_llama
class CondenseRotaryEmbedding(torch.nn.Module):
    """Rotary position embedding with positions divided by ``ratio``.

    Dividing the position index ``t`` by ``ratio`` ("position interpolation")
    stretches the usable context window by ``ratio``x while reusing the base
    model's learned frequency spectrum.
    """
    def __init__(
        self, dim, ratio, max_position_embeddings=2048, base=10000, device=None
    ):
        super().__init__()
        # Standard RoPE inverse frequencies: base ** (-2i / dim) for even i.
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.register_buffer("inv_freq", inv_freq)
        # Build here to make `torch.jit.trace` work.
        self.ratio = ratio
        # The cache must cover the stretched window: ratio * original max length.
        max_position_embeddings *= ratio
        self.max_seq_len_cached = max_position_embeddings
        # print(f"Monkey Patching condense ratio {ratio}")
        # Condensed positions: 0, 1/ratio, 2/ratio, ...
        t = (
            torch.arange(
                self.max_seq_len_cached,
                device=self.inv_freq.device,
                dtype=self.inv_freq.dtype,
            )
            / ratio
        )
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        dtype = torch.get_default_dtype()
        # persistent=False keeps the caches out of state_dict / checkpoints.
        self.register_buffer(
            "cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False
        )
        self.register_buffer(
            "sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False
        )
    def forward(self, x, seq_len=None):
        """Return the (cos, sin) caches sliced to ``seq_len``, cast to x's dtype."""
        # x: [bs, num_attention_heads, seq_len, head_size]
        # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
        if seq_len > self.max_seq_len_cached:
            # Grow the cache to the new longest sequence seen, same recipe as __init__.
            self.max_seq_len_cached = seq_len
            t = (
                torch.arange(
                    self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype
                )
                / self.ratio
            )
            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
            # Different from paper, but it uses a different permutation in order to obtain the same calculation
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            self.register_buffer(
                "cos_cached", emb.cos()[None, None, :, :].to(x.dtype), persistent=False
            )
            self.register_buffer(
                "sin_cached", emb.sin()[None, None, :, :].to(x.dtype), persistent=False
            )
        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )
def replace_llama_with_condense(ratio):
    """Monkey-patch HF llama's rotary embedding with the condensed variant.

    Every LlamaRotaryEmbedding constructed afterwards will interpolate
    positions by ``ratio``.
    """
    patched = partial(CondenseRotaryEmbedding, ratio=ratio)
    transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = patched
20,632 | import argparse
import torch
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForCausalLM
def apply_lora(base_model_path, target_model_path, lora_path):
    """Merge a LoRA adapter into a base model and save the merged model
    (plus the base tokenizer) to ``target_model_path``."""
    print(f"Loading the base model from {base_model_path}")
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )
    tokenizer = AutoTokenizer.from_pretrained(base_model_path, use_fast=False)

    print(f"Loading the LoRA adapter from {lora_path}")
    adapter = PeftModel.from_pretrained(
        base_model,
        lora_path,
        # torch_dtype=torch.float16
    )

    print("Applying the LoRA")
    merged = adapter.merge_and_unload()

    print(f"Saving the target model to {target_model_path}")
    merged.save_pretrained(target_model_path)
    tokenizer.save_pretrained(target_model_path)
20,633 | import argparse
import gc
import glob
import json
import os
import shutil
import tempfile
from huggingface_hub import snapshot_download
import torch
from torch import nn
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
GB = 1 << 30
def split_files(model_path, tmp_path, split_size):
    """Re-shard all ``pytorch_model-*.bin`` files of a checkpoint into
    ``tmp_path`` so that each output shard stays under ``split_size`` bytes.

    ``model_path`` may be a local directory or a HF Hub repo id (downloaded
    on demand). On any failure the partially-written ``tmp_path`` is removed
    and the exception re-raised.
    """
    if not os.path.exists(model_path):
        # Treat a non-existent path as a Hub repo id and download it.
        model_path = snapshot_download(repo_id=model_path)
    if not os.path.exists(tmp_path):
        os.makedirs(tmp_path)
    file_pattern = os.path.join(model_path, "pytorch_model-*.bin")
    files = glob.glob(file_pattern)
    part = 0
    try:
        for file_path in tqdm(files):
            state_dict = torch.load(file_path)
            new_state_dict = {}
            current_size = 0
            for name, param in state_dict.items():
                param_size = param.numel() * param.element_size()
                # Flush the current shard before it would exceed split_size.
                if current_size + param_size > split_size:
                    new_file_name = f"pytorch_model-{part}.bin"
                    new_file_path = os.path.join(tmp_path, new_file_name)
                    torch.save(new_state_dict, new_file_path)
                    current_size = 0
                    # Drop the reference and collect before building the next
                    # shard to keep peak memory low.
                    new_state_dict = None
                    gc.collect()
                    new_state_dict = {}
                    part += 1
                new_state_dict[name] = param
                current_size += param_size
            # Flush the remainder of this input file as its own shard.
            # NOTE(review): this writes an empty shard if the last tensor
            # exactly triggered a flush — confirm downstream tolerates that.
            new_file_name = f"pytorch_model-{part}.bin"
            new_file_path = os.path.join(tmp_path, new_file_name)
            torch.save(new_state_dict, new_file_path)
            new_state_dict = None
            gc.collect()
            new_state_dict = {}
            part += 1
    except Exception as e:
        print(f"An error occurred during split_files: {e}")
        # Clean up partial output, then propagate the failure.
        shutil.rmtree(tmp_path)
        raise
def apply_delta_low_cpu_mem(base_model_path, target_model_path, delta_path):
    """Add delta weights onto a base model without holding either model
    fully in RAM.

    Both checkpoints are first re-split into <= 4 GB shards inside temp
    directories; the delta is then applied shard by shard and the merged
    shards plus a ``pytorch_model.bin.index.json`` are written to
    ``target_model_path`` (which is wiped first if it exists).
    """
    delta_tokenizer = AutoTokenizer.from_pretrained(delta_path, use_fast=False)
    delta_config = AutoConfig.from_pretrained(delta_path)
    if os.path.exists(target_model_path):
        shutil.rmtree(target_model_path)
    os.makedirs(target_model_path)
    split_size = 4 * GB
    with tempfile.TemporaryDirectory() as tmp_base_path, tempfile.TemporaryDirectory() as tmp_delta_path:
        print(f"Split files for the base model to {tmp_base_path}")
        split_files(base_model_path, tmp_base_path, split_size)
        print(f"Split files for the delta weights to {tmp_delta_path}")
        split_files(delta_path, tmp_delta_path, split_size)
        base_pattern = os.path.join(tmp_base_path, "pytorch_model-*.bin")
        base_files = glob.glob(base_pattern)
        delta_pattern = os.path.join(tmp_delta_path, "pytorch_model-*.bin")
        delta_files = glob.glob(delta_pattern)
        # Only one delta shard is kept in memory at a time.
        delta_state_dict = torch.load(delta_files[0])
        print("Applying the delta")
        weight_map = {}
        total_size = 0
        for i, base_file in tqdm(enumerate(base_files)):
            state_dict = torch.load(base_file)
            file_name = f"pytorch_model-{i}.bin"
            for name, param in state_dict.items():
                if name not in delta_state_dict:
                    # Scan delta shards until the one holding `name` is loaded.
                    for delta_file in delta_files:
                        delta_state_dict = torch.load(delta_file)
                        gc.collect()
                        if name in delta_state_dict:
                            break
                state_dict[name] += delta_state_dict[name]
                # Record which output shard each tensor lives in (HF index format).
                weight_map[name] = file_name
                total_size += param.numel() * param.element_size()
            gc.collect()
            torch.save(state_dict, os.path.join(target_model_path, file_name))
        with open(
            os.path.join(target_model_path, "pytorch_model.bin.index.json"), "w"
        ) as f:
            json.dump(
                {"weight_map": weight_map, "metadata": {"total_size": total_size}}, f
            )
    print(f"Saving the target model to {target_model_path}")
    delta_tokenizer.save_pretrained(target_model_path)
    delta_config.save_pretrained(target_model_path)
20,634 | import argparse
import gc
import glob
import json
import os
import shutil
import tempfile
from huggingface_hub import snapshot_download
import torch
from torch import nn
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
def apply_delta(base_model_path, target_model_path, delta_path):
    """Reconstruct a target model by adding delta weights onto a base model.

    Loads both models in fp16, adds each delta tensor onto the matching
    base tensor in place, then saves the merged model and the delta's
    tokenizer to ``target_model_path``.
    """
    print(f"Loading the delta weights from {delta_path}")
    delta_tokenizer = AutoTokenizer.from_pretrained(delta_path, use_fast=False)
    delta = AutoModelForCausalLM.from_pretrained(
        delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )
    print(f"Loading the base model from {base_model_path}")
    base = AutoModelForCausalLM.from_pretrained(
        base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )

    print("Applying the delta")
    # Build the delta state dict once; the original called delta.state_dict()
    # twice per parameter, re-traversing the whole model each time.
    delta_state_dict = delta.state_dict()
    for name, param in tqdm(base.state_dict().items(), desc="Applying delta"):
        assert name in delta_state_dict
        # state_dict tensors share storage with the model, so this mutates `base`.
        param.data += delta_state_dict[name]

    print(f"Saving the target model to {target_model_path}")
    base.save_pretrained(target_model_path)
    delta_tokenizer.save_pretrained(target_model_path)
20,635 | from collections import namedtuple, OrderedDict
from typing import List
# Immutable record describing one model for display purposes.
ModelInfo = namedtuple("ModelInfo", ["simple_name", "link", "description"])

# Registry mapping every known full model name to its ModelInfo,
# in registration order.
model_info = OrderedDict()


def register_model_info(
    full_names: List[str], simple_name: str, link: str, description: str
):
    """Register every alias in ``full_names`` under one shared ModelInfo."""
    entry = ModelInfo(simple_name, link, description)
    model_info.update((alias, entry) for alias in full_names)
20,636 | import argparse
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
def make_delta(base_model_path, target_model_path, delta_path):
    """Compute delta weights (target - base) and save them to ``delta_path``.

    Subtracts each base tensor from the matching target tensor in place,
    then saves the resulting "delta model" plus the target's tokenizer.
    Relies on a module-level ``args`` (argparse namespace) for the optional
    ``hub_repo_id`` push-to-hub setting.
    """
    print(f"Loading the base model from {base_model_path}")
    base = AutoModelForCausalLM.from_pretrained(
        base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )

    print(f"Loading the target model from {target_model_path}")
    target = AutoModelForCausalLM.from_pretrained(
        target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )
    target_tokenizer = AutoTokenizer.from_pretrained(target_model_path, use_fast=False)

    print("Calculating the delta")
    # Build the base state dict once; the original called base.state_dict()
    # twice per parameter, re-traversing the whole model each time.
    base_state_dict = base.state_dict()
    for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
        assert name in base_state_dict
        # state_dict tensors share storage with the model, so this mutates `target`.
        param.data -= base_state_dict[name]

    print(f"Saving the delta to {delta_path}")
    if args.hub_repo_id:
        kwargs = {"push_to_hub": True, "repo_id": args.hub_repo_id}
    else:
        kwargs = {}
    target.save_pretrained(delta_path, **kwargs)
    target_tokenizer.save_pretrained(delta_path, **kwargs)
20,637 | import argparse
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
def convert_fp16(in_checkpoint, out_checkpoint):
    """Re-save a checkpoint (model + tokenizer) with weights cast to fp16."""
    tok = AutoTokenizer.from_pretrained(in_checkpoint, use_fast=False)
    fp16_model = AutoModelForCausalLM.from_pretrained(
        in_checkpoint, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )
    fp16_model.save_pretrained(out_checkpoint)
    tok.save_pretrained(out_checkpoint)
20,638 |
def send_request(ques):
    """POST one question to the local_doc_chat endpoint, print the answer,
    and append the round-trip time to the module-level ``response_times``."""
    # url = 'https://qanything-test.site.youdao.com/api/local_doc_qa/local_doc_chat'
    url = 'http://localhost:8777/api/local_doc_qa/local_doc_chat'
    req_headers = {
        'content-type': 'application/json'
    }
    payload = {
        "user_id": "liujx_265",
        "kb_ids": ["KBf652e9e379c546f1894597dcabdc8e47"],
        "question": ques,
        "rerank": False,
        "history": []
    }
    try:
        t0 = time.time()
        resp = requests.post(url=url, headers=req_headers, json=payload, timeout=60)
        t1 = time.time()
        response_times.append(t1 - t0)
        body = resp.json()
        print(body['response'])
        print(f"响应状态码: {resp.status_code}, 响应时间: {t1 - t0}秒")
    except Exception as e:
        # Best-effort load test: log the failure and keep going.
        print(f"请求发送失败: {e}")
20,639 | import os
import sys
import aiohttp
import asyncio
import time
import re
# Matches the Unicode "Halfwidth and Fullwidth Forms" block (U+FF00-U+FFEF).
# Compiled once at import time instead of on every call.
_FULLWIDTH_RE = re.compile(r'[\uFF00-\uFFEF]')


def remove_full_width_characters(s):
    """Return ``s`` with every character in the Halfwidth/Fullwidth Forms
    Unicode block (U+FF00-U+FFEF) removed."""
    return _FULLWIDTH_RE.sub('', s)
20,640 | import os
import sys
import aiohttp
import asyncio
import time
import re
# Script arguments: the folder to scan and the target knowledge-base id.
file_folder = sys.argv[1]
kb_id = sys.argv[2]
# Only files with these extensions are collected for upload.
support_end = ('.md', '.txt', '.pptx', '.jpg', '.jpeg', '.png', '.docx', '.xlsx', '.eml', '.csv', '.pdf')
files = []
# Recursively gather every supported file under file_folder.
for root, dirs, file_names in os.walk(file_folder):
    for file_name in file_names:
        # print(file_name)
        if file_name.endswith(support_end):
            file_path = os.path.join(root, file_name)
            files.append(file_path)
print(len(files))
response_times = []
iohttp.ClientTimeout(total=300)
send_request_with_semaphore(semaphore, round_, files):
async with semaphore:
await send_request(round_, files)
if __name__ == '__main__':
asyncio.run(main())
async def create_tasks_by_size_limit(files, size_limit_mb, max_concurrent_tasks=4):
    """Group ``files`` into batches of at most ``size_limit_mb`` MB and send
    each batch concurrently, at most ``max_concurrent_tasks`` at a time.

    Batching is greedy in input order; a single file larger than the limit
    still forms its own batch. Relies on the module-level coroutine
    ``send_request_with_semaphore``.
    """
    tasks = []
    size_limit = size_limit_mb * 1024 * 1024  # convert MB to bytes
    current_batch = []
    current_size = 0
    semaphore = asyncio.Semaphore(max_concurrent_tasks)  # caps concurrent sends
    round_ = 0
    for file in files:
        file_size = os.path.getsize(file)  # file size in bytes
        if current_size + file_size > size_limit and current_batch:
            # Adding this file would exceed the limit: dispatch the current batch.
            task = asyncio.create_task(send_request_with_semaphore(semaphore, round_, current_batch))
            round_ += 1
            tasks.append(task)
            current_batch = []  # reset the batch
            current_size = 0  # reset the accumulated size
        current_batch.append(file)
        current_size += file_size
    if current_batch:
        # Dispatch the final partial batch, if any.
        task = asyncio.create_task(send_request_with_semaphore(semaphore, round_, current_batch))
        tasks.append(task)
    await asyncio.gather(*tasks)
20,641 | import os
import json
import requests
import time
import random
import string
import hashlib
import argparse
import concurrent.futures
import numpy as np
import pandas as pd
from tqdm import tqdm
import random
import threading
print("response", response)
print(response.iter_lines)
def test_stream():
    """Smoke-test the streaming chat endpoint: send one fixed question and
    print every decoded response chunk.

    Relies on the module-level ``stream_requests`` generator.
    """
    data_raw = {
        "kb_ids": [
            "KBf46828db208c4289a120a34f0fc96147",
            "KBc2440f13e98f4736b5ef81cfaebef3a9",
            "KBb78af28c73f74fb4ae6ad44b3c53302f",
            "KB6c2b097d83be430ab809e361fa8dcc8b",
            "KB69331d593f5b4b5bb555a0ea1b145e5b",
            "KB3cdc79f8c8d24a14bffd27e6570c33da"
        ],
        "question": "西南交通大学是211院校吗",
        "user_id": "liujx_265",
        "streaming": True,
        "rerank": True,
        "history": []
    }
    for i, chunk in enumerate(stream_requests(data_raw)):
        if chunk:
            # Drop the 6-char chunk prefix before JSON-decoding
            # (presumably the SSE "data: " marker — confirm against server).
            chunkstr = chunk.decode("utf-8")[6:]
            chunkjs = json.loads(chunkstr)
            print(chunkjs)
20,642 | import os
import json
import requests
import time
import random
import string
import hashlib
import argparse
import concurrent.futures
import numpy as np
import pandas as pd
from tqdm import tqdm
import random
import threading
def measure_latency(ques, output_file, is_stream=False):
    """Issue one request (streaming or not) and return elapsed wall-clock seconds."""
    started = time.time()
    if is_stream:
        # Drain the generator so the full stream is actually consumed.
        for _ in stream_requests(ques, output_file):
            pass
    else:
        no_stream_requests(ques, output_file)
    return time.time() - started
def perform_load_test(concurrency, total_requests, questions, output_file, is_stream=False):
    """Fire ``total_requests`` requests from ``concurrency`` worker threads
    and return (latencies, p99, p95, qps).

    NOTE(review): the ``questions`` parameter is immediately shadowed by a
    hard-coded list below, so callers' question sets are ignored — confirm
    whether that is intentional.
    NOTE(review): qps divides by the *sum* of per-request latencies rather
    than wall-clock time, which understates throughput under concurrency.
    """
    latencies = []
    questions = ["什么是三大专项", "江苏高三物生地,军校能不能报,哪些专业不能报", "山东文科在江苏怎么选学校", "东南大学化学工程与工艺,生物科学,制药工程分流哪个好?", "男生高三物化地,辽宁,学日语好选学校吗"]
    #questions = ["什么是三大专项"] * 5
    with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
        # Map each submitted future to its request index.
        future_to_request = {executor.submit(measure_latency, random.choice(questions), output_file, is_stream): i for i in range(total_requests)}
        for future in concurrent.futures.as_completed(future_to_request):
            try:
                latency = future.result()
                latencies.append(latency)
            except Exception as e:
                # A failed request is logged and excluded from the stats.
                print(f"请求执行异常: {e}")
    # Compute latency statistics.
    p99 = np.percentile(latencies, 99)
    p95 = np.percentile(latencies, 95)
    total_time = sum(latencies)
    qps = total_requests / total_time
    return latencies, p99, p95, qps
20,643 | ls import cycle
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, ProgbarLogger
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
maxlen = 128
batch_size = 16
config_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
if __name__ == '__main__':
model.fit(train_dataloader, epochs=20, steps_per_epoch=100)
def collate_fn(batch):
    """Collate (text, label) pairs into padded BERT input tensors.

    Relies on the module-level ``tokenizer``, ``maxlen`` and ``device``.
    Returns ([token_ids, segment_ids], labels) with labels flattened to 1-D.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_labels.append([label])
    # sequence_padding (bert4torch helper) pads each sequence to a common length.
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
20,644 | ls import cycle
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, ProgbarLogger
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
maxlen = 128
batch_size = 16
config_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
if __name__ == '__main__':
model.fit(train_dataloader, epochs=20, steps_per_epoch=100)
def evaluate(data):
    """Return the accuracy of the module-level ``model`` over a dataloader."""
    seen = 0.
    correct = 0.
    for inputs, labels in data:
        preds = model.predict(inputs).argmax(axis=1)
        seen += len(labels)
        correct += (labels == preds).sum().item()
    return correct / seen
20,645 | from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, Logger, Tensorboard, text_segmentate, ListDataset, Evaluator, EarlyStopping, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torchinfo import summary
import os
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
def inference(texts):
def collate_fn(batch):
    """Collate (text, label) pairs into padded BERT input tensors.

    Relies on the module-level ``tokenizer``, ``maxlen`` and ``device``.
    Returns ([token_ids, segment_ids], labels) with labels flattened to 1-D.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_labels.append([label])
    # sequence_padding (bert4torch helper) pads each sequence to a common length.
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
20,646 | from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, Logger, Tensorboard, text_segmentate, ListDataset, Evaluator, EarlyStopping, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torchinfo import summary
import os
def collate_fn(batch):
    """Collate (text, label) pairs into padded BERT input tensors.

    Relies on the module-level ``tokenizer``, ``maxlen`` and ``device``.
    Returns ([token_ids, segment_ids], labels) with labels flattened to 1-D.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_labels.append([label])
    # sequence_padding (bert4torch helper) pads each sequence to a common length.
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
def inference(texts):
    '''Single-sample inference: print the predicted class for each text.

    Relies on the module-level ``tokenizer``, ``model``, ``maxlen`` and ``device``.
    '''
    for text in texts:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        # Add a leading batch dimension of 1.
        token_ids = torch.tensor(token_ids, dtype=torch.long, device=device)[None, :]
        segment_ids = torch.tensor(segment_ids, dtype=torch.long, device=device)[None, :]
        logit = model.predict([token_ids, segment_ids])
        y_pred = torch.argmax(torch.softmax(logit, dim=-1)).cpu().numpy()
        print(text, ' ----> ', y_pred)
def acc(y_pred, y_true):
    """Batch accuracy: fraction of argmax predictions matching ``y_true``."""
    predicted = y_pred.argmax(dim=-1).detach()  # detach: keep the metric out of the autograd graph
    return predicted.eq(y_true).sum().item() / y_true.numel()
20,647 | from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, Logger, Tensorboard, text_segmentate, ListDataset, Evaluator, EarlyStopping, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torchinfo import summary
import os
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
model = Model().to(device)
model.compile(
loss=nn.CrossEntropyLoss(),
optimizer=optimizer,
metrics={'acc': acc}
)
def inference(texts):
'''单条样本推理
'''
for text in texts:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
token_ids = torch.tensor(token_ids, dtype=torch.long, device=device)[None, :]
segment_ids = torch.tensor(segment_ids, dtype=torch.long, device=device)[None, :]
logit = model.predict([token_ids, segment_ids])
y_pred = torch.argmax(torch.softmax(logit, dim=-1)).cpu().numpy()
print(text, ' ----> ', y_pred)
The provided code snippet includes necessary dependencies for implementing the `inference` function. Write a Python function `def inference(texts)` to solve the following problem:
单条样本推理
Here is the function:
def inference(texts):
    '''Single-sample inference: print the predicted class for each text.

    Relies on the module-level ``tokenizer``, ``model``, ``maxlen`` and ``device``.
    '''
    for text in texts:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        # Add a leading batch dimension of 1.
        token_ids = torch.tensor(token_ids, dtype=torch.long, device=device)[None, :]
        segment_ids = torch.tensor(segment_ids, dtype=torch.long, device=device)[None, :]
        logit = model.predict([token_ids, segment_ids])
        y_pred = torch.argmax(torch.softmax(logit, dim=-1)).cpu().numpy()
        print(text, ' ----> ', y_pred)
20,648 | AutoModelForSequenceClassification
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 128
batch_size = 16
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
grad_accumulation_steps=2,
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=20, steps_per_epoch=100, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def collate_fn(batch):
    """Collate (text, label) pairs into padded BERT input tensors.

    Relies on the module-level ``tokenizer``, ``maxlen`` and ``device``.
    Returns ([token_ids, segment_ids], labels) with labels flattened to 1-D.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_labels.append([label])
    # sequence_padding (bert4torch helper) pads each sequence to a common length.
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
20,649 | AutoModelForSequenceClassification
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 128
batch_size = 16
dict_path = 'E:/pretrain_ckpt/bert/google@chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
grad_accumulation_steps=2,
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=20, steps_per_epoch=100, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def evaluate(data):
    """Return the accuracy of the module-level ``model`` over a dataloader."""
    seen = 0.
    correct = 0.
    for inputs, labels in data:
        preds = model.predict(inputs).argmax(axis=1)
        seen += len(labels)
        correct += (labels == preds).sum().item()
    return correct / seen
20,650 | import os
import json
import shutil
def replace_file(local_path, convert_path, replace=False):
    """Overwrite ``local_path`` with ``convert_path`` when ``replace`` is truthy;
    otherwise do nothing."""
    if not replace:
        return
    shutil.copy(convert_path, local_path)
20,651 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.layers import TplinkerHandshakingKernel
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.optim as optim
import numpy as np
maxlen = 64
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d, id2predicate = {}, {}
def trans_ij2k(seq_len, i, j):
def search(pattern, sequence):
def collate_fn(batch):
def collate_fn(batch):
    """Collate raw {'text', 'spo_list'} samples into TPLinker training tensors.

    Labels live on the flattened upper triangle of the token-pair matrix
    (pair_len = seq_len * (seq_len + 1) // 2, indexed via trans_ij2k).
    Head/tail tags: 1 = subject index first, 2 = object index first.
    """
    # batch_entity_labels: [btz, pair_len]
    # batch_head_labels: [btz, rel_size, pair_len]
    # batch_tail_labels: [btz, rel_size, pair_len]
    batch_token_ids = []
    batch_entity_labels = []
    batch_head_labels = []
    batch_tail_labels = []
    batch_mask = []
    for d in batch:
        token_ids = tokenizer.encode(d['text'])[0][1:-1][:maxlen]  # drop [CLS]/[SEP], keep first maxlen tokens
        # (i, j) -> flat upper-triangle index
        map_ij2k = {(i, j): trans_ij2k(len(token_ids), i, j) for i in range(len(token_ids)) for j in range(len(token_ids)) if j >= i}
        mask = [0 if token_ids[j]==0 else 1 for i in range(len(token_ids)) for j in range(len(token_ids)) if j >= i]
        pair_len = len(token_ids) * (len(token_ids)+1) // 2
        entity_labels = np.zeros(pair_len)
        head_labels = np.zeros((len(predicate2id), pair_len))
        tail_labels = np.zeros((len(predicate2id), pair_len))
        # arrange the gold triples onto the label matrices
        for s, p, o in d['spo_list']:
            s = tokenizer.encode(s)[0][1:-1]
            p = predicate2id[p]
            o = tokenizer.encode(o)[0][1:-1]
            sh = search(s, token_ids)  # -1 when the span was truncated away
            oh = search(o, token_ids)
            if sh != -1 and oh != -1:
                st, ot = sh+len(s)-1, oh+len(o)-1
                entity_labels[map_ij2k[sh, st]] = 1
                entity_labels[map_ij2k[oh, ot]] = 1
                if sh <= oh:
                    head_labels[p, map_ij2k[sh, oh]] = 1
                else:
                    head_labels[p, map_ij2k[oh, sh]] = 2
                if st <= ot:
                    tail_labels[p, map_ij2k[st, ot]] = 1
                else:
                    tail_labels[p, map_ij2k[ot, st]] = 2
        batch_token_ids.append(token_ids)
        batch_entity_labels.append(entity_labels)
        batch_head_labels.append(head_labels)
        batch_tail_labels.append(tail_labels)
        batch_mask.append(mask)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels), dtype=torch.long, device=device)
    batch_head_labels = torch.tensor(sequence_padding(batch_head_labels, seq_dims=2), dtype=torch.long, device=device)
    batch_tail_labels = torch.tensor(sequence_padding(batch_tail_labels, seq_dims=2), dtype=torch.long, device=device)
    batch_mask = torch.tensor(sequence_padding(batch_mask), dtype=torch.long, device=device)
    return [batch_token_ids], [batch_entity_labels, batch_head_labels, batch_tail_labels, batch_mask]
20,652 | json
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.layers import TplinkerHandshakingKernel
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.optim as optim
import numpy as np
d, id2predicate = {}, {}
with open('E:/data/corpus/relation_extraction/BD_Knowledge_Extraction/all_50_schemas', encoding='utf-8') as f:
for l in f:
l = json.loads(l)
if l['predicate'] not in predicate2id:
id2predicate[len(predicate2id)] = l['predicate']
predicate2id[l['predicate']] = len(predicate2id)
def trans_ij2k(seq_len, i, j):
    """Map cell (i, j) of an upper-triangular seq_len x seq_len matrix to its
    flattened index (row-major over the upper triangle).

    Returns 0 for out-of-range or lower-triangle coordinates, matching the
    original contract (note: (0, 0) also legitimately maps to 0).
    """
    if (i > seq_len - 1) or (j > seq_len - 1) or (i > j):
        return 0
    # Rows 0..i-1 contribute seq_len - r cells each, i.e. i*seq_len - i*(i-1)/2;
    # then offset (j - i) within row i.  Pure integer arithmetic replaces the
    # original int(0.5 * (2*seq_len - i + 1) * i + (j - i)), avoiding float
    # precision loss for very long sequences while giving identical results.
    return i * seq_len - i * (i - 1) // 2 + (j - i)
def search(pattern, sequence):
    """Find the first occurrence of the id list `pattern` inside `sequence`.

    `sequence` may be a list or a torch.Tensor (moved to CPU and converted).
    Returns the start index of the first match, or -1 if absent.
    """
    if isinstance(sequence, torch.Tensor):
        sequence = sequence.cpu().tolist()
    width = len(pattern)
    for start in range(len(sequence)):
        if sequence[start:start + width] == pattern:
            return start
    return -1
def collate_fn(batch):
    """Collate {'text', 'spo_list'} samples into TPLinker label tensors laid
    out on the flattened upper triangle of the token-pair matrix
    (pair_len = seq_len * (seq_len + 1) // 2; see trans_ij2k).
    """
    # batch_entity_labels: [btz, pair_len]
    # batch_head_labels: [btz, rel_size, pair_len]
    # batch_tail_labels: [btz, rel_size, pair_len]
    batch_token_ids = []
    batch_entity_labels = []
    batch_head_labels = []
    batch_tail_labels = []
    batch_mask = []
    for d in batch:
        token_ids = tokenizer.encode(d['text'])[0][1:-1][:maxlen]  # keep only the first maxlen tokens
        map_ij2k = {(i, j): trans_ij2k(len(token_ids), i, j) for i in range(len(token_ids)) for j in range(len(token_ids)) if j >= i}
        mask = [0 if token_ids[j]==0 else 1 for i in range(len(token_ids)) for j in range(len(token_ids)) if j >= i]
        pair_len = len(token_ids) * (len(token_ids)+1) // 2
        entity_labels = np.zeros(pair_len)
        head_labels = np.zeros((len(predicate2id), pair_len))
        tail_labels = np.zeros((len(predicate2id), pair_len))
        # arrange the gold triples onto the label matrices
        for s, p, o in d['spo_list']:
            s = tokenizer.encode(s)[0][1:-1]
            p = predicate2id[p]
            o = tokenizer.encode(o)[0][1:-1]
            sh = search(s, token_ids)  # -1 when the span was truncated away
            oh = search(o, token_ids)
            if sh != -1 and oh != -1:
                st, ot = sh+len(s)-1, oh+len(o)-1
                entity_labels[map_ij2k[sh, st]] = 1
                entity_labels[map_ij2k[oh, ot]] = 1
                # tag 1: subject index first; tag 2: object index first
                if sh <= oh:
                    head_labels[p, map_ij2k[sh, oh]] = 1
                else:
                    head_labels[p, map_ij2k[oh, sh]] = 2
                if st <= ot:
                    tail_labels[p, map_ij2k[st, ot]] = 1
                else:
                    tail_labels[p, map_ij2k[ot, st]] = 2
        batch_token_ids.append(token_ids)
        batch_entity_labels.append(entity_labels)
        batch_head_labels.append(head_labels)
        batch_tail_labels.append(tail_labels)
        batch_mask.append(mask)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels), dtype=torch.long, device=device)
    batch_head_labels = torch.tensor(sequence_padding(batch_head_labels, seq_dims=2), dtype=torch.long, device=device)
    batch_tail_labels = torch.tensor(sequence_padding(batch_tail_labels, seq_dims=2), dtype=torch.long, device=device)
    batch_mask = torch.tensor(sequence_padding(batch_mask), dtype=torch.long, device=device)
    return [batch_token_ids], [batch_entity_labels, batch_head_labels, batch_tail_labels, batch_mask]
def extract_spoes(text):
    """Extract the (subject, predicate, object) triples contained in `text`.

    Returns (spoes, token_ids, ent_text): decoded triples as text spans,
    the [CLS]/[SEP]-stripped token ids, and the set of predicted entity
    strings.
    """
    def get_spots_fr_shaking_tag(shaking_tag):
        '''Decode (relation_id, head_idx, tail_idx) spots from a
        [rel_size, pair_len] shaking-tag matrix.'''
        spots = []
        for shaking_inds in shaking_tag.nonzero():
            rel_id = shaking_inds[0].item()
            tag_id = shaking_tag[rel_id][shaking_inds[1]].item()
            matrix_inds = map_k2ij[shaking_inds[1].item()]
            # ensure subject index comes first, object index second
            if tag_id == 1:
                spot = (rel_id, matrix_inds[0], matrix_inds[1])
            elif tag_id == 2:
                spot = (rel_id, matrix_inds[1], matrix_inds[0])
            spots.append(spot)
        return spots
    tokens = tokenizer.tokenize(text)[1:-1]
    mapping = tokenizer.rematch(text, tokens)  # token -> char-span mapping
    token_ids = tokenizer.encode(text)[0][1:-1]
    token_ids_ts = torch.tensor([token_ids], dtype=torch.long, device=device)
    map_ij2k = {(i, j): trans_ij2k(len(token_ids), i, j) for i in range(len(token_ids)) for j in range(len(token_ids)) if j >= i}
    map_k2ij = {v: k for k, v in map_ij2k.items()}
    outputs = model.predict([token_ids_ts])
    outputs = [o[0].argmax(dim=-1) for o in outputs]
    # extract entities from the entity matrix
    ent_matrix_spots = set()
    ent_text = set()
    for shaking_ind in outputs[0].nonzero():
        shaking_ind_ = shaking_ind[0].item()
        # tag_id = outputs[0][shaking_ind_]
        matrix_inds = map_k2ij[shaking_ind_]
        spot = (matrix_inds[0], matrix_inds[1])
        if (spot[0] < len(mapping)) and (spot[1] < len(mapping)):  # span lies within the char mapping
            ent_matrix_spots.add(spot)
            ent_text.add(text[mapping[spot[0]][0]:mapping[spot[1]][-1] + 1])
    # decode head/tail spots per relation
    head_rel_matrix_spots = get_spots_fr_shaking_tag(outputs[1])
    tail_rel_matrix_spots = get_spots_fr_shaking_tag(outputs[2])
    spoes = []
    for rel_h, sh, oh in head_rel_matrix_spots:
        for rel_t, st, ot in tail_rel_matrix_spots:
            # same relation, and both (sh, st) / (oh, ot) are recognized entities
            if (rel_h == rel_t) and ((sh, st) in ent_matrix_spots) and ((oh, ot) in ent_matrix_spots):
                spoes.append((text[mapping[sh][0]:mapping[st][-1] + 1], id2predicate[rel_h], text[mapping[oh][0]:mapping[ot][-1] + 1]))
    return spoes, token_ids, ent_text
class SPO(tuple):
    """Tuple-like container for a (subject, predicate, object) triple.

    Behaves like a plain tuple but overrides __hash__ and __eq__ so that two
    triples compare equal when their *tokenized* forms match, making the
    evaluation tolerant to minor surface differences.
    """
    def __init__(self, spo):
        # the tuple contents were already set by tuple.__new__; here we only
        # cache the tokenized view used for hashing/equality
        self.spox = (tuple(tokenizer.tokenize(spo[0])), spo[1], tuple(tokenizer.tokenize(spo[2])))
    def __hash__(self):
        return self.spox.__hash__()
    def __eq__(self, spo):
        return self.spox == spo.spox
The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate(data)` to solve the following problem:
评估函数,计算f1、precision、recall
Here is the function:
def evaluate(data):
    """Evaluation: triple-level f1 / precision / recall plus entity accuracy.

    Also dumps per-sample predictions (including spurious/missing triples)
    to dev_pred.json.
    """
    X, Y, Z = 0, 1e-10, 1e-10  # |pred ∩ gold|, |pred|, |gold| (epsilon avoids /0)
    E1, E2 = 0, 1e-10  # matched entity strings, gold entity strings
    # NOTE(review): consider `with open(...)` so the handle closes on error
    f = open('dev_pred.json', 'w', encoding='utf-8')
    pbar = tqdm()
    for d in data:
        spoes, token_ids, ent_text_pred = extract_spoes(d['text'])
        # keep only gold triples that survive maxlen truncation
        spo_list = []
        for s, p, o in d['spo_list']:
            s_ = tokenizer.encode(s)[0][1:-1]
            o_ = tokenizer.encode(o)[0][1:-1]
            sh = search(s_, token_ids)  # -1 when the span was truncated away
            oh = search(o_, token_ids)
            if sh != -1 and oh != -1:
                spo_list.append((s, p, o))
        # triple-level f1 (SPO makes the comparison tokenization-tolerant)
        R = set([SPO(spo) for spo in spoes])
        T = set([SPO(spo) for spo in spo_list])
        X += len(R & T)
        Y += len(R)
        Z += len(T)
        f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
        # entity-level accuracy (recall of gold entity strings)
        ent_text_truth = set([spo[0] for spo in spo_list] + [spo[-1] for spo in spo_list])
        E1 += len(ent_text_pred & ent_text_truth)
        E2 += len(ent_text_truth)
        E_acc = E1 / E2
        pbar.update()
        pbar.set_description('f1: %.5f, precision: %.5f, recall: %.5f, ent_acc: %.5f' % (f1, precision, recall, E_acc))
        s = json.dumps({'text': d['text'], 'spo_list': list(T), 'spo_list_pred': list(R),
                        'new': list(R - T), 'lack': list(T - R)}, ensure_ascii=False, indent=4)
        f.write(s + '\n')
    pbar.close()
    f.close()
    return f1, precision, recall
20,653 | import build_transformer_model
from bert4torch.snippets import sequence_padding
from bert4torch.callbacks import Callback
from bert4torch.optimizers import get_linear_schedule_with_warmup
from torch.utils.data import Dataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import json
import os
import shelve
import random
import time
batch_size = 7
def collate_fn(batch):
    """Collate pre-tokenized MLM samples into (inputs, labels) tensors.

    Each item is a dict with 'input_ids' and 'masked_lm_labels'.
    """
    batch_token_ids, batch_labels = [], []
    for item in batch:
        batch_token_ids.append(item['input_ids'])
        batch_labels.append(item['masked_lm_labels'])
    # token ids are padded; labels are stacked as-is (presumably already
    # fixed-length from the data pipeline -- TODO confirm)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [batch_token_ids], batch_labels
files_training_data = os.listdir(dir_training_data)
files_training_data = [file.split(".")[0] for file in files_training_data if "train" in file]
data = [i for i in set(files_training_data) if files_training_data.count(i)==4]
if files_training_data:
file_train = random.choice(files_training_data)
for suffix in [".bak", ".dat", ".dir", ".json"]:
file_old = os.path.join(dir_training_data, file_train + suffix)
file_new = os.path.join(dir_training_data, task_name + suffix)
os.renames(file_old, file_new)
cur_load_file = file_new.split(".")[0]
train_dataloader = DataLoader(MyDataset(cur_load_file), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
break
else:
sleep_seconds = 300
print(f"No training data! Sleep {sleep_seconds}s!")
time.sleep(sleep_seconds)
continu train_dataloader
train_dataloader =
def get_train_dataloader():
    """Poll `dir_training_data` until a finished training shard appears,
    claim it by renaming to `task_name`, and return a DataLoader over it.
    Sleeps 300s between polls when no shard is ready.
    """
    while True:
        # prepare dataset
        files_training_data = os.listdir(dir_training_data)
        files_training_data = [file.split(".")[0] for file in files_training_data if "train" in file]
        # a shard is complete only when all 4 files (.bak/.dat/.dir/.json)
        # exist, which avoids picking up a shard still being written
        files_training_data = [i for i in set(files_training_data) if files_training_data.count(i)==4]
        if files_training_data:
            file_train = random.choice(files_training_data)
            # claim the shard by renaming every component to the task name
            for suffix in [".bak", ".dat", ".dir", ".json"]:
                file_old = os.path.join(dir_training_data, file_train + suffix)
                file_new = os.path.join(dir_training_data, task_name + suffix)
                os.renames(file_old, file_new)
            cur_load_file = file_new.split(".")[0]
            train_dataloader = DataLoader(MyDataset(cur_load_file), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
            break
        else:
            sleep_seconds = 300
            print(f"No training data! Sleep {sleep_seconds}s!")
            time.sleep(sleep_seconds)
            continue
    return train_dataloader
20,654 | import json, glob, re
from tqdm import tqdm
import collections
import gc
import shelve
import time
import os
import random
import jieba
The provided code snippet includes necessary dependencies for implementing the `some_texts` function. Write a Python function `def some_texts()` to solve the following problem:
挑选语料
Here is the function:
def some_texts():
    """Yield batches of sentence fragments sampled from one random corpus file.

    Lines are split into sentence-like pieces on 。/newline; every 10 source
    lines are grouped into one yielded list, with the remainder yielded last.
    """
    files_corpus = glob.glob(f'{dir_corpus}/*/*')  # adjust to your directory layout
    file_corpus = random.choice(files_corpus)  # pick one corpus file at random
    count, texts = 0, []
    with open(file_corpus, encoding='utf-8') as f:
        for l in tqdm(f, desc=f'Load data from {file_corpus}'):
            l = l.strip()
            texts.extend(re.findall(u'.*?[\n。]+', l))
            count += 1
            if count == 10:  # process 10 lines (presumably one article per line -- TODO confirm) at a time
                yield texts
                count, texts = 0, []
    if texts:
        yield texts
20,655 | import json, glob, re
from tqdm import tqdm
import collections
import gc
import shelve
import time
import os
import random
import jieba
jieba.initialize()
def word_segment(text):
    """Tokenize `text` into a list of words with jieba (same as jieba.lcut)."""
    return list(jieba.cut(text))
20,656 | import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, truncate_sequences, get_pool_emb
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
import jieba
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split(text):
def masked_encode(text):
def collate_fn(batch):
    """Build SimBERT seq2seq pairs: [CLS]text[SEP]synonym[SEP], in both
    directions per sample; the inputs double as labels for the UniLM loss.
    """
    batch_token_ids, batch_segment_ids = [], []
    for d in batch:
        text, synonyms = d['text'], d['synonyms']
        # pick two distinct sentences from {text} ∪ synonyms at random
        text, synonym = np.random.permutation([text] + synonyms)[:2]
        text, synonym = split(text)[0], split(synonym)[0]
        for _ in range(2):
            if np.random.random() < 0.5:
                # half the time, wwm-corrupt the source side
                text_ids = masked_encode(text)[0]
            else:
                text_ids = tokenizer.encode(text)[0]
            synonym_ids = tokenizer.encode(synonym)[0][1:]
            truncate_sequences(maxlen * 2, -2, text_ids, synonym_ids)
            token_ids = text_ids + synonym_ids
            segment_ids = [0] * len(text_ids) + [1] * len(synonym_ids)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
            text, synonym = synonym, text  # swap for the reverse direction
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], [batch_token_ids, batch_segment_ids]
20,657 | import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, truncate_sequences, get_pool_emb
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
import jieba
def gen_synonyms(text, n=100, k=20):
    """Generate n paraphrase candidates of `text` via seq2seq decoding, then
    return the k candidates most similar to `text`, ranked by the encoder
    sentence embeddings.

    Example:
    >>> gen_synonyms(u'微信和支付宝哪个好?')
    [
        u'微信和支付宝,哪个好?',
        u'微信和支付宝哪个好',
        u'支付宝和微信哪个好',
        u'支付宝和微信哪个好啊',
        u'微信和支付宝那个好用?',
        u'微信和支付宝哪个好用',
        u'支付宝和微信那个更好',
        u'支付宝和微信哪个好用',
        u'微信和支付宝用起来哪个好?',
        u'微信和支付宝选哪个好',
    ]
    """
    r = synonyms_generator.generate(text, n)
    r = [i for i in set(r) if i != text]  # de-duplicate; drop the input itself
    r = [text] + r  # embed the original in the same batch for comparison
    Z = cal_sen_emb(r)
    Z /= (Z**2).sum(dim=1, keepdims=True)**0.5  # L2-normalize rows
    argsort = torch.matmul(Z[1:], -Z[0]).argsort()  # most similar first
    return [r[i + 1] for i in argsort[:k]]
The provided code snippet includes necessary dependencies for implementing the `just_show` function. Write a Python function `def just_show(some_samples)` to solve the following problem:
随机观察一些样本的效果
Here is the function:
def just_show(some_samples):
    """Print 3 random samples from `some_samples` with their generated
    synonyms (demo helper).

    Generation errors are reported instead of being silently swallowed by
    the previous bare `except: pass`.
    """
    picks = [np.random.choice(some_samples) for _ in range(3)]
    for s in picks:
        try:
            print(u'原句子:%s' % s)
            print(u'同义句子:', gen_synonyms(s, 10, 10))
            print()
        except Exception as e:  # narrow from bare except; surface the failure
            print('[just_show] generation failed: %r' % (e,))
20,658 | from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate
from bert4torch.snippets import truncate_sequences, get_pool_emb
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
import json
import glob
The provided code snippet includes necessary dependencies for implementing the `split` function. Write a Python function `def split(text)` to solve the following problem:
分割句子
Here is the function:
def split(text):
    """Split `text` into segments of at most ~1.2 * maxlen characters."""
    separators = u'\n。!?!?;;,, '
    strip_chars = u';;,, '
    limit = int(maxlen * 1.2)
    return text_segmentate(text, limit, separators, strip_chars)
20,659 | from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate
from bert4torch.snippets import truncate_sequences, get_pool_emb
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
import json
import glob
def collate_fn(batch):
    """Collate (text1, text2, label) triples for a twin-tower setup.

    The two texts are encoded as consecutive rows, so rows 2i and 2i+1 form
    one pair with a single shared label.
    """
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text1, text2, label in batch:
        for text in [text1, text2]:
            token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
        batch_labels.append([label])
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], batch_labels
20,660 | import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, get_pool_emb, truncate_sequences
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
import jieba
def split(text):
    """Split `text` into segments of at most ~1.2 * maxlen characters.

    NOTE(review): passes maxlen * 1.2 as a float; another copy of this helper
    casts to int -- confirm text_segmentate accepts floats.
    """
    seps, strips = u'\n。!?!?;;,, ', u';;,, '
    return text_segmentate(text, maxlen * 1.2, seps, strips)
def masked_encode(text):
    """Whole-word random masking encode (BERT-style 80/10/10 scheme).

    Returns (source, target): source is the corrupted id sequence including
    [CLS]/[SEP]; target holds the original ids at corrupted positions and 0
    elsewhere (0 = not predicted).
    """
    words = jieba.lcut(text)
    rands = np.random.random(len(words))
    source, target = [tokenizer._token_start_id], [0]
    for r, w in zip(rands, words):
        ids = tokenizer.encode(w)[0][1:-1]
        if r < 0.15 * 0.8:
            # 12%: whole word replaced by [MASK]
            source.extend([tokenizer._token_mask_id] * len(ids))
            target.extend(ids)
        elif r < 0.15 * 0.9:
            # 1.5%: kept as-is but still predicted
            source.extend(ids)
            target.extend(ids)
        elif r < 0.15:
            # 1.5%: replaced by random non-PAD tokens
            source.extend(
                np.random.choice(tokenizer._vocab_size - 1, size=len(ids)) + 1
            )
            target.extend(ids)
        else:
            # 85%: untouched, not predicted
            source.extend(ids)
            target.extend([0] * len(ids))
    source = source[:maxlen - 1] + [tokenizer._token_end_id]
    target = target[:maxlen - 1] + [0]
    return source, target
batch_token_ids, batch_segment_ids = [], []
batch_sim_token_ids, batch_sim_segment_ids = [], []
for d in batch:
text, synonyms = d['text'], d['synonyms']
text, synonym = np.random.permutation([text] + synonyms)[:2]
text, synonym = split(text)[0], split(synonym)[0]
for _ in range(2):
if np.random.random() < 0.5:
text_ids = masked_encode(text)[0]
else:
text_ids = tokenizer.encode(text)[0]
synonym_ids = tokenizer.encode(synonym)[0][1:]
truncate_sequences(maxlen * 2, -2, text_ids, synonym_ids)
token_ids = text_ids + synonym_ids
segment_ids = [0] * len(text_ids) + [1] * len(synonym_ids)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
# ==== 蒸馏用:开始 ====
token_ids, segment_ids = sim_tokenizer.encode(text, maxlen=maxlen)
batch_sim_token_ids.append(token_ids)
batch_sim_segment_ids.append(segment_ids)
# ==== 蒸馏用:结束 ====
text, synonym = synonym, text
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
im_token_ids = torch.tensor(sequence_padding(batch_sim_token_ids), dtype=torch.long, device=device)
batch_sim_segment_ids = torch.tensor(sequence_padding(batch_sim_segment_ids), dtype=torch.long, device=device)
sim_vecs = simbert.predict([batch_sim_token_ids, batch_sim_segment_ids])[1]
sim_vecs /= (sim_vecs**2).sum(dim=-1, keepdims=True)**0.5
sims = torch.matmul(sim_vecs, sim_vecs.T)
[batch_token_ids, batch_segment_ids], [batch_token_ids, batch_segment_ids, sims
def collate_fn(batch):
    """SimBERT-style collate with distillation: build seq2seq pairs in both
    directions, plus a teacher (simbert) cosine-similarity matrix over the
    source sentences for the distillation target.
    """
    batch_token_ids, batch_segment_ids = [], []
    batch_sim_token_ids, batch_sim_segment_ids = [], []
    for d in batch:
        text, synonyms = d['text'], d['synonyms']
        text, synonym = np.random.permutation([text] + synonyms)[:2]
        text, synonym = split(text)[0], split(synonym)[0]
        for _ in range(2):
            if np.random.random() < 0.5:
                # half the time, wwm-corrupt the source side
                text_ids = masked_encode(text)[0]
            else:
                text_ids = tokenizer.encode(text)[0]
            synonym_ids = tokenizer.encode(synonym)[0][1:]
            truncate_sequences(maxlen * 2, -2, text_ids, synonym_ids)
            token_ids = text_ids + synonym_ids
            segment_ids = [0] * len(text_ids) + [1] * len(synonym_ids)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
            # ==== for distillation: begin ====
            token_ids, segment_ids = sim_tokenizer.encode(text, maxlen=maxlen)
            batch_sim_token_ids.append(token_ids)
            batch_sim_segment_ids.append(segment_ids)
            # ==== for distillation: end ====
            text, synonym = synonym, text  # swap for the reverse direction
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    # ==== for distillation: begin ====
    batch_sim_token_ids = torch.tensor(sequence_padding(batch_sim_token_ids), dtype=torch.long, device=device)
    batch_sim_segment_ids = torch.tensor(sequence_padding(batch_sim_segment_ids), dtype=torch.long, device=device)
    sim_vecs = simbert.predict([batch_sim_token_ids, batch_sim_segment_ids])[1]
    sim_vecs /= (sim_vecs**2).sum(dim=-1, keepdims=True)**0.5  # L2-normalize
    sims = torch.matmul(sim_vecs, sim_vecs.T)
    # ==== for distillation: end ====
    return [batch_token_ids, batch_segment_ids], [batch_token_ids, batch_segment_ids, sims]
20,661 | import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, get_pool_emb, truncate_sequences
from bert4torch.generation import AutoRegressiveDecoder
from bert4torch.callbacks import Callback
from bert4torch.tokenizers import Tokenizer
import jieba
def split(text):
    """Split `text` into segments of at most ~1.2 * maxlen characters.

    NOTE(review): passes maxlen * 1.2 as a float -- confirm text_segmentate
    accepts floats.
    """
    seps, strips = u'\n。!?!?;;,, ', u';;,, '
    return text_segmentate(text, maxlen * 1.2, seps, strips)
def masked_encode(text):
    """Whole-word random masking encode (BERT-style 80/10/10 scheme).

    Returns (source, target): source is the corrupted id sequence including
    [CLS]/[SEP]; target holds the original ids at corrupted positions and 0
    elsewhere (0 = not predicted).
    """
    words = jieba.lcut(text)
    rands = np.random.random(len(words))
    source, target = [tokenizer._token_start_id], [0]
    for r, w in zip(rands, words):
        ids = tokenizer.encode(w)[0][1:-1]
        if r < 0.15 * 0.8:
            # 12%: whole word replaced by [MASK]
            source.extend([tokenizer._token_mask_id] * len(ids))
            target.extend(ids)
        elif r < 0.15 * 0.9:
            # 1.5%: kept as-is but still predicted
            source.extend(ids)
            target.extend(ids)
        elif r < 0.15:
            # 1.5%: replaced by random non-PAD tokens
            source.extend(
                np.random.choice(tokenizer._vocab_size - 1, size=len(ids)) + 1
            )
            target.extend(ids)
        else:
            # 85%: untouched, not predicted
            source.extend(ids)
            target.extend([0] * len(ids))
    source = source[:maxlen - 1] + [tokenizer._token_end_id]
    target = target[:maxlen - 1] + [0]
    return source, target
def gen_synonyms(text, n=100, k=20):
    """Generate n paraphrase candidates of `text` via seq2seq decoding, then
    return the k candidates most similar to `text`, ranked by the encoder
    sentence embeddings.

    Example:
    >>> gen_synonyms(u'微信和支付宝哪个好?')
    [
        u'微信和支付宝,哪个好?',
        u'微信和支付宝哪个好',
        u'支付宝和微信哪个好',
        u'支付宝和微信哪个好啊',
        u'微信和支付宝那个好用?',
        u'微信和支付宝哪个好用',
        u'支付宝和微信那个更好',
        u'支付宝和微信哪个好用',
        u'微信和支付宝用起来哪个好?',
        u'微信和支付宝选哪个好',
    ]
    """
    r = synonyms_generator.generate(text, n)
    r = [i for i in set(r) if i != text]  # de-duplicate; drop the input itself
    r = [text] + r  # embed the original in the same batch for comparison
    Z = cal_sen_emb(r)
    Z /= (Z**2).sum(dim=1, keepdims=True)**0.5  # L2-normalize rows
    argsort = torch.matmul(Z[1:], -Z[0]).argsort()  # most similar first
    return [r[i + 1] for i in argsort[:k]]
The provided code snippet includes necessary dependencies for implementing the `just_show` function. Write a Python function `def just_show(some_samples)` to solve the following problem:
随机观察一些样本的效果
Here is the function:
def just_show(some_samples):
    """Print 3 random samples with their generated synonyms (demo helper).

    NOTE(review): the bare `except` silently hides generation errors;
    consider narrowing to `except Exception` and logging.
    """
    S = [np.random.choice(some_samples) for _ in range(3)]
    for s in S:
        try:
            print(u'原句子:%s' % s)
            print(u'同义句子:', gen_synonyms(s, 10, 10))
            print()
        except:
            pass
20,662 | rch.models import build_transformer_model
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer
from bert4torch.generation import AutoRegressiveDecoder
import glob
= 256
batch_size = 8
epochs = 10000
th = 'E:/pretrain_ckpt/gpt/thu-coai@CDial-GPT_LCCC-base/'
config_path = root_path + 'bert4torch_config.json'
checkpoint_path = root_path + 'pytorch_model.bin'
dict_path = root_path + 'bert4torch_vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = Tokenizer(dict_path, do_lower_case=True) ate_fn(batch):
"""单条样本格式:[CLS]篇章[SEP]答案[SEP]问题[SEP]
"""
batch_token_ids, batch_segment_ids = [], []
for txt in batch:
text = open(txt, encoding='utf-8').read()
text = text.split('\n')
if len(text) > 1:
title = text[0]
content = '\n'.join(text[1:])
token_ids, segment_ids = tokenizer.encode(content, title, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_token_ids
train_dataloader = DataLoader(ListDataset(glob.glob('E:/data/corpus/sentence_classification/THUCNews/*/*.txt')),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
encoder = build_transformer_model(config_path, checkpoint_path, add_trainer=True).to(device) pyLoss(nn.CrossEntropyLoss):
encoder.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(encoder.parameters(), 1e-5))
autotitle = AutoTitle(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=32, device=device)
Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, steps, epoch, logs=None):
# 保存最优
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
# 演示效果
just_show()
if __name__ == '__main__':
just_show()
evaluator = Evaluator()
encoder.fit(
train_dataloader,
steps_per_epoch=None,
epochs=epochs,
callbacks=[]
)
else:
encoder.load_weights('./best_model.pt')
The provided code snippet includes necessary dependencies for implementing the `collate_fn` function. Write a Python function `def collate_fn(batch)` to solve the following problem:
单条样本格式:[CLS]篇章[SEP]答案[SEP]问题[SEP]
Here is the function:
def collate_fn(batch):
    """Collate THUCNews .txt files into seq2seq title-generation pairs.

    Each file: first line = title, remainder = body.  Encoded as
    [CLS]content[SEP]title[SEP]; the token ids double as labels for the
    UniLM-style LM loss.  (The previous docstring described a different,
    QA-style layout and did not match this code.)
    """
    batch_token_ids, batch_segment_ids = [], []
    for txt in batch:
        text = open(txt, encoding='utf-8').read()
        text = text.split('\n')
        if len(text) > 1:  # skip files without a body
            title = text[0]
            content = '\n'.join(text[1:])
            token_ids, segment_ids = tokenizer.encode(content, title, maxlen=maxlen)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    return [batch_token_ids, batch_segment_ids], batch_token_ids
20,663 | rch.models import build_transformer_model
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer
from bert4torch.generation import AutoRegressiveDecoder
import glob
= 256
batch_size = 8
epochs = 10000
th = 'E:/pretrain_ckpt/gpt/thu-coai@CDial-GPT_LCCC-base/'
config_path = root_path + 'bert4torch_config.json'
checkpoint_path = root_path + 'pytorch_model.bin'
dict_path = root_path + 'bert4torch_vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = Tokenizer(dict_path, do_lower_case=True) ate_fn(batch):
"""单条样本格式:[CLS]篇章[SEP]答案[SEP]问题[SEP]
"""
batch_token_ids, batch_segment_ids = [], []
for txt in batch:
text = open(txt, encoding='utf-8').read()
text = text.split('\n')
if len(text) > 1:
title = text[0]
content = '\n'.join(text[1:])
token_ids, segment_ids = tokenizer.encode(content, title, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_token_ids
train_dataloader = DataLoader(ListDataset(glob.glob('E:/data/corpus/sentence_classification/THUCNews/*/*.txt')),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
encoder = build_transformer_model(config_path, checkpoint_path, add_trainer=True).to(device) pyLoss(nn.CrossEntropyLoss):
encoder.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(encoder.parameters(), 1e-5))
autotitle = AutoTitle(bos_token_id=None, eos_token_id=tokenizer._token_end_id, max_new_tokens=32, device=device)
Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, steps, epoch, logs=None):
# 保存最优
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
# 演示效果
just_show()
if __name__ == '__main__':
just_show()
evaluator = Evaluator()
encoder.fit(
train_dataloader,
steps_per_epoch=None,
epochs=epochs,
callbacks=[]
)
else:
encoder.load_weights('./best_model.pt')
def just_show():
    """Generate and print titles for two fixed demo sentences."""
    s1 = u'别爱我没结果'
    s2 = u'你这样会失去我的'
    for s in [s1, s2]:
        print(u'生成标题:', autotitle.generate(s))
20,664 | import torch
import torch.nn as nn
import numpy as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from torch.optim import Adam
from bert4torch.snippets import sequence_padding, ListDataset, log_warn_once
from bert4torch.callbacks import Callback
from torch.utils.data import DataLoader
from torchinfo import summary
def load_data(filename):
    """Load a tab-separated "text<TAB>label" file into [(text, int_label), ...]."""
    samples = []
    with open(filename, encoding='utf-8') as fin:
        for line in fin:
            text, label = line.strip().split('\t')
            samples.append((text, int(label)))
    return samples
20,665 | import torch
import torch.nn as nn
import numpy as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from torch.optim import Adam
from bert4torch.snippets import sequence_padding, ListDataset, log_warn_once
from bert4torch.callbacks import Callback
from torch.utils.data import DataLoader
from torchinfo import summary
choice = 'finetune_few'
if choice == 'finetune_few':
# 苏神的代码中也会对前面的token进行mask,个人感觉不mask,这样仅需微调这几个token的权重
# 打印验证是否仅更新目标的几个token,True的话会取消random_mask方便验证
use_random_mask = True
if choice == 'finetune_few':
# 只训练这几个tokens权重
model = PtuningBERT().to(device)
summary(model, input_data=next(iter(train_dataloader))[0])
elif choice == 'finetune_all':
# 全部权重一起训练
model = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_mlm=True, add_trainer=True).to(device)
summary(model, input_data=[next(iter(train_dataloader))[0]])
The provided code snippet includes necessary dependencies for implementing the `random_masking` function. Write a Python function `def random_masking(token_ids)` to solve the following problem:
对输入进行随机mask
Here is the function:
def random_masking(token_ids):
    """Randomly mask `token_ids` (BERT-style 80/10/10 MLM scheme, per token).

    Returns (source, target): source is the corrupted sequence; target holds
    the original id at corrupted positions and 0 elsewhere (0 = not
    predicted).
    """
    rands = np.random.random(len(token_ids))
    source, target = [], []
    for r, t in zip(rands, token_ids):
        if r < 0.15 * 0.8:
            # 12%: replaced by [MASK]
            source.append(tokenizer._token_mask_id)
            target.append(t)
        elif r < 0.15 * 0.9:
            # 1.5%: kept as-is but still predicted
            source.append(t)
            target.append(t)
        elif r < 0.15:
            # 1.5%: replaced by a random non-PAD token
            source.append(np.random.choice(tokenizer._vocab_size - 1) + 1)
            target.append(t)
        else:
            # 85%: untouched, not predicted
            source.append(t)
            target.append(0)
    return source, target
20,666 | import build_transformer_model, BaseModel
from torch.utils.data import DataLoader
from bert4torch.snippets import sequence_padding, ListDataset
from bert4torch.callbacks import Callback, EarlyStopping, AdversarialTraining
from bert4torch.tokenizers import Tokenizer
import torch.nn.functional as F
from sklearn.metrics import f1_score
from sklearn.model_selection import StratifiedKFold
import torch
from torch import nn, optim
from tqdm import tqdm
cuda' if torch.cuda.is_available() else 'cpu'
n = 5
SEED = 2020
batch_size = 4
grad_accum_steps = 64
lr = 2e-5
epochs = 100
def load_data(df):
    """Convert a dataframe with ``text``/``label`` columns into (text, int) pairs."""
    return [(row['text'], int(row['label'])) for _, row in df.iterrows()]
def collate_fn(batch):
    """Collate (text, label) pairs into a [batch, max_segment, maxlen] id tensor.

    Each text is segmented via ``sentence_split`` (defined elsewhere — presumably
    returns per-segment token ids; TODO confirm), padded to ``maxlen`` per
    segment and to ``max_segment`` segments per document.
    """
    docs, labels = [], []
    for text, label in batch:
        segments = sentence_split(text)
        docs.append(sequence_padding(segments, length=maxlen))
        labels.append(label)
    doc_tensor = torch.tensor(sequence_padding(docs, length=max_segment), dtype=torch.long, device=device)
    label_tensor = torch.tensor(labels, device=device)
    return doc_tensor, label_tensor
class Evaluator(Callback):
def do_train(df_train):
    """Train one model per stratified fold over *df_train*.

    Expects ``text``/``label`` columns; relies on module-level globals
    (n, SEED, batch_size, lr, epochs, grad_accum_steps, Model, collate_fn, ...).
    """
    skf = StratifiedKFold(n_splits=n, random_state=SEED, shuffle=True)
    for fold, (trn_idx, val_idx) in enumerate(skf.split(df_train['text'], df_train['label']), 1):
        print(f'[Fold {fold}]')
        train_data = load_data(df_train.iloc[trn_idx])
        valid_data = load_data(df_train.iloc[val_idx])
        train_dataloader = DataLoader(ListDataset(data=train_data), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
        valid_dataloader = DataLoader(ListDataset(data=valid_data), batch_size=batch_size, collate_fn=collate_fn)
        model = Model().to(device)
        model.compile(loss=nn.CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), lr=lr),
                      grad_accumulation_steps=grad_accum_steps)
        callbacks = [
            AdversarialTraining('fgm'),
            Evaluator(model, valid_dataloader, fold),
            EarlyStopping(monitor='val_f1', patience=5, verbose=1, mode='max'),  # must run after Evaluator (which produces val_f1)
        ]
        model.fit(
            train_dataloader,
            steps_per_epoch=None,
            epochs=epochs,
            callbacks=callbacks
        )
        # Drop the fold's model before the next iteration to release memory.
        del model
class AdversarialTraining(Callback):
    """Adversarial-training callback.

    :param mode: str, adversarial-training mode, one of {'fgm', 'pgd', 'vat', 'gradient_penalty'}
    :param adversarial: dict, configuration for the chosen mode; required keys differ per mode
    """
    def __init__(self, mode, adversarial=None, **kwargs):
        super(AdversarialTraining, self).__init__(**kwargs)
        assert mode in {'', 'fgm', 'pgd', 'vat', 'gradient_penalty'}, 'adversarial_train support fgm, pgd, vat and gradient_penalty mode'
        self.mode = mode
        # Bug fix: the original signature used a mutable default (adversarial={})
        # which is mutated below, so every instance created without an explicit
        # dict shared — and polluted — the same default object.
        if adversarial is None:
            adversarial = {}
        adversarial['epsilon'] = adversarial.get('epsilon', 1.0)
        adversarial['emb_name'] = adversarial.get('emb_name', 'word_embeddings')
        if mode == 'pgd':
            adversarial['K'] = adversarial.get('K', 3)  # number of attack steps
            adversarial['alpha'] = adversarial.get('alpha', 0.3)  # attack step size
        elif mode == 'vat':
            adversarial['K'] = adversarial.get('K', 3)
            adversarial['noise_var'] = adversarial.get('noise_var', 1e-5)  # variance of the initial noise
            adversarial['noise_gamma'] = adversarial.get('noise_gamma', 1e-6)  # eps
            adversarial['adv_step_size'] = adversarial.get('adv_step_size', 1e-3)  # attack step size
            adversarial['adv_alpha'] = adversarial.get('adv_alpha', 1)  # weight of the adversarial loss
            adversarial['norm_type'] = adversarial.get('norm_type', 'l2')  # normalization type
            adversarial['rank'] = adversarial.get('rank', 0)  # which output to treat as logits when forward returns several
        self.adversarial = adversarial

    def on_train_begin(self, logs=None):
        if self.mode in {'gradient_penalty', 'vat'}:
            self.trainer.retain_graph = True
        if self.mode == 'fgm':
            self.ad_train = FGM(self.model)
        elif self.mode == 'pgd':
            self.ad_train = PGD(self.model)
        elif self.mode == 'vat':
            self.ad_train = VAT(self.model, **self.adversarial)
        # Wrap the trainer's train_step so every batch runs the adversarial pass.
        self.trainer.old_train_step = self.trainer.train_step
        self.trainer.train_step = self.train_step

    def train_step(self, train_X, train_y):
        output, loss, loss_detail = self.trainer.old_train_step(train_X, train_y)
        # Dispatch on the configured adversarial mode.
        if self.mode == 'fgm':
            self.ad_train.attack(**self.adversarial)  # embedding is perturbed in place
            output, loss, loss_detail = self.trainer.old_train_step(train_X, train_y)
            # Restore the embedding parameters: the optimizer must update the
            # clean embedding, not the perturbed one.
            self.ad_train.restore(**self.adversarial)
        elif self.mode == 'pgd':
            self.ad_train.backup_grad()  # back up the clean gradients
            for t in range(self.adversarial['K']):
                # Add the perturbation; back up param.data on the first attack.
                self.ad_train.attack(**self.adversarial, is_first_attack=(t == 0))
                if t != self.adversarial['K'] - 1:
                    self.optimizer.zero_grad()  # accumulate the perturbation, not the gradient
                else:
                    self.ad_train.restore_grad()  # restore the clean gradients
                output, loss, loss_detail = self.trainer.old_train_step(train_X, train_y)
            self.ad_train.restore(**self.adversarial)  # restore the embedding parameters
        # gradient penalty
        elif self.mode == 'gradient_penalty':
            para = search_layer(self.model, self.adversarial['emb_name'], retrun_first=True)
            gp = (para.grad ** 2).sum()
            loss += 0.5 * gp * self.adversarial['epsilon']
            loss.backward()
        # virtual adversarial training
        elif self.mode == 'vat':
            logit = output[self.adversarial['rank']] if isinstance(output, (tuple, list)) else output
            adv_loss = self.ad_train.virtual_adversarial_training(train_X, logit)
            loss += (adv_loss if adv_loss else 0)
            loss.backward()
            loss_detail.update({'loss_sup': loss.item(), 'loss_unsup': adv_loss.item()})
        return output, loss, loss_detail
def do_train(df_train):
    """Train one model per stratified fold over *df_train*.

    Expects ``text``/``label`` columns; relies on module-level globals
    (n, SEED, batch_size, lr, epochs, grad_accum_steps, Model, collate_fn, ...).
    """
    skf = StratifiedKFold(n_splits=n, random_state=SEED, shuffle=True)
    for fold, (trn_idx, val_idx) in enumerate(skf.split(df_train['text'], df_train['label']), 1):
        print(f'[Fold {fold}]')
        train_data = load_data(df_train.iloc[trn_idx])
        valid_data = load_data(df_train.iloc[val_idx])
        train_dataloader = DataLoader(ListDataset(data=train_data), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
        valid_dataloader = DataLoader(ListDataset(data=valid_data), batch_size=batch_size, collate_fn=collate_fn)
        model = Model().to(device)
        model.compile(loss=nn.CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), lr=lr),
                      grad_accumulation_steps=grad_accum_steps)
        callbacks = [
            AdversarialTraining('fgm'),
            Evaluator(model, valid_dataloader, fold),
            EarlyStopping(monitor='val_f1', patience=5, verbose=1, mode='max'),  # must run after Evaluator (which produces val_f1)
        ]
        model.fit(
            train_dataloader,
            steps_per_epoch=None,
            epochs=epochs,
            callbacks=callbacks
        )
        # Drop the fold's model before the next iteration to release memory.
        del model
20,667 | from torch import device
from training import Model, collate_fn
import torch
from torch.utils.data import DataLoader
from bert4torch.snippets import ListDataset
import pandas as pd
from tqdm import tqdm
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data(df)` to solve the following problem:
加载数据。(Load (text, label) samples from a dataframe.)
Here is the function:
def load_data(df):
    """Build inference samples: pair each ``text`` with the placeholder label 0."""
    return [(row['text'], 0) for _, row in df.iterrows()]
20,668 | import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
    """Collate (text, label) pairs into a padded token-id tensor and a label vector."""
    token_seqs = [tokenizer.encode(text, maxlen=maxlen)[0] for text, _ in batch]
    label_rows = [[label] for _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return token_ids, labels.flatten()
def collate_fn(batch):
    """Collate (text, label) pairs into a padded token-id tensor and a label vector."""
    token_seqs = [tokenizer.encode(text, maxlen=maxlen)[0] for text, _ in batch]
    label_rows = [[label] for _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return token_ids, labels.flatten()
20,669 | import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
batch_token_ids, batch_labels = [], []
for text, label in batch:
token_ids = tokenizer.encode(text, maxlen=maxlen)[0]
batch_token_ids.append(token_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return batch_token_ids, batch_labels.flatten()
model = Model().to(device)
The provided code snippet includes necessary dependencies for implementing the `inference` function. Write a Python function `def inference(texts)` to solve the following problem:
单条样本推理(run inference on one text sample at a time)
Here is the function:
def inference(texts):
    '''Single-sample inference: print the predicted class index for each text.'''
    for text in texts:
        ids = tokenizer.encode(text, maxlen=maxlen)[0]
        ids_tensor = torch.tensor(ids, dtype=torch.long, device=device)[None, :]
        logit = model.predict(ids_tensor)
        y_pred = torch.argmax(torch.softmax(logit, dim=-1)).cpu().numpy()
        print(text, ' ----> ', y_pred)
20,670 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/gau/sushen@chinese_GAU-alpha-char_L-24_H-768_torch/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/gau/sushen@chinese_GAU-alpha-char_L-24_H-768_torch/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/gau/sushen@chinese_GAU-alpha-char_L-24_H-768_torch/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
    """Collate (text, label) pairs into padded token/segment-id tensors and a label vector."""
    encoded = [tokenizer.encode(text, maxlen=maxlen) for text, _ in batch]
    token_seqs = [pair[0] for pair in encoded]
    segment_seqs = [pair[1] for pair in encoded]
    label_rows = [[label] for _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [token_ids, segment_ids], labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
    """Evaluation/checkpoint callback ("evaluate and save the best model").
    """
    # Kept for reference: optional per-batch tensorboard logging.
    # def on_batch_end(self, global_step, local_step, logs=None):
    #     if global_step % 10 == 0:
    #         writer.add_scalar(f"train/loss", logs['loss'], global_step)
    #         val_acc = evaluate(valid_dataloader)
    #         writer.add_scalar(f"valid/acc", val_acc, global_step)
if __name__ == '__main__':
    # Script entry point: train with periodic evaluation.
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
    # When imported as a module, just restore the best checkpoint.
    model.load_weights('best_model.pt')
def collate_fn(batch):
    """Collate (text, label) pairs into padded token/segment-id tensors and a label vector."""
    encoded = [tokenizer.encode(text, maxlen=maxlen) for text, _ in batch]
    token_seqs = [pair[0] for pair in encoded]
    segment_seqs = [pair[1] for pair in encoded]
    label_rows = [[label] for _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [token_ids, segment_ids], labels.flatten()
20,671 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/gau/sushen@chinese_GAU-alpha-char_L-24_H-768_torch/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/gau/sushen@chinese_GAU-alpha-char_L-24_H-768_torch/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/gau/sushen@chinese_GAU-alpha-char_L-24_H-768_torch/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
    """Collate (text, label) pairs into padded token/segment-id tensors and a label vector."""
    encoded = [tokenizer.encode(text, maxlen=maxlen) for text, _ in batch]
    token_seqs = [pair[0] for pair in encoded]
    segment_seqs = [pair[1] for pair in encoded]
    label_rows = [[label] for _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [token_ids, segment_ids], labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
    """Evaluation/checkpoint callback ("evaluate and save the best model").
    """
    # Kept for reference: optional per-batch tensorboard logging.
    # def on_batch_end(self, global_step, local_step, logs=None):
    #     if global_step % 10 == 0:
    #         writer.add_scalar(f"train/loss", logs['loss'], global_step)
    #         val_acc = evaluate(valid_dataloader)
    #         writer.add_scalar(f"valid/acc", val_acc, global_step)
if __name__ == '__main__':
    # Script entry point: train with periodic evaluation.
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
    # When imported as a module, just restore the best checkpoint.
    model.load_weights('best_model.pt')
def evaluate(data):
    """Return the accuracy of the global ``model`` over (inputs, labels) batches."""
    correct, seen = 0., 0.
    for x_true, y_true in data:
        y_pred = model.predict(x_true).argmax(axis=1)
        seen += len(y_true)
        correct += (y_true == y_pred).sum().item()
    return correct / seen
20,672 |
def collate_fn(batch):
    """Collate (text1, text2, label) triples into padded sentence-pair tensors and labels."""
    encoded = [tokenizer.encode(t1, t2, maxlen=maxlen) for t1, t2, _ in batch]
    token_seqs = [pair[0] for pair in encoded]
    segment_seqs = [pair[1] for pair in encoded]
    label_rows = [[label] for _, _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return (token_ids, segment_ids), labels.flatten()
20,673 |
def evaluate(data):
    """Return the accuracy of the global ``model`` over (inputs, labels) batches."""
    correct, seen = 0., 0.
    for x_true, y_true in data:
        y_pred = model.predict(x_true).argmax(axis=1)
        seen += len(y_true)
        correct += (y_true == y_pred).sum().item()
    return correct / seen
20,674 | as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
from bert4torch.callbacks import Callback
from bert4torch.optimizers import Lion
import torch.nn as nn
import torch
from torch.utils.data import DataLoader, Dataset
from tensorboardX import SummaryWriter
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
writer = SummaryWriter(log_dir='./summary')
seed_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
    """Collate (text, label) pairs into padded token/segment-id tensors and a label vector."""
    encoded = [tokenizer.encode(text, maxlen=maxlen) for text, _ in batch]
    token_seqs = [pair[0] for pair in encoded]
    segment_seqs = [pair[1] for pair in encoded]
    label_rows = [[label] for _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [token_ids, segment_ids], labels.flatten()
aloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.train.data'), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data'), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.test.data'), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=Lion(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
    """Evaluation/checkpoint callback ("evaluate and save the best model").
    """
    # Kept for reference: optional per-batch tensorboard logging.
    # def on_batch_end(self, global_step, local_step, logs=None):
    #     if global_step % 10 == 0:
    #         writer.add_scalar(f"train/loss", logs['loss'], global_step)
    #         val_acc = evaluate(valid_dataloader)
    #         writer.add_scalar(f"valid/acc", val_acc, global_step)
if __name__ == '__main__':
    # Script entry point: train with periodic evaluation.
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
    # When imported as a module, just restore the best checkpoint.
    model.load_weights('best_model.pt')
def collate_fn(batch):
    """Collate (text, label) pairs into padded token/segment-id tensors and a label vector."""
    encoded = [tokenizer.encode(text, maxlen=maxlen) for text, _ in batch]
    token_seqs = [pair[0] for pair in encoded]
    segment_seqs = [pair[1] for pair in encoded]
    label_rows = [[label] for _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [token_ids, segment_ids], labels.flatten()
20,675 | as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
from bert4torch.callbacks import Callback
from bert4torch.optimizers import Lion
import torch.nn as nn
import torch
from torch.utils.data import DataLoader, Dataset
from tensorboardX import SummaryWriter
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/albert/brightmart@albert_small_zh/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
writer = SummaryWriter(log_dir='./summary')
seed_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
    """Collate (text, label) pairs into padded token/segment-id tensors and a label vector."""
    encoded = [tokenizer.encode(text, maxlen=maxlen) for text, _ in batch]
    token_seqs = [pair[0] for pair in encoded]
    segment_seqs = [pair[1] for pair in encoded]
    label_rows = [[label] for _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [token_ids, segment_ids], labels.flatten()
aloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.train.data'), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data'), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset('E:/data/corpus/sentence_classification/sentiment/sentiment.test.data'), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=Lion(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
    """Evaluation/checkpoint callback ("evaluate and save the best model").
    """
    # Kept for reference: optional per-batch tensorboard logging.
    # def on_batch_end(self, global_step, local_step, logs=None):
    #     if global_step % 10 == 0:
    #         writer.add_scalar(f"train/loss", logs['loss'], global_step)
    #         val_acc = evaluate(valid_dataloader)
    #         writer.add_scalar(f"valid/acc", val_acc, global_step)
if __name__ == '__main__':
    # Script entry point: train with periodic evaluation.
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
    # When imported as a module, just restore the best checkpoint.
    model.load_weights('best_model.pt')
def evaluate(data):
    """Return the accuracy of the global ``model`` over (inputs, labels) batches."""
    correct, seen = 0., 0.
    for x_true, y_true in data:
        y_pred = model.predict(x_true).argmax(axis=1)
        seen += len(y_true)
        correct += (y_true == y_pred).sum().item()
    return correct / seen
20,676 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
    """Collate (text, label) pairs into padded token/segment-id tensors and a label vector."""
    encoded = [tokenizer.encode(text, maxlen=maxlen) for text, _ in batch]
    token_seqs = [pair[0] for pair in encoded]
    segment_seqs = [pair[1] for pair in encoded]
    label_rows = [[label] for _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [token_ids, segment_ids], labels.flatten()
def collate_fn(batch):
    """Collate (text, label) pairs into padded token/segment-id tensors and a label vector."""
    encoded = [tokenizer.encode(text, maxlen=maxlen) for text, _ in batch]
    token_seqs = [pair[0] for pair in encoded]
    segment_seqs = [pair[1] for pair in encoded]
    label_rows = [[label] for _, label in batch]
    token_ids = torch.tensor(sequence_padding(token_seqs), dtype=torch.long, device=device)
    segment_ids = torch.tensor(sequence_padding(segment_seqs), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [token_ids, segment_ids], labels.flatten()
20,677 | from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
model = Model().to(device)
The provided code snippet includes necessary dependencies for implementing the `inference` function. Write a Python function `def inference(texts)` to solve the following problem:
单条样本推理(run inference on one text sample at a time)
Here is the function:
def inference(texts):
    '''Single-sample inference: print the predicted class index for each text.'''
    for text in texts:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        tok = torch.tensor(token_ids, dtype=torch.long, device=device)[None, :]
        seg = torch.tensor(segment_ids, dtype=torch.long, device=device)[None, :]
        logit = model.predict([tok, seg])
        y_pred = torch.argmax(torch.softmax(logit, dim=-1)).cpu().numpy()
        print(text, ' ----> ', y_pred)
20,678 | t from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, seed_everything
from bert4torch.callbacks import AdversarialTraining
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
from sklearn.metrics import f1_score, classification_report, accuracy_score
import warnings
maxlen = 512
D = []
seps, strips = u'\n。!?!?;;,, ', u';;,, '
with open(filename, encoding='utf-8') as f:
for l in tqdm(f.readlines(), desc="Loading data"):
taskData = json.loads(l.strip())
text2 = ''.join([ent+'[MASK]' for ent in taskData['entity'].keys()]) + '[SEP]'
text2_len = sum([len(ent)+1 for ent in taskData['entity'].keys()]) + 1
for t in text_segmentate(taskData['content'], maxlen-text2_len-2, seps, strips):
D.append((t, text2, taskData['entity']))
return
def load_data(filename):
    """Load JSON-lines entity-sentiment data, segmenting long contents.

    Each line is a JSON object with ``content`` and ``entity`` keys; the
    entity prompt (one '[MASK]' per entity, then '[SEP]') is appended to
    every content segment that fits within ``maxlen``.
    """
    samples = []
    seps, strips = u'\n。!?!?;;,, ', u';;,, '
    with open(filename, encoding='utf-8') as fh:
        for line in tqdm(fh.readlines(), desc="Loading data"):
            record = json.loads(line.strip())
            prompt = ''.join(ent + '[MASK]' for ent in record['entity'].keys()) + '[SEP]'
            prompt_len = sum(len(ent) + 1 for ent in record['entity'].keys()) + 1
            for piece in text_segmentate(record['content'], maxlen - prompt_len - 2, seps, strips):
                samples.append((piece, prompt, record['entity']))
    return samples
20,679 | torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, seed_everything
from bert4torch.callbacks import AdversarialTraining
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
from sklearn.metrics import f1_score, classification_report, accuracy_score
import warnings
device = f'cuda' if torch.cuda.is_available() else 'cpu'
categories = [-2, -1, 0, 1, 2]
def search(tokens, start_idx=0):
    """Return the (offset) indices of every '[MASK]' token in *tokens*.

    Args:
        tokens: sequence of token strings to scan.
        start_idx: offset added to each matching position (e.g. the length of
            a preceding token sequence the result will be indexed against).

    Returns:
        list[int]: positions of '[MASK]' tokens, shifted by ``start_idx``.
    """
    # Bug fix: the accumulator was an undefined name (NameError at runtime)
    # and nothing was returned, yet callers assign and index the result
    # (ent_ids_raw = search(...)); build and return the list locally.
    return [i + start_idx for i, tok in enumerate(tokens) if tok == '[MASK]']
batch_token_ids, batch_entity_ids, batch_entity_labels = [], [], []
for text1, text2, entity in batch:
token_ids1 = tokenizer.encode(text1)[0]
tokens2 = tokenizer.tokenize(text2)[1:-1]
token_ids2 = tokenizer.tokens_to_ids(tokens2)
ent_ids_raw = search(tokens2, start_idx=len(token_ids1))
# 不在原文中的实体,其[MASK]标记不用于计算loss
ent_labels, ent_ids = [], []
for i, (ent, label) in enumerate(entity.items()):
if ent in text1:
assert tokens2[ent_ids_raw[i]-len(token_ids1)] == '[MASK]'
ent_ids.append(ent_ids_raw[i])
ent_labels.append(categories.index(label))
batch_token_ids.append(token_ids1 + token_ids2)
batch_entity_ids.append(ent_ids)
batch_entity_labels.append(ent_labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_entity_ids = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device)
batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels, value=-1), dtype=torch.long, device=device) urn [batch_token_ids, batch_entity_ids], batch_entity_label
def collate_fn(batch):
    """Collate (text1, text2, entity-dict) triples for entity-sentiment MLM.

    ``text2`` carries one '[MASK]' slot per entity; positions of those slots
    (offset past text1's ids) are gathered so the model predicts a sentiment
    class at each entity's mask position.
    """
    batch_token_ids, batch_entity_ids, batch_entity_labels = [], [], []
    for text1, text2, entity in batch:
        token_ids1 = tokenizer.encode(text1)[0]
        tokens2 = tokenizer.tokenize(text2)[1:-1]
        token_ids2 = tokenizer.tokens_to_ids(tokens2)
        ent_ids_raw = search(tokens2, start_idx=len(token_ids1))
        # Entities absent from text1: their [MASK] markers are excluded from the loss.
        ent_labels, ent_ids = [], []
        for i, (ent, label) in enumerate(entity.items()):
            if ent in text1:
                # Sanity check: the recorded position really is a [MASK] slot.
                assert tokens2[ent_ids_raw[i]-len(token_ids1)] == '[MASK]'
                ent_ids.append(ent_ids_raw[i])
                ent_labels.append(categories.index(label))
        batch_token_ids.append(token_ids1 + token_ids2)
        batch_entity_ids.append(ent_ids)
        batch_entity_labels.append(ent_labels)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_entity_ids = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device)
    batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels, value=-1), dtype=torch.long, device=device)  # [btz, num_entities]; -1 pads ignored positions
    return [batch_token_ids, batch_entity_ids], batch_entity_labels
20,680 | torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, seed_everything
from bert4torch.callbacks import AdversarialTraining
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
from sklearn.metrics import f1_score, classification_report, accuracy_score
import warnings
def average_function(ax: torch.Tensor, x: torch.Tensor, num: int) -> torch.Tensor:
    """Fold a new sample ``x`` into the running mean ``ax``.

    ``num`` is the count of samples already averaged into ``ax``; the result
    is the mean over ``num + 1`` samples (Welford-style incremental update).
    """
    delta = x - ax
    return ax + delta / (num + 1)
20,681 | from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, seed_everything
from bert4torch.callbacks import AdversarialTraining
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.tokenizers import Tokenizer, SpTokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import transformers
import random
from sklearn.metrics import f1_score, classification_report, accuracy_score
import warnings
mask_symbol = '<mask>'
D = []
with open(filename, encoding='utf-8') as f:
for l in tqdm(f.readlines(), desc="Loading data"):
taskData = json.loads(l.strip())
text2 = ''.join([ent+mask_symbol for ent in taskData['entity'].keys()])
D.append((taskData['content'], text2, taskData['entity']))
return
def load_data(filename):
    """Load a jsonl file of annotated samples.

    Each line is a JSON object with 'content' and 'entity' fields; the
    returned list holds (content, entity-prompt, entity-dict) triples,
    where the prompt concatenates every entity followed by the mask symbol.
    """
    samples = []
    with open(filename, encoding='utf-8') as f:
        for line in tqdm(f.readlines(), desc="Loading data"):
            record = json.loads(line.strip())
            prompt = ''.join(ent + mask_symbol for ent in record['entity'].keys())
            samples.append((record['content'], prompt, record['entity']))
    return samples
20,682 | torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, seed_everything
from bert4torch.callbacks import AdversarialTraining
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.tokenizers import Tokenizer, SpTokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import transformers
import random
from sklearn.metrics import f1_score, classification_report, accuracy_score
import warnings
device = f'cuda' if torch.cuda.is_available() else 'cpu'
maxlen = 900
categories = [-2, -1, 0, 1, 2]
def search(tokens, search_token, start_idx=0):
def collate_fn(batch):
    """Assemble (text, prompt, entity) samples into padded tensors.

    Encodes the text/prompt pair, locates every [MASK] token, and pairs
    it with the index of its sentiment label in ``categories``.  Labels
    are padded with -1 (ignored positions).
    """
    batch_token_ids, batch_segment_ids, batch_entity_ids, batch_entity_labels = [], [], [], []
    for text, prompt, entity in batch:
        encoded = tokenizer.__call__(text=text, text_pair=prompt, add_special_tokens=True, max_length=maxlen, truncation="only_first")
        token_ids = encoded['input_ids']
        segment_ids = encoded['token_type_ids']
        mask_positions = search(token_ids, tokenizer.mask_token_id)
        label_ids = [categories.index(lbl) for lbl in entity.values()]
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
        batch_entity_ids.append(mask_positions)
        batch_entity_labels.append(label_ids)
    # 2023-05-13 fix: xlnet's pad_id is 5, so padding (and build_transformer_model) must use it explicitly
    token_tensor = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer.pad_token_id), dtype=torch.long, device=device)
    segment_tensor = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    entity_tensor = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device)
    label_tensor = torch.tensor(sequence_padding(batch_entity_labels, value=-1), dtype=torch.long, device=device)  # [btz, num_entities]
    return [token_tensor, segment_tensor, entity_tensor], label_tensor
20,683 | torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate, seed_everything
from bert4torch.callbacks import AdversarialTraining
from bert4torch.optimizers import get_linear_schedule_with_warmup
from bert4torch.tokenizers import Tokenizer, SpTokenizer
from bert4torch.models import build_transformer_model, BaseModel
from tqdm import tqdm
import transformers
import random
from sklearn.metrics import f1_score, classification_report, accuracy_score
import warnings
def average_function(ax: torch.Tensor, x: torch.Tensor, num: int) -> torch.Tensor:
    """Running-mean update: shift ``ax`` toward ``x`` by 1/(num+1) of the gap."""
    step = (x - ax) / (num + 1)
    return ax + step
20,684 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate
from bert4torch.callbacks import AdversarialTraining
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.losses import FocalLoss
from tqdm import tqdm
from sklearn.metrics import f1_score, classification_report, accuracy_score
import random
import os
import argparse
import pickle
import warnings
device = f'cuda:{gpuid}' if torch.cuda.is_available() else 'cpu'
maxlen = 512
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
batch_extra, batch_token_ids, batch_entity_ids, batch_entity_labels = [], [], [], []
for d in batch:
id, contents, entities = d[0], d[1], d[2:]
tokens = tokenizer.tokenize(contents, maxlen=maxlen)[1:-1]
tokens = ['[CLS]'] + [j for i in tokens for j in i] + ['[SEP]'] # 转成char为单位的
mapping = tokenizer.rematch(contents, tokens)
start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
token_ids = tokenizer.tokens_to_ids(tokens)
entity_ids, entity_labels, extra_map = [], [], {}
for ent, start, end, label in entities:
if start in start_mapping and end in end_mapping:
start = start_mapping[start]
end = end_mapping[end]
entity_ids.append([start, end])
# # 验证边界id没有问题
# if ''.join(tokenizer.ids_to_tokens(token_ids[start:end+1])) != ent.lower():
# print(''.join(tokenizer.ids_to_tokens(token_ids[start:end+1])), ent)
entity_labels.append(label)
extra_map[(start, end)] = (ent, label)
if not entity_ids: # 至少要有一个标签
entity_ids.append([0, 0]) # 如果没有则用0填充
entity_labels.append(0)
batch_extra.append((id, extra_map))
batch_token_ids.append(token_ids)
batch_entity_ids.append(entity_ids)
batch_entity_labels.append(entity_labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_entity_ids = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device) _entity_labels = torch.tensor(sequence_padding(batch_entity_labels), dtype=torch.long, device=device) urn [batch_token_ids, batch_entity_ids, batch_extra], batch_entity_label
def collate_fn(batch):
    """Batch (id, content, *entities) samples into span-classification tensors.

    Each entity is (text, char_start, char_end, label).  Character offsets
    are remapped to token offsets via tokenizer.rematch; entities whose
    boundaries fall outside the (possibly truncated) token sequence are
    dropped.  Also returns per-sample bookkeeping
    (sample id, {(start, end): (entity, label)}) used at evaluation time.
    """
    batch_extra, batch_token_ids, batch_entity_ids, batch_entity_labels = [], [], [], []
    for d in batch:
        id, contents, entities = d[0], d[1], d[2:]
        tokens = tokenizer.tokenize(contents, maxlen=maxlen)[1:-1]
        tokens = ['[CLS]'] + [j for i in tokens for j in i] + ['[SEP]']  # re-split into char-level tokens
        mapping = tokenizer.rematch(contents, tokens)
        # char offset -> token index, for span starts and ends respectively
        start_mapping = {j[0]: i for i, j in enumerate(mapping) if j}
        end_mapping = {j[-1]: i for i, j in enumerate(mapping) if j}
        token_ids = tokenizer.tokens_to_ids(tokens)
        entity_ids, entity_labels, extra_map = [], [], {}
        for ent, start, end, label in entities:
            if start in start_mapping and end in end_mapping:
                start = start_mapping[start]
                end = end_mapping[end]
                entity_ids.append([start, end])
                # # sanity-check the span boundaries against the entity text
                # if ''.join(tokenizer.ids_to_tokens(token_ids[start:end+1])) != ent.lower():
                #     print(''.join(tokenizer.ids_to_tokens(token_ids[start:end+1])), ent)
                entity_labels.append(label)
                extra_map[(start, end)] = (ent, label)
        if not entity_ids:  # every sample needs at least one span
            entity_ids.append([0, 0])  # pad with a dummy [0, 0] span
            entity_labels.append(0)
        batch_extra.append((id, extra_map))
        batch_token_ids.append(token_ids)
        batch_entity_ids.append(entity_ids)
        batch_entity_labels.append(entity_labels)
    batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    batch_entity_ids = torch.tensor(sequence_padding(batch_entity_ids), dtype=torch.long, device=device)  # [btz, num_entities, start/end]
    batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels), dtype=torch.long, device=device)  # [btz, num_entities]
    return [batch_token_ids, batch_entity_ids, batch_extra], batch_entity_labels
20,685 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate
from bert4torch.callbacks import AdversarialTraining
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.losses import FocalLoss
from tqdm import tqdm
from sklearn.metrics import f1_score, classification_report, accuracy_score
import random
import os
import argparse
import pickle
import warnings
total_eval_step = None
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
model = Model().to(device)
print(categories_count)
model.compile(loss=Loss(), optimizer=optim.Adam(model.parameters(), lr=1e-5))
def trans_entity2tuple(entity_ids, entity_labels, entity_probs=None):
    """Convert span tensors into metric-friendly tuples.

    Returns (labels, tuples) where each tuple is
    (sample_idx, ent_idx, start, end, label) — plus the probability as a
    sixth element when ``entity_probs`` is given.  Spans with a zero
    boundary (padding/dummy spans) are skipped.
    """
    y, ent_tuple = [], []
    for smp_idx, spans in enumerate(entity_ids):
        for ent_idx, span in enumerate(spans):
            start, end = span[0].item(), span[1].item()
            if start == 0 or end == 0:
                continue  # dummy / padded span
            label = entity_labels[smp_idx, ent_idx].item()
            record = (smp_idx, ent_idx, start, end, label)
            if entity_probs is not None:
                record = record + (entity_probs[smp_idx, ent_idx].item(),)
            y.append(label)
            ent_tuple.append(record)
    return y, ent_tuple
def evaluate(data):
    """Run the model over `data`, compute macro-F1/accuracy over entity labels,
    and build per-sample submission results.

    Returns (f1, acc, result, result_prob) where result maps sample_id ->
    {entity_name: predicted_label} and result_prob keeps the full softmax
    distribution per entity.  Relies on module-level `model`,
    `total_eval_step`, and `trans_entity2tuple`.
    """
    valid_true, valid_pred = [], []
    eval_step = 0
    result, result_prob = dict(), dict()
    for (token_ids, entity_ids, extra), entity_labels in tqdm(data):
        entity_logit = model.predict([token_ids, entity_ids])[0]  # [btz, num_entities, num_classes]
        entity_logit = F.softmax(entity_logit, dim=-1)
        entity_prob, entity_pred = torch.max(entity_logit, dim=-1)  # [btz, num_entities]
        # v_pred / v_true are per-entity labels; entity_tuple holds
        # (smp_id, ent_id, start, end, label, prob) records
        v_pred, entity_tuple = trans_entity2tuple(entity_ids, entity_pred, entity_prob)
        v_true, _ = trans_entity2tuple(entity_ids, entity_labels)
        valid_pred.extend(v_pred)
        valid_true.extend(v_true)
        # generate submit result
        for id_, ent_id_, start, end, label_, prob in entity_tuple:
            label_ = label_-3  # shift class index back into the signed label range -- TODO confirm offset matches `categories`
            smp_id, s_e_ents = extra[id_][0], extra[id_][1]
            if (start, end) not in s_e_ents:
                raise ValueError('entity missing')
            if smp_id not in result:
                result[smp_id], result_prob[smp_id] = {}, {}
            ent_name = s_e_ents[(start, end)][0]
            if ent_name in result[smp_id] and prob < result[smp_id][ent_name][-1]:
                # same entity already predicted with higher confidence: keep the old prediction
                continue
            else:
                result[smp_id].update({ent_name: (label_, prob)})
                ent_prob = entity_logit[id_][ent_id_].cpu().numpy()
                result_prob[smp_id].update({ent_name: ent_prob})
                assert prob == ent_prob[label_+3]  # max prob must equal the predicted class's probability
        eval_step += 1
        # optional early stop for quick partial evaluation
        if (total_eval_step is not None) and (eval_step >= total_eval_step):
            break
    valid_true = np.array(valid_true)
    valid_pred = np.array(valid_pred)
    f1 = f1_score(valid_true, valid_pred, average='macro')
    acc = accuracy_score(valid_true, valid_pred)
    print(classification_report(valid_true, valid_pred))
    # keep only the label, drop the probability
    for k, v in result.items():
        result[k] = {i: j[0] for i, j in v.items()}
    return f1, acc, result, result_prob
20,686 | import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, ListDataset, text_segmentate
from bert4torch.callbacks import AdversarialTraining
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.losses import FocalLoss
from tqdm import tqdm
from sklearn.metrics import f1_score, classification_report, accuracy_score
import random
import os
import argparse
import pickle
import warnings
def save_result(result, result_prob, save_path):
    """Write prediction results to a TSV file and raw probabilities to a pickle.

    Args:
        result: {sample_id: prediction} mapping, written as "id\\tresult" rows
            sorted by sample id.
        result_prob: {sample_id: probabilities} mapping, pickled alongside the
            TSV with the same base name plus a "_prob.pkl" suffix.
        save_path: destination path of the TSV file.
    """
    # sort rows by sample id so the output order is deterministic
    rows = sorted(result.items(), key=lambda kv: kv[0])
    # build via join instead of repeated string concatenation (linear, not quadratic)
    lines = ['id\tresult'] + [f'{key}\t{value}' for key, value in rows]
    with open(save_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(lines) + '\n')
    # derive the pickle path robustly (was save_path[:-4], which assumed a
    # 4-character extension like ".txt"); splitext handles any extension
    prob_path = os.path.splitext(save_path)[0] + '_prob.pkl'
    with open(prob_path, 'wb') as f:
        pickle.dump(result_prob, f)
20,687 | from bert4torch.tokenizers import SpTokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything
import torch.nn as nn
import torch
import torch.optim as optim
import random, os, numpy as np
from torch.utils.data import DataLoader
maxlen = 256
batch_size = 16
pretrain_model = 'E:/pretrain_ckpt/xlnet/hfl@chinese-xlnet-base/'
config_path = pretrain_model + 'bert4torch_config.json'
checkpoint_path = pretrain_model + 'pytorch_model.bin'
spm_path = pretrain_model + 'spiece.model'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
= SpTokenizer(spm_path, token_start=None, token_end=None)
sep_id = tokenizer.sp_model.piece_to_id('<sep>') = tokenizer.sp_model.piece_to_id('<cls>')
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_labels = [], []
for text, label in batch:
token_ids, _ = tokenizer.encode(text, maxlen=maxlen-2)
# single sequence X <sep> <cls>
# pair sequence A <sep> B <sep> <cls>
batch_token_ids.append(token_ids + [sep_id, cls_id])
batch_labels.append([label])
# 用tokenizer的pad_id来做padding
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer._token_pad_id, mode='pre'), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return batch_token_ids, batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def collate_fn(batch):
    """Pack (text, label) pairs for XLNet: append <sep><cls> to each sequence
    and left-pad with the tokenizer's pad id."""
    batch_token_ids, batch_labels = [], []
    for text, label in batch:
        token_ids, _ = tokenizer.encode(text, maxlen=maxlen-2)
        # single sequence: X <sep> <cls>; pair: A <sep> B <sep> <cls>
        batch_token_ids.append(token_ids + [sep_id, cls_id])
        batch_labels.append([label])
    # left-pad ('pre') using the tokenizer's own pad id
    padded = sequence_padding(batch_token_ids, value=tokenizer._token_pad_id, mode='pre')
    token_tensor = torch.tensor(padded, dtype=torch.long, device=device)
    label_tensor = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return token_tensor, label_tensor.flatten()
20,688 | from bert4torch.tokenizers import SpTokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything
import torch.nn as nn
import torch
import torch.optim as optim
import random, os, numpy as np
from torch.utils.data import DataLoader
maxlen = 256
batch_size = 16
pretrain_model = 'E:/pretrain_ckpt/xlnet/hfl@chinese-xlnet-base/'
config_path = pretrain_model + 'bert4torch_config.json'
checkpoint_path = pretrain_model + 'pytorch_model.bin'
spm_path = pretrain_model + 'spiece.model'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
= SpTokenizer(spm_path, token_start=None, token_end=None)
sep_id = tokenizer.sp_model.piece_to_id('<sep>') = tokenizer.sp_model.piece_to_id('<cls>')
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_labels = [], []
for text, label in batch:
token_ids, _ = tokenizer.encode(text, maxlen=maxlen-2)
# single sequence X <sep> <cls>
# pair sequence A <sep> B <sep> <cls>
batch_token_ids.append(token_ids + [sep_id, cls_id])
batch_labels.append([label])
# 用tokenizer的pad_id来做padding
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids, value=tokenizer._token_pad_id, mode='pre'), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return batch_token_ids, batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def evaluate(data):
    """Return the classification accuracy of `model` over a dataloader."""
    correct, seen = 0., 0.
    for inputs, labels in data:
        preds = model.predict(inputs).argmax(axis=1)
        seen += len(labels)
        correct += (labels == preds).sum().item()
    return correct / seen
20,689 | import numpy as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def collate_fn(batch):
    """Tokenize (text, label) pairs and pad token/segment ids into tensors."""
    token_id_lists, segment_id_lists, label_lists = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        token_id_lists.append(token_ids)
        segment_id_lists.append(segment_ids)
        label_lists.append([label])
    token_tensor = torch.tensor(sequence_padding(token_id_lists), dtype=torch.long, device=device)
    segment_tensor = torch.tensor(sequence_padding(segment_id_lists), dtype=torch.long, device=device)
    label_tensor = torch.tensor(label_lists, dtype=torch.long, device=device)
    return [token_tensor, segment_tensor], label_tensor.flatten()
20,690 | import numpy as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/nezha/huawei_noah@nezha-cn-base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
d_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def evaluate(data):
    """Compute the accuracy of `model` predictions over the given dataloader."""
    n_total, n_correct = 0., 0.
    for batch_x, batch_y in data:
        batch_pred = model.predict(batch_x).argmax(axis=1)
        n_total += len(batch_y)
        n_correct += (batch_y == batch_pred).sum().item()
    return n_correct / n_total
20,691 | import numpy as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/electra/hfl@chinese-electra-base-discriminator/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/electra/hfl@chinese-electra-base-discriminator/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/electra/hfl@chinese-electra-base-discriminator/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def collate_fn(batch):
    """Encode a batch of (text, label) pairs into padded id/segment tensors."""
    all_token_ids, all_segment_ids, all_labels = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        all_token_ids.append(token_ids)
        all_segment_ids.append(segment_ids)
        all_labels.append([label])
    tokens = torch.tensor(sequence_padding(all_token_ids), dtype=torch.long, device=device)
    segments = torch.tensor(sequence_padding(all_segment_ids), dtype=torch.long, device=device)
    labels = torch.tensor(all_labels, dtype=torch.long, device=device)
    return [tokens, segments], labels.flatten()
20,692 | import numpy as np
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import random
import os
import numpy as np
maxlen = 256
batch_size = 16
config_path = 'E:/pretrain_ckpt/electra/hfl@chinese-electra-base-discriminator/bert4torch_config.json'
checkpoint_path = 'E:/pretrain_ckpt/electra/hfl@chinese-electra-base-discriminator/pytorch_model.bin'
dict_path = 'E:/pretrain_ckpt/electra/hfl@chinese-electra-base-discriminator/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)
= Tokenizer(dict_path, do_lower_case=True)
ataset(ListDataset):
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
aloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
eModel):
model = Model().to(device)
CrossEntropyLoss(),
optimizer=optim.Adam(model.parameters(), lr=2e-5),
metrics=['accuracy']
)
e(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
def evaluate(data):
    """Accuracy of `model` over `data` (fraction of correct argmax predictions)."""
    hits, count = 0., 0.
    for features, targets in data:
        predictions = model.predict(features).argmax(axis=1)
        count += len(targets)
        hits += (targets == predictions).sum().item()
    return hits / count
20,693 | import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.callbacks import Callback
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import jieba
maxlen = 256
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def collate_fn(batch):
    """Turn (text, label) samples into padded [token_ids, segment_ids] tensors
    plus a flat label tensor."""
    ids_acc, seg_acc, lbl_acc = [], [], []
    for text, label in batch:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        ids_acc.append(token_ids)
        seg_acc.append(segment_ids)
        lbl_acc.append([label])
    ids_tensor = torch.tensor(sequence_padding(ids_acc), dtype=torch.long, device=device)
    seg_tensor = torch.tensor(sequence_padding(seg_acc), dtype=torch.long, device=device)
    lbl_tensor = torch.tensor(lbl_acc, dtype=torch.long, device=device)
    return [ids_tensor, seg_tensor], lbl_tensor.flatten()
def collate_fn(batch):
    """Collate (text, label) pairs: encode each text, pad to a uniform length,
    and return tensors ready for the model."""
    batch_token_ids, batch_segment_ids, batch_labels = [], [], []
    for text, label in batch:
        encoded_ids, encoded_segments = tokenizer.encode(text, maxlen=maxlen)
        batch_token_ids.append(encoded_ids)
        batch_segment_ids.append(encoded_segments)
        batch_labels.append([label])
    padded_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
    padded_segments = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
    return [padded_ids, padded_segments], labels.flatten()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.