repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated
|---|---|---|---|---|---|---|---|---|
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalEval/colossal_eval/evaluate/__init__.py | applications/ColossalEval/colossal_eval/evaluate/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalEval/colossal_eval/evaluate/dataset_evaluator/metrics.py | applications/ColossalEval/colossal_eval/evaluate/dataset_evaluator/metrics.py | # Code adapted from https://github.com/THUDM/LongBench/blob/main/metrics.py
# Code adapted from https://github.com/hendrycks/math/blob/main/modeling/math_equivalence.py
# Code adapted from https://github.com/ruixiangcui/AGIEval/blob/main/src/evaluation.py
# https://github.com/SkyworkAI/Skywork/blob/main/eval/eval_gsm8k.py
import difflib
import re
import string
from collections import Counter
import jieba
from fuzzywuzzy import fuzz
from rouge import Rouge
ANS_RE = re.compile(r"#### (\-?[0-9\.\,]+)")
INVALID_ANS = "[invalid]"
ans_re1 = re.compile(r"(\-?[0-9][0-9\.\,]*)")
ans_re2 = re.compile(r"=\s*(\$?-?[0-9][0-9\.\,]*)")
metrics4subcategory = {
"pretrain": {
"perplexity": ["ALL"],
"ppl_score": ["ALL"],
"per_byte_perplexity": ["ALL"],
"per_byte_ppl_score": ["ALL"],
},
    # The commented-out subcategories are not 4-choice questions.
"AGIEvalDataset": {
"combined_single_choice_accuracy": [
# "lsat-ar",
# "lsat-lr",
# "lsat-rc",
"logiqa-en",
"sat-math",
"sat-en",
# "aqua-rat",
"sat-en-without-passage",
"gaokao-english",
"logiqa-zh",
"gaokao-chinese",
"gaokao-geography",
"gaokao-history",
"gaokao-biology",
"gaokao-chemistry",
],
"first_token_accuracy": [
# "lsat-ar",
# "lsat-lr",
# "lsat-rc",
"logiqa-en",
"sat-math",
"sat-en",
# "aqua-rat",
"sat-en-without-passage",
"gaokao-english",
"logiqa-zh",
"gaokao-chinese",
"gaokao-geography",
"gaokao-history",
"gaokao-biology",
"gaokao-chemistry",
],
"single_choice_accuracy": [
# "lsat-ar",
# "lsat-lr",
# "lsat-rc",
"logiqa-en",
"sat-math",
"sat-en",
# "aqua-rat",
"sat-en-without-passage",
"gaokao-english",
"logiqa-zh",
"gaokao-chinese",
"gaokao-geography",
"gaokao-history",
"gaokao-biology",
"gaokao-chemistry",
],
"multi_choice_accuracy": ["jec-qa-kd", "jec-qa-ca", "gaokao-physics", "gaokao-mathqa"],
"math_equivalence": ["gaokao-mathcloze", "math"],
"perplexity": ["ALL"],
"ppl_score_over_choices": [
"lsat-ar",
"lsat-lr",
"lsat-rc",
"logiqa-en",
"sat-math",
"sat-en",
"aqua-rat",
"sat-en-without-passage",
"gaokao-english",
"logiqa-zh",
"jec-qa-kd",
"jec-qa-ca",
"gaokao-chinese",
"gaokao-geography",
"gaokao-history",
"gaokao-biology",
"gaokao-chemistry",
"gaokao-physics",
"gaokao-mathqa",
],
"ppl_score": ["ALL"],
},
"CMMLUDataset": {
"first_token_accuracy": ["ALL"],
"single_choice_accuracy": ["ALL"],
"perplexity": ["ALL"],
"ppl_score_over_choices": ["ALL"],
"ppl_score": ["ALL"],
},
"GaoKaoBenchDataset": {
"combined_single_choice_accuracy": [
"English MCQs",
"Biology MCQs",
"Chemistry MCQs",
"History MCQs",
"Math I MCQs",
"Math II MCQs",
"Political Science MCQs",
],
"first_token_accuracy": [
"English MCQs",
"Biology MCQs",
"Chemistry MCQs",
"History MCQs",
"Math I MCQs",
"Math II MCQs",
"Political Science MCQs",
],
"single_choice_accuracy": [
"English MCQs",
"Biology MCQs",
"Chemistry MCQs",
"History MCQs",
"Math I MCQs",
"Math II MCQs",
"Political Science MCQs",
],
"multi_choice_accuracy": [
"Chinese Lang and Usage MCQs",
"Chinese Modern Lit",
"English Fill in Blanks",
"English Reading Comp",
"Geography MCQs",
"Physics MCQs",
"English Cloze Test",
],
"math_equivalence": ["Math I Fill-in-the-Blank", "Math II Fill-in-the-Blank"],
"rouge_score": ["English Language Cloze Passage"],
"rouge_zh_score": [
"Chinese Language Famous Passages and Sentences Dictation",
"Chemistry Open-ended Questions",
"History Open-ended Questions",
"Biology Open-ended Questions",
"Political Science Open-ended Questions",
"English Language Error Correction",
"Chinese Language Language and Writing Skills Open-ended Questions",
"Math II Open-ended Questions",
"Chinese Language Literary Text Reading",
"Chinese Language Ancient Poetry Reading",
"Chinese Language Classical Chinese Reading",
"Physics Open-ended Questions",
"Math I Open-ended Questions",
"Geography Open-ended Questions",
"Chinese Language Practical Text Reading",
],
"perplexity": ["ALL"],
"ppl_score_over_choices": ["ALL"],
"ppl_score": ["ALL"],
},
"LongBenchDataset": {
"f1_score": ["hotpotqa", "2wikimqa", "musique", "narrativeqa", "qasper", "multifieldqa_en", "triviaqa"],
"f1_zh_score": ["multifieldqa_zh"],
"rouge_score": ["gov_report", "qmsum", "multi_news", "samsum"],
"rouge_zh_score": ["dureader", "vcsum"],
"retrieval_score": ["passage_retrieval_en"],
"retrieval_zh_score": ["passage_retrieval_zh"],
"classification_score": ["trec", "lsht"],
"code_sim_score": ["lcc", "repobench-p"],
"count_score": ["passage_count"],
"perplexity": ["ALL"],
"ppl_score": ["ALL"],
},
"MMLUDataset": {
"first_token_accuracy": ["ALL"],
"single_choice_accuracy": ["ALL"],
"accuracy": ["ALL"],
"perplexity": ["ALL"],
"ppl_score_over_choices": ["ALL"],
"ppl_score": ["ALL"],
},
"MTBenchDataset": {"mtbench_single_judge": ["ALL"]},
"CValuesDataset": {"first_token_accuracy": ["ALL"]},
"SafetyBenchZHDataset": {"first_token_accuracy": ["ALL"]},
"SafetyBenchENDataset": {"first_token_accuracy": ["ALL"]},
"GSMDataset": {
"loss_over_all_tokens": ["ALL"],
"gsm_accuracy": ["ALL"],
},
}
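# Illustrative sketch (not part of the original file): how the mapping above is
# typically consumed. DatasetEvaluator performs an equivalent lookup; the helper
# name and its arguments here are hypothetical.
def _categories_for_metric(dataset_class: str, metric: str, all_categories: list) -> list:
    suggested = metrics4subcategory[dataset_class][metric]
    # "ALL" means the metric applies to every category of the dataset.
    return all_categories if "ALL" in suggested else suggested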
def _fix_fracs(string):
substrs = string.split("\\frac")
new_str = substrs[0]
if len(substrs) > 1:
substrs = substrs[1:]
for substr in substrs:
new_str += "\\frac"
if substr[0] == "{":
new_str += substr
else:
try:
assert len(substr) >= 2
except:
return string
a = substr[0]
b = substr[1]
if b != "{":
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}{" + b + "}" + post_substr
else:
new_str += "{" + a + "}{" + b + "}"
else:
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}" + b + post_substr
else:
new_str += "{" + a + "}" + b
string = new_str
return string
def _fix_a_slash_b(string):
if len(string.split("/")) != 2:
return string
a = string.split("/")[0]
b = string.split("/")[1]
try:
a = int(a)
b = int(b)
assert string == "{}/{}".format(a, b)
new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
return new_string
except:
return string
def _remove_right_units(string):
# "\\text{ " only ever occurs (at least in the val set) when describing units
if "\\text{ " in string:
splits = string.split("\\text{ ")
assert len(splits) == 2
return splits[0]
else:
return string
def _fix_sqrt(string):
if "\\sqrt" not in string:
return string
splits = string.split("\\sqrt")
new_string = splits[0]
for split in splits[1:]:
if split[0] != "{":
a = split[0]
new_substr = "\\sqrt{" + a + "}" + split[1:]
else:
new_substr = "\\sqrt" + split
new_string += new_substr
return new_string
def _strip_string(string):
# linebreaks
string = string.replace("\n", "")
# print(string)
# remove inverse spaces
string = string.replace("\\!", "")
# print(string)
# replace \\ with \
string = string.replace("\\\\", "\\")
# print(string)
# replace tfrac and dfrac with frac
string = string.replace("tfrac", "frac")
string = string.replace("dfrac", "frac")
# print(string)
# remove \left and \right
string = string.replace("\\left", "")
string = string.replace("\\right", "")
# print(string)
# Remove circ (degrees)
string = string.replace("^{\\circ}", "")
string = string.replace("^\\circ", "")
# remove dollar signs
string = string.replace("\\$", "")
# remove units (on the right)
string = _remove_right_units(string)
# remove percentage
string = string.replace("\\%", "")
string = string.replace("\%", "")
# " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
string = string.replace(" .", " 0.")
string = string.replace("{.", "{0.")
# if empty, return empty string
if len(string) == 0:
return string
if string[0] == ".":
string = "0" + string
# to consider: get rid of e.g. "k = " or "q = " at beginning
if len(string.split("=")) == 2:
if len(string.split("=")[0]) <= 2:
string = string.split("=")[1]
# fix sqrt3 --> sqrt{3}
string = _fix_sqrt(string)
# remove spaces
string = string.replace(" ", "")
# \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). Also does a/b --> \\frac{a}{b}
string = _fix_fracs(string)
# manually change 0.5 --> \frac{1}{2}
if string == "0.5":
string = "\\frac{1}{2}"
# NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y
string = _fix_a_slash_b(string)
return string
def parse_math_answer(raw_string):
def remove_boxed(s):
left = "\\boxed{"
try:
assert s[: len(left)] == left
assert s[-1] == "}"
answer = s[len(left) : -1]
if "=" in answer:
answer = answer.split("=")[-1].lstrip(" ")
return answer
except:
return None
def last_boxed_only_string(string):
idx = string.rfind("\\boxed")
if idx < 0:
idx = string.rfind("\\fbox")
if idx < 0:
return None
i = idx
right_brace_idx = None
num_left_braces_open = 0
while i < len(string):
if string[i] == "{":
num_left_braces_open += 1
if string[i] == "}":
num_left_braces_open -= 1
if num_left_braces_open == 0:
right_brace_idx = i
break
i += 1
        if right_brace_idx is None:
retval = None
else:
retval = string[idx : right_brace_idx + 1]
return retval
def get_answer_with_dollar_sign(s):
        first_pattern = r"\$(.*)\$"
last_match = None
matches = re.findall(first_pattern, s)
if matches:
last_match = matches[-1]
if "=" in last_match:
last_match = last_match.split("=")[-1].lstrip(" ")
return last_match
def get_answer_without_dollar_sign(s):
last_match = None
if "=" in s:
last_match = s.split("=")[-1].lstrip(" ").rstrip(".")
if "\\n" in last_match:
last_match = last_match.split("\\n")[0]
else:
pattern = "(?:\\$)?\d+(?:\.\d+)?(?![\w\d])"
matches = re.findall(pattern, s)
if matches:
last_match = matches[-1]
return last_match
if "\\boxed" in raw_string:
answer = remove_boxed(last_boxed_only_string(raw_string))
else:
answer = get_answer_with_dollar_sign(raw_string)
if not answer:
answer = get_answer_without_dollar_sign(raw_string)
return answer
def math_equivalence(prediction, reference, **kwargs):
prediction = parse_math_answer(prediction)
if prediction is None and reference is None:
print("WARNING: Both None")
return False
if prediction is None or reference is None:
return False
try:
ss1 = _strip_string(prediction)
ss2 = _strip_string(reference)
return ss1 == ss2
except:
return prediction == reference
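# Illustrative usage (assumed example, not from the original file): the prediction is
# parsed and both sides are normalized before comparison, so a boxed \tfrac matches a
# plain decimal reference.
# math_equivalence("The answer is $\\boxed{\\tfrac{1}{2}}$.", "0.5") evaluates to True.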
def multi_choice_accuracy(prediction, reference, **kwargs):
# Only find uppercase letters not surrounded by lowercase letters
all_classes = kwargs.get("all_classes", None)
if all_classes:
pattern = f"(?<![a-z])[{all_classes[0]}-{all_classes[-1]}](?![a-z])"
else:
pattern = "(?<![a-z])[A-F](?![a-z])"
prediction = re.findall(pattern, prediction)
reference = re.findall(pattern, reference)
prediction_set = set(prediction)
reference_set = set(reference)
score = 0.0
for p in prediction_set:
if p not in reference_set:
return 0.0
else:
score += 1 / len(reference_set)
return score
def accuracy_by_options(question, prediction, reference):
pattern = r"[A-Z]\. [^\n]+"
options = re.findall(pattern, question)
answer = prediction.split("\n\n")[0]
for option in options:
choice, content = option.split(". ", 1)
if choice == reference and content == answer:
return 1
return 0
def combined_single_choice_accuracy(prediction, reference, **kwargs):
return single_choice_accuracy(prediction, reference, **kwargs)
def single_choice_accuracy(prediction, reference, **kwargs):
# Only find uppercase letters not surrounded by lowercase letters
all_classes = kwargs.get("all_classes", None)
if all_classes:
pattern = f"(?<![a-z])[{all_classes[0]}-{all_classes[-1]}](?![a-z])"
else:
pattern = "(?<![a-z])[A-F](?![a-z])"
prediction = re.findall(pattern, prediction)[0:1]
reference = re.findall(pattern, reference)
assert len(reference) == 1
prediction_set = set(prediction)
reference_set = set(reference)
if prediction_set == reference_set:
return 1.0
return 0.0
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def normalize_zh_answer(s):
"""Lower text and remove punctuation, extra whitespace."""
def white_space_fix(text):
return "".join(text.split())
def remove_punc(text):
cn_punctuation = "!?。。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
all_punctuation = set(string.punctuation + cn_punctuation)
return "".join(ch for ch in text if ch not in all_punctuation)
def lower(text):
return text.lower()
return white_space_fix(remove_punc(lower(s)))
def count_score(prediction, reference, **kwargs):
numbers = re.findall(r"\d+", prediction)
right_num = 0
for number in numbers:
if str(number) == str(reference):
right_num += 1
final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers)
return float(final_score)
def retrieval_score(prediction, reference, **kwargs):
pattern = r"Paragraph (\d+)"
matches = re.findall(pattern, reference)
ground_truth_id = matches[0]
numbers = re.findall(r"\d+", prediction)
right_num = 0
for number in numbers:
if str(number) == str(ground_truth_id):
right_num += 1
final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers)
return float(final_score)
def retrieval_zh_score(prediction, reference, **kwargs):
pattern = r"段落(\d+)"
matches = re.findall(pattern, reference)
ground_truth_id = matches[0]
numbers = re.findall(r"\d+", prediction)
right_num = 0
for number in numbers:
if str(number) == str(ground_truth_id):
right_num += 1
final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers)
return float(final_score)
def code_sim_score(prediction, reference, **kwargs):
all_lines = prediction.lstrip("\n").split("\n")
prediction = ""
for line in all_lines:
if ("`" not in line) and ("#" not in line) and ("//" not in line):
prediction = line
break
return fuzz.ratio(prediction, reference) / 100
def classification_score(prediction, reference, **kwargs):
em_match_list = []
all_classes = kwargs["all_classes"]
for class_name in all_classes:
if class_name in prediction:
em_match_list.append(class_name)
    # Iterate over a copy so that removing matches does not skip elements.
    for match_term in list(em_match_list):
        if match_term in reference and match_term != reference:
            em_match_list.remove(match_term)
    if len(em_match_list) != 0:
        if reference in em_match_list:
            score = 1.0 / len(em_match_list)
        else:
            score = 0.0
    else:
        # Fall back to fuzzy matching when no class name appears verbatim in the prediction.
        best_match = None
        highest_similarity = 0
        for class_name in all_classes:  # renamed to avoid shadowing the `string` module
            similarity = difflib.SequenceMatcher(None, class_name, prediction).ratio()
            if similarity > highest_similarity:
                highest_similarity = similarity
                best_match = class_name
        score = float(best_match == reference)
return score
def rouge_score(prediction, reference, **kwargs):
rouge = Rouge()
try:
scores = rouge.get_scores([prediction], [reference], avg=True)
except:
return 0.0
return scores["rouge-l"]["f"]
def rouge_zh_score(prediction, reference, **kwargs):
prediction = " ".join(list(jieba.cut(prediction, cut_all=False)))
reference = " ".join(list(jieba.cut(reference, cut_all=False)))
score = rouge_score(prediction, reference)
return score
def _f1_score(prediction, reference, **kwargs):
common = Counter(prediction) & Counter(reference)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction)
recall = 1.0 * num_same / len(reference)
f1 = (2 * precision * recall) / (precision + recall)
return f1
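# Illustrative example (assumed, not from the original file): bag-of-tokens F1.
# _f1_score(["a", "b"], ["b", "c"]) returns 0.5 (precision = recall = 1/2).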
def f1_score(prediction, reference, **kwargs):
normalized_prediction = normalize_answer(prediction)
normalized_ground_truth = normalize_answer(reference)
prediction_tokens = normalized_prediction.split()
ground_truth_tokens = normalized_ground_truth.split()
return _f1_score(prediction_tokens, ground_truth_tokens)
def f1_zh_score(prediction, reference, **kwargs):
prediction_tokens = list(jieba.cut(prediction, cut_all=False))
ground_truth_tokens = list(jieba.cut(reference, cut_all=False))
prediction_tokens = [normalize_zh_answer(token) for token in prediction_tokens]
ground_truth_tokens = [normalize_zh_answer(token) for token in ground_truth_tokens]
prediction_tokens = [token for token in prediction_tokens if len(token) > 0]
ground_truth_tokens = [token for token in ground_truth_tokens if len(token) > 0]
return _f1_score(prediction_tokens, ground_truth_tokens)
def extract_answer_hf(completion):
match = ANS_RE.search(completion)
if match:
match_str = match.group(1).strip()
match_str = match_str.replace(",", "")
return eval(match_str)
else:
return INVALID_ANS
def get_match_str(match, idx):
match_str = match[idx]
match_str = match_str.replace(",", "")
if match_str.endswith("."):
match_str = match_str[:-1]
if match_str.endswith(".00"):
match_str = match_str[:-3]
if match_str.endswith(".0"):
match_str = match_str[:-2]
return match_str
def extract_answer(completion):
match1 = re.findall(ans_re1, completion)
match2 = re.findall(ans_re2, completion)
ans = []
if match1:
match_str1 = get_match_str(match1, -1)
ans.append(match_str1)
if match2:
match_str2 = get_match_str(match2, -1).replace("$", "")
ans.append(match_str2)
answer = INVALID_ANS
try:
if len(ans) > 0:
answer = eval(ans[-1])
except Exception as e:
print(e)
return answer
return answer
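# Illustrative example (assumed, not from the original file):
# extract_answer("The answer is 18.") returns 18; get_match_str strips commas and
# trailing ".", ".0", ".00" before the final eval.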
def is_correct(completion, answer):
gold = extract_answer_hf(answer)
assert gold != INVALID_ANS, "No ground truth answer found in the document."
completion = completion.split("answer is")[-1]
return extract_answer(completion) == gold
def gsm_accuracy(prediction, reference, **kwargs):
prediction = prediction.split("\n\n\n")[0]
prediction = prediction.split("\n\n")[0]
prediction = prediction.split("Question:")[0]
return 1.0 if is_correct(prediction, reference) else 0.0
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalEval/colossal_eval/evaluate/dataset_evaluator/gpt_judge.py | applications/ColossalEval/colossal_eval/evaluate/dataset_evaluator/gpt_judge.py | # Code adapted from https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge
import ast
import concurrent.futures
import copy
import json
import os
import re
import time
from typing import Any, Dict, List
import numpy as np
import openai
import tqdm
MODEL = "gpt-4"
API_MAX_RETRY = 16
API_RETRY_SLEEP = 10
API_ERROR_OUTPUT = "$ERROR$"
NEED_REF_CATS = ["math", "reasoning", "coding"]
one_score_pattern = re.compile(r"\[\[(\d+\.?\d*)\]\]")
one_score_pattern_backup = re.compile(r"\[(\d+\.?\d*)\]")
def load_mt_prompts(prompt_file: str):
prompts = {}
with open(prompt_file) as fin:
for line in fin:
line = json.loads(line)
prompts[line["name"]] = line
return prompts
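# The prompt file is expected to be JSONL with one judge template per line. Only the
# fields read by this module are shown; the example values are illustrative:
# {"name": "single-v1", "system_prompt": "...", "prompt_template": "..."}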
def get_mt_prompt(prompts: Dict[str, str], multiturn: bool, math: bool):
if math and multiturn:
return prompts["single-math-v1-multi-turn"]
elif math and not multiturn:
return prompts["single-math-v1"]
elif not math and multiturn:
return prompts["single-v1-multi-turn"]
elif not math and not multiturn:
return prompts["single-v1"]
def chat_completion_openai(messages: List[Dict], temperature: float = 0.0, max_tokens: int = 2048):
output = API_ERROR_OUTPUT
model = MODEL
for _ in range(API_MAX_RETRY):
try:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
n=1,
temperature=temperature,
max_tokens=max_tokens,
)
output = response["choices"][0]["message"]["content"]
break
except openai.error.OpenAIError as e:
print(type(e), e)
time.sleep(API_RETRY_SLEEP)
return output
def get_mtbench_judgements(question: Dict[str, Any], prompts: Dict[str, str]):
id = question["id"]
judgement = {"id": id, "judgements": [], "ratings": []}
category = question["category"]
math = category in NEED_REF_CATS
turn_number = len(question["instruction"])
for num in range(turn_number):
assert (len(question["target"]) >= 1 and math) or not math
kwargs = {}
if num >= 1:
prompt = get_mt_prompt(prompts, multiturn=True, math=math)
if len(question["target"]) >= 1 and math:
kwargs = {f"ref_answer_{i+1}": question["target"][i] for i in range(len(question["target"]))}
user_prompt = prompt["prompt_template"].format(
question_1=question["instruction"][0],
question_2=question["instruction"][1],
answer_1=question["output"][0],
answer_2=question["output"][1],
**kwargs,
)
else:
prompt = get_mt_prompt(prompts, multiturn=False, math=math)
if len(question["target"]) >= 1 and math:
kwargs = {"ref_answer_1": question["target"][0]}
user_prompt = prompt["prompt_template"].format(
question=question["instruction"][0],
answer=question["output"][0],
**kwargs,
)
rating = -1
sys_prompt = prompt["system_prompt"]
messages = [{"role": "system", "content": sys_prompt}, {"role": "user", "content": user_prompt}]
        judgement_str = chat_completion_openai(messages, temperature=0.0, max_tokens=2048)
match = re.search(one_score_pattern, judgement_str)
if not match:
match = re.search(one_score_pattern_backup, judgement_str)
if match:
rating = ast.literal_eval(match.groups()[0])
else:
rating = -1
judgement["judgements"].append(judgement_str)
judgement["ratings"].append(rating)
return judgement
def mtbench_single_judge(data: List[Dict], config_path: str):
judgements = []
prompt_dir = os.path.dirname(config_path)
prompts = load_mt_prompts(os.path.join(prompt_dir, "mtbench_judge_prompts.jsonl"))
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
futures = []
for i, question in enumerate(data):
future = executor.submit(get_mtbench_judgements, question, prompts)
futures.append(future)
for future in tqdm.tqdm(
concurrent.futures.as_completed(futures),
desc=f"MTBench single judge for {data[0]['category']}",
total=len(futures),
):
judgements.append(future.result())
judgements.sort(key=lambda x: x["id"])
judgements_by_id = {j["id"]: j for j in judgements}
data_to_dump = copy.deepcopy(data)
for d in data_to_dump:
id = d["id"]
d["judgements"] = judgements_by_id[id]["judgements"]
d["ratings"] = judgements_by_id[id]["ratings"]
avg_ratings = np.mean([j["ratings"] for j in judgements], axis=0)
return data_to_dump, avg_ratings
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalEval/colossal_eval/evaluate/dataset_evaluator/__init__.py | applications/ColossalEval/colossal_eval/evaluate/dataset_evaluator/__init__.py | from .dataset_evaluator import DatasetEvaluator
__all__ = ["DatasetEvaluator"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalEval/colossal_eval/evaluate/dataset_evaluator/dataset_evaluator.py | applications/ColossalEval/colossal_eval/evaluate/dataset_evaluator/dataset_evaluator.py | import os
from typing import Dict, List, Union
import colossal_eval.evaluate.dataset_evaluator.metrics as metric_helper
import numpy as np
import tqdm
from colossal_eval.utils import jdump
import colossal_eval.evaluate.dataset_evaluator.gpt_judge as gpt_helper # noqa
LabelBasedMetrics = ["first_token_accuracy", "matthews_correlation"]
LossBasedMetrics = [
"perplexity",
"ppl_score",
"ppl_score_over_choices",
"per_byte_perplexity",
"per_byte_ppl_score",
"loss_over_all_tokens",
]
CombinedMetrics = ["combined_single_choice_accuracy"]
GPTMetrics = ["mtbench_single_judge"]
OtherMetrics = [
"f1_score",
"f1_zh_score",
"rouge_score",
"rouge_zh_score",
"retrieval_score",
"retrieval_zh_score",
"classification_score",
"code_sim_score",
"count_score",
"multi_choice_accuracy",
"math_equivalence",
"single_choice_accuracy",
"gsm_accuracy",
]
class DatasetEvaluator(object):
"""
Dataset evaluator.
"""
def __init__(self, config_path: str, save_path: str):
self.config_path = config_path
self.save_path = save_path
def _calculate_label_metrics(self, metric: str, category: str):
"""Calculate label-based metrics."""
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
str_label_map = {
choice: idx for idx, choice in enumerate(self.data[category]["inference_kwargs"]["all_classes"])
}
references = [str_label_map[sample["target"]] for sample in self.data[category]["data"]]
[sample["output"] for sample in self.data[category]["data"]]
flag = False
logits = []
for i, sample in enumerate(self.data[category]["data"]):
if np.any(np.isnan(np.array(list(sample["logits_over_choices"].values())))):
if not flag:
print(
f"NaN in the logits, switch to exact match for category {category} in dataset {self.dataset_name} in model {self.model_name}."
)
flag = True
score = 0
for ref in sample["target"]:
score = max(
score,
metric_helper.single_choice_accuracy(
sample["output"], ref, all_classes=self.data[category]["inference_kwargs"]["all_classes"]
),
)
score = max(
score,
metric_helper.accuracy_by_options(sample["input"], sample["output"], ref),
)
logits.append(references[i] if score == 1 else -1)
else:
logits.append(np.argmax(np.array(list(sample["logits_over_choices"].values()))))
references = np.array(references)
logits = np.array(logits)
scores = np.sum(references == logits) / len(self.data[category]["data"]) * 100
self.evaluation_results[metric][category] = (scores, len(self.data[category]["data"]))
self.evaluation_results[metric]["ALL"] += scores * weight
def _calculate_combined_metrics(self, metric: str, category: str):
"""Calculate combined metrics."""
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
references = [sample["target"] for sample in self.data[category]["data"]]
predictions = [sample["output"] for sample in self.data[category]["data"]]
str_label_map = {
choice: idx for idx, choice in enumerate(self.data[category]["inference_kwargs"]["all_classes"])
}
references_labels = [str_label_map[sample["target"][0]] for sample in self.data[category]["data"]]
predictions = [sample["output"] for sample in self.data[category]["data"]]
flag = False
logits = []
for i, sample in enumerate(self.data[category]["data"]):
if np.any(np.isnan(np.array(list(sample["logits_over_choices"].values())))):
if not flag:
print(
f"NaN in the logits, switch to exact match for category {category} in dataset {self.dataset_name} in model {self.model_name}."
)
flag = True
score = 0
for ref in sample["target"]:
score = max(
score,
metric_helper.single_choice_accuracy(
sample["output"], ref, all_classes=self.data[category]["inference_kwargs"]["all_classes"]
),
)
logits.append(references[i] if score == 1 else -1)
else:
logits.append(np.argmax(np.array(list(sample["logits_over_choices"].values()))))
metric_method = eval("metric_helper." + metric)
total_score = 0.0
for prediction, reference, references_label, softmax in zip(predictions, references, references_labels, logits):
score = 0.0
for ref in reference:
score = max(
score,
metric_method(prediction, ref, all_classes=self.data[category]["inference_kwargs"]["all_classes"]),
)
if references_label == softmax:
score = 1
total_score += score
total_score = total_score * 100 / len(self.data[category]["data"])
self.evaluation_results[metric][category] = (total_score, len(self.data[category]["data"]))
self.evaluation_results[metric]["ALL"] += total_score * weight
def _calculate_other_metrics(self, metric: str, category: str):
"""Calculate other metrics."""
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
references = [
sample["target"] if isinstance(sample["target"], list) else [sample["target"]]
for sample in self.data[category]["data"]
]
predictions = [sample["output"] for sample in self.data[category]["data"]]
metric_method = eval("metric_helper." + metric)
total_score = 0.0
for prediction, reference in zip(predictions, references):
score = 0.0
for ref in reference:
score = max(
score,
metric_method(prediction, ref, all_classes=self.data[category]["inference_kwargs"]["all_classes"]),
)
total_score += score
total_score = total_score * 100 / len(predictions)
self.evaluation_results[metric][category] = (total_score, len(self.data[category]["data"]))
self.evaluation_results[metric]["ALL"] += total_score * weight
def _calculate_gpt_metrics(self, metric: str, category: str):
"""Calculate gpt metrics."""
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
metric_method = eval("gpt_helper." + metric)
judgements, avg_ratings = metric_method(self.data[category]["data"], self.config_path)
self.judgements[category] = judgements
self.evaluation_results[metric][category] = (np.mean(avg_ratings), len(self.data[category]["data"]))
self.evaluation_results[metric]["ALL"] += np.mean(avg_ratings) * weight
for i in range(avg_ratings.shape[0]):
if f"{metric}_{i+1}" not in self.evaluation_results:
self.evaluation_results[f"{metric}_{i+1}"] = {cat: 0 for cat in (["ALL"] + self.categories)}
self.evaluation_results[f"{metric}_{i+1}"][category] = (avg_ratings[i], len(self.data[category]["data"]))
self.evaluation_results[f"{metric}_{i+1}"]["ALL"] += avg_ratings[i] * weight
def _calculate_loss_metrics(self, metric: str, category: str):
"""Calculate perplexity."""
if metric == "perplexity":
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
losses = [min(sample["loss"]) for sample in self.data[category]["data"]]
perplexity = np.mean(np.exp(np.array(losses)))
self.evaluation_results["perplexity"][category] = (perplexity, len(self.data[category]["data"]))
self.evaluation_results["perplexity"]["ALL"] += perplexity * weight
elif metric == "ppl_score":
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
losses = [min(sample["loss"]) for sample in self.data[category]["data"]]
perplexity_score = np.mean(np.exp(-np.array(losses))) * 100
self.evaluation_results["ppl_score"][category] = (perplexity_score, len(self.data[category]["data"]))
self.evaluation_results["ppl_score"]["ALL"] += perplexity_score * weight
elif metric == "ppl_score_over_choices" and self.data[category]["inference_kwargs"]["all_classes"] is not None:
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
loss_over_choices = [sample["loss_over_choices"] for sample in self.data[category]["data"]]
perplexity_score_over_choices = np.mean(np.exp(-np.array(loss_over_choices))) * 100
self.evaluation_results["ppl_score_over_choices"][category] = (
perplexity_score_over_choices,
len(self.data[category]["data"]),
)
self.evaluation_results["ppl_score_over_choices"]["ALL"] += perplexity_score_over_choices * weight
elif metric == "per_byte_perplexity":
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
losses = [min(sample["loss_sum"]) for sample in self.data[category]["data"]]
perplexity = np.mean(np.exp(np.array(losses) / np.array(self.N_bytes[category])))
self.evaluation_results["per_byte_perplexity"][category] = perplexity
self.evaluation_results["per_byte_perplexity"]["ALL"] += perplexity * weight
elif metric == "per_byte_ppl_score":
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
losses = [min(sample["loss_sum"]) for sample in self.data[category]["data"]]
perplexity_score = np.mean(np.exp(-np.array(losses) / np.array(self.N_bytes[category]))) * 100
self.evaluation_results["per_byte_ppl_score"][category] = perplexity_score
self.evaluation_results["per_byte_ppl_score"]["ALL"] += perplexity_score * weight
elif metric == "loss_over_all_tokens":
weight = len(self.data[category]["data"]) / self.metric_total_length[metric]
losses = [min(sample["loss_sum"]) for sample in self.data[category]["data"]]
token_nums = [sample["token_num"][np.argmin(sample["loss_sum"])] for sample in self.data[category]["data"]]
perplexity = np.sum(np.array(losses)) / np.sum(np.array(token_nums))
self.evaluation_results["loss_over_all_tokens"][category] = perplexity
self.evaluation_results["loss_over_all_tokens"]["ALL"] += perplexity * weight
# The number of tokens can be used for normalizing.
# See https://github.com/SkyworkAI/Skywork/issues/43#issuecomment-1811733834
print(f"{self.model_name} {category} token num: {np.sum(np.array(token_nums))}")
def _evaluate(self):
"""Calculate and return evaluation results"""
for metric in self.metrics:
pbar = tqdm.tqdm(
desc=f"{self.dataset_name}-{metric}-{self.model_name}", total=len(self.suggested_categories[metric])
)
if metric in LabelBasedMetrics:
for category in self.suggested_categories[metric]:
self._calculate_label_metrics(metric, category)
pbar.update(1)
elif metric in LossBasedMetrics:
for category in self.suggested_categories[metric]:
self._calculate_loss_metrics(metric, category)
pbar.update(1)
elif metric in CombinedMetrics:
for category in self.suggested_categories[metric]:
self._calculate_combined_metrics(metric, category)
pbar.update(1)
elif metric in GPTMetrics:
for category in self.suggested_categories[metric]:
self._calculate_gpt_metrics(metric, category)
pbar.update(1)
elif metric in OtherMetrics:
for category in self.suggested_categories[metric]:
self._calculate_other_metrics(metric, category)
pbar.update(1)
else:
raise Exception(f"{metric} not supported.")
if self.judgements:
judgement_path = os.path.join(self.save_path, f"{self.model_name}_judgements.json")
jdump(self.judgements, judgement_path)
return self.evaluation_results
def get_evaluation_results(
self, data: Dict[str, Union[str, Dict]], dataset_name: str, model_name: str, metrics: List[str]
):
"""
Evaluate inference data on the given metrics.
Args:
data: Data to be evaluated.
dataset_name: Name of the dataset
model_name: Name of the model
metrics: Metrics used to evaluate.
"""
self.data = data["inference_results"]
self.dataset_name = dataset_name
self.dataset_class = data["dataset_class"]
self.model_name = model_name
self.categories = list(self.data.keys())
self.metrics = metrics
self.judgements = {}
self.evaluation_results = {
metric: {category: 0 for category in (["ALL"] + self.categories)} for metric in self.metrics
}
self.total_length = 0
self.total_single_choices = 0
for value in self.data.values():
self.total_length += len(value["data"])
if value["inference_kwargs"]["all_classes"] is not None:
self.total_single_choices += len(value["data"])
self.metric_total_length = {metric: 0 for metric in self.metrics}
self.suggested_categories = {metric: [] for metric in self.metrics}
for metric in self.metrics:
            # Train and reference splits use the same metrics as the test split.
self.suggested_categories[metric] = metric_helper.metrics4subcategory[self.dataset_class][metric]
if "ALL" in self.suggested_categories[metric]:
self.suggested_categories[metric] = self.categories
self.metric_total_length[metric] = self.total_length
continue
for category in self.suggested_categories[metric]:
self.metric_total_length[metric] += len(self.data[category]["data"])
if "per_byte_perplexity" in self.metrics or "per_byte_ppl_score" in self.metrics:
self.N_bytes = {category: [] for category in self.categories}
for category in self.categories:
samples = self.data[category]["data"]
for sample in samples:
self.N_bytes[category].append(sample["byte_num"][0])
return self._evaluate()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalEval/examples/dataset_evaluation/inference.py | applications/ColossalEval/examples/dataset_evaluation/inference.py | import argparse
import copy
import os
from typing import Dict, List
import torch.distributed as dist
from colossal_eval import dataset, models, utils
from colossal_eval.dataset.base import DistributedDataset
from torch.utils.data import DataLoader, DistributedSampler
import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.cluster import ProcessGroupMesh
from colossalai.logging import get_dist_logger
from colossalai.shardformer import ShardConfig
logger = get_dist_logger()
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def rm_and_merge(
dp_size: int,
save_path: str,
model_names: List[str],
dataset_names: Dict[str, List],
dataset_classes: Dict[str, List],
) -> None:
"""
    Remove inference results per rank and merge them into one file.
Args:
dp_size: Number of groups for data parallel.
save_path: The folder for storing inference results.
model_names: Names of models for inference.
        dataset_names: Names of datasets for inference.
        dataset_classes: Dataset class for each set of inference results. The dataset class is saved to streamline the evaluation process.
"""
for model_name in model_names:
for dataset_name, categories in dataset_names.items():
all_answers_with_dataset_class = {}
all_answers_with_dataset_class["dataset_class"] = dataset_classes[dataset_name]
all_answers = {}
for category in categories:
all_answers[category] = {"data": []}
answers = {"data": []}
for r in range(dp_size):
directory = os.path.join(
save_path, model_name, f"{dataset_name}_{category}_inference_results_dp_rank{r}.json"
)
if not os.path.exists(directory):
raise Exception(
f"Directory {directory} not found. There may be an error during inference time."
)
else:
rank_answers = utils.jload(directory)
                        deduplicated_answers = [x for x in rank_answers["data"] if x not in answers["data"]]
                        answers["data"].extend(deduplicated_answers)
answers["inference_kwargs"] = rank_answers["inference_kwargs"]
for r in range(dp_size):
try:
directory = os.path.join(
save_path, model_name, f"{dataset_name}_{category}_inference_results_dp_rank{r}.json"
)
os.remove(directory)
except Exception as e:
print(e)
all_answers[category] = answers
all_answers_with_dataset_class["inference_results"] = all_answers
logger.info(f"Save inference results of model {model_name} on dataset {dataset_name}.")
utils.jdump(
all_answers_with_dataset_class,
os.path.join(save_path, model_name, f"{dataset_name}_inference_results.json"),
)
logger.info(f"Save inference results of model {model_name} for all dataset.")
logger.info(f"Save inference results of all models for all dataset.")
def main(args):
colossalai.launch_from_torch(seed=42)
accelerator = get_accelerator()
world_size = dist.get_world_size()
rank = dist.get_rank()
DP_AXIS = 0
TP_AXIS = 1
dp_size = world_size // args.tp_size
if rank == 0:
logger.info("Setting TP and DP...")
logger.info(f"TP size: {args.tp_size}, DP size: {dp_size}")
if world_size % args.tp_size != 0:
raise Exception(
f"TP size is {args.tp_size} while world size is {world_size}! Please make sure world size is a multiple of TP size!"
)
pg_mesh = ProcessGroupMesh(dp_size, args.tp_size)
tp_group = pg_mesh.get_group_along_axis(TP_AXIS)
coordinates = pg_mesh._coord
dp_rank = coordinates[DP_AXIS]
tp_rank = coordinates[TP_AXIS]
shard_config = (
ShardConfig(
tensor_parallel_process_group=tp_group,
enable_tensor_parallelism=args.tp_size > 1,
parallel_output=False,
enable_all_optimization=True,
)
if args.tp_size > 1
else None
)
inference_data = {}
dataset_classes = {}
debug_args = {}
few_shot_args = {}
multiturn_args = {}
config = utils.jload(args.config)
model_parameters = config["model"]
dataset_parameters = config["dataset"]
for dataset_parameter in dataset_parameters:
path = dataset_parameter["path"]
save_path = dataset_parameter["save_path"]
dataset_name = dataset_parameter["name"]
debug_args[dataset_name] = dataset_parameter["debug"]
few_shot_args[dataset_name] = dataset_parameter["few_shot"]
forward_only = dataset_parameter.get("forward_only", False)
load_train = dataset_parameter.get("load_train", False)
load_reference = dataset_parameter.get("load_reference", False)
if not args.load_dataset:
if os.path.exists(save_path):
dataset_ = utils.jload(save_path)
inference_data[dataset_name] = dataset_["test"]
else:
raise Exception(
"Can't find the converted dataset. You may set load_dataset True to store the dataset first."
)
continue
dataset_classes[dataset_name] = dataset_parameter["dataset_class"]
dataset_class = eval(f"dataset.{dataset_parameter['dataset_class']}")
if not issubclass(dataset_class, dataset.BaseDataset):
raise ValueError(f"Dataset class {dataset_parameter['dataset_class']} is not a subclass of BaseDataset.")
dataset_ = dataset_class(path, logger, dataset_parameter["few_shot"], forward_only, load_train, load_reference)
dataset_.save(save_path)
if hasattr(dataset_, "multiturn") and dataset_.multiturn:
multiturn_args[dataset_name] = True
logger.info(f"{dataset_parameter['dataset_class']} is a multiturn dataset.")
else:
multiturn_args[dataset_name] = False
inference_data[dataset_name] = dataset_.dataset["test"]
if load_train and "train" in dataset_.dataset:
new_dataset_name = f"{dataset_name}_train"
debug_args[new_dataset_name] = dataset_parameter["debug"]
few_shot_args[new_dataset_name] = dataset_parameter["few_shot"]
inference_data[new_dataset_name] = dataset_.dataset["train"]
dataset_classes[new_dataset_name] = dataset_parameter["dataset_class"]
if load_reference and "reference" in dataset_.dataset:
new_dataset_name = f"{dataset_name}_reference"
debug_args[new_dataset_name] = dataset_parameter["debug"]
few_shot_args[new_dataset_name] = dataset_parameter["few_shot"]
inference_data[new_dataset_name] = dataset_.dataset["reference"]
dataset_classes[new_dataset_name] = dataset_parameter["dataset_class"]
if rank == 0:
logger.info(f"Dataset for inference are: {list(inference_data.keys())}")
for model_parameter in model_parameters:
model_name = model_parameter["name"]
model_class = eval(f"models.{model_parameter['model_class']}")
        parameters = model_parameter["parameters"]
        batch_size = parameters["batch_size"]
        parameters.update({"logger": logger})
        parameters.update({"prompt_template": utils.prompt_templates[parameters["prompt_template"]]})
        parameters.update({"shard_config": shard_config})
        model_ = model_class(**parameters)
if not issubclass(model_class, models.BaseModel):
raise ValueError(f"Model class {model_parameter['model_class']} is not a subclass of BaseModel.")
for dataset_name, split_data in inference_data.items():
prev_questions = None
for category, category_data in split_data.items():
num_turn = category_data["inference_kwargs"].get("turns", 1)
if few_shot_args[dataset_name] and category_data["inference_kwargs"].get("few_shot_data", None) is None:
raise Exception(f"Dataset {dataset_name} doesn't have few-shot data for category {category}!")
answers_to_dump = copy.deepcopy(category_data)
for turn in range(num_turn):
if turn == 0:
dist_dataset = DistributedDataset(category_data["data"])
else:
dist_dataset = DistributedDataset(prev_questions)
sampler = DistributedSampler(
dist_dataset,
num_replicas=pg_mesh.size(DP_AXIS),
rank=pg_mesh.coordinate(DP_AXIS),
shuffle=False,
)
questions_loader = DataLoader(
dist_dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=8,
pin_memory=True,
collate_fn=lambda x: x,
)
category_data["inference_kwargs"]["dataset"] = dataset_name
category_data["inference_kwargs"]["category"] = category
answers_per_rank = model_.inference(
data_loader=questions_loader,
inference_kwargs=category_data["inference_kwargs"],
debug=debug_args[dataset_name],
)
prev_questions = answers_per_rank
answers_to_dump["data"] = answers_per_rank
if tp_rank == 0:
utils.jdump(
answers_to_dump,
os.path.join(
args.inference_save_path,
model_name,
f"{dataset_name}_{category}_inference_results_dp_rank{dp_rank}.json",
),
)
logger.info(f"Rank {rank} peak device mem: {accelerator.max_memory_allocated()/1024**3:.3f} GB")
del model_
accelerator.empty_cache()
dist.barrier()
if rank == 0:
model_names = [model_parameter["name"] for model_parameter in model_parameters]
dataset_names = {key: list(inference_data[key].keys()) for key in inference_data}
rm_and_merge(dp_size, args.inference_save_path, model_names, dataset_names, dataset_classes)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ColossalEval inference process.")
parser.add_argument("--config", type=str, default=None, required=True, help="path to config file")
parser.add_argument("--load_dataset", default=False, action="store_true")
parser.add_argument("--inference_save_path", type=str, default=None, help="path to save inference results")
parser.add_argument("--tp_size", type=int, default=1, help="tensor parallel size, used for large model inference")
args = parser.parse_args()
main(args)
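    # Example launch (illustrative; exact flags depend on your setup). The script is
    # meant to be started with a distributed launcher, e.g.:
    #   torchrun --nproc_per_node 4 inference.py --config config.json --load_dataset \
    #            --inference_save_path ./results --tp_size 2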
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalEval/examples/dataset_evaluation/eval_dataset.py | applications/ColossalEval/examples/dataset_evaluation/eval_dataset.py | import argparse
import os
import tabulate
from colossal_eval.evaluate.dataset_evaluator import DatasetEvaluator
from colossal_eval.utils import jdump, jload
def main(args):
config = jload(args.config)
evaluation_results = {dataset["name"]: {} for dataset in config["dataset"]}
evaluation_results_table = {dataset["name"]: {} for dataset in config["dataset"]}
evaluator = DatasetEvaluator(args.config, args.evaluation_results_save_path)
for dataset_parameter in config["dataset"]:
dataset_name = dataset_parameter["name"]
metrics = dataset_parameter["metrics"]
results_metric_model = {metric: {model["name"]: None for model in config["model"]} for metric in metrics}
for model in config["model"]:
model_name = model["name"]
data = jload(
os.path.join(args.inference_results_path, model_name, f"{dataset_name}_inference_results.json")
)
results = evaluator.get_evaluation_results(data, dataset_name, model_name, metrics)
for metric, score in results.items():
if metric not in results_metric_model:
results_metric_model[metric] = {model["name"]: None for model in config["model"]}
results_metric_model[metric][model_name] = score["ALL"]
evaluation_results[dataset_name][model_name] = results
evaluation_results_table[dataset_name] = results_metric_model
table = []
header = ["dataset", "metric"] + [model["name"] for model in config["model"]]
table.append(header)
for dataset_parameter in config["dataset"]:
dataset_name = dataset_parameter["name"]
metrics = dataset_parameter["metrics"]
for metric, model_results in evaluation_results_table[dataset_name].items():
row = [dataset_name]
for model, score in model_results.items():
if len(row) == 1:
row.extend([metric, "{:.02f}".format(score)])
else:
row.append("{:.02f}".format(score))
table.append(row)
table = tabulate.tabulate(table, headers="firstrow")
print(table)
os.makedirs(args.evaluation_results_save_path, exist_ok=True)
with open(os.path.join(args.evaluation_results_save_path, "evaluation_results_table.txt"), "w") as file:
file.write(table)
jdump(evaluation_results, os.path.join(args.evaluation_results_save_path, "evaluation_results.json"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ColossalEval evaluation process.")
parser.add_argument("--config", type=str, default=None, required=True, help="path to config file")
parser.add_argument("--inference_results_path", type=str, default=None, help="path to inference results")
parser.add_argument(
"--evaluation_results_save_path", type=str, default=None, help="path to save evaluation results"
)
args = parser.parse_args()
main(args)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalEval/examples/gpt_evaluation/inference.py | applications/ColossalEval/examples/gpt_evaluation/inference.py | import argparse
import copy
import os
from typing import Dict, List
import torch
import torch.distributed as dist
from colossal_eval import dataset, models, utils
import colossalai
from colossalai.cluster import ProcessGroupMesh
from colossalai.logging import get_dist_logger
from colossalai.shardformer import ShardConfig
logger = get_dist_logger()
def rm_and_merge(
dp_size: int,
save_path: str,
model_names: List[str],
dataset_names: Dict[str, List],
dataset_classes: Dict[str, List],
) -> None:
"""
    Remove inference results per rank and merge them into one file.
Args:
dp_size: Number of groups for data parallel.
save_path: The folder for storing inference results.
model_names: Names of models for inference.
        dataset_names: Names of datasets for inference.
        dataset_classes: Dataset class for each set of inference results. The dataset class is saved to streamline the evaluation process.
"""
for model_name in model_names:
for dataset_name, categories in dataset_names.items():
all_answers_with_dataset_class = {}
all_answers_with_dataset_class["dataset_class"] = dataset_classes[dataset_name]
all_answers = {}
for category in categories:
all_answers[category] = {"data": []}
answers = {"data": []}
for r in range(dp_size):
directory = os.path.join(
save_path, model_name, f"{dataset_name}_{category}_inference_results_dp_rank{r}.json"
)
if not os.path.exists(directory):
raise Exception(
f"Directory {directory} not found. There may be an error during inference time."
)
else:
rank_answers = utils.jload(directory)
answers["data"].extend(rank_answers["data"])
answers["inference_kwargs"] = rank_answers["inference_kwargs"]
for r in range(dp_size):
try:
directory = os.path.join(
save_path, model_name, f"{dataset_name}_{category}_inference_results_dp_rank{r}.json"
)
os.remove(directory)
except Exception as e:
print(e)
all_answers[category] = answers
all_answers_with_dataset_class["inference_results"] = all_answers
logger.info(f"Save inference results of model {model_name} on dataset {dataset_name}.")
utils.jdump(
all_answers_with_dataset_class,
os.path.join(save_path, model_name, f"{dataset_name}_inference_results.json"),
)
logger.info(f"Save inference results of model {model_name} for all dataset.")
logger.info(f"Save inference results of all models for all dataset.")
def main(args):
colossalai.launch_from_torch(seed=42)
world_size = dist.get_world_size()
rank = dist.get_rank()
DP_AXIS = 0
TP_AXIS = 1
dp_size = world_size // args.tp_size
if rank == 0:
logger.info("Setting TP and DP...")
logger.info(f"TP size: {args.tp_size}, DP size: {dp_size}")
if world_size % args.tp_size != 0:
raise Exception(
f"TP size is {args.tp_size} while world size is {world_size}! Please make sure world size is a multiple of TP size!"
)
pg_mesh = ProcessGroupMesh(dp_size, args.tp_size)
tp_group = pg_mesh.get_group_along_axis(TP_AXIS)
coordinates = pg_mesh._coord
dp_rank = coordinates[DP_AXIS]
tp_rank = coordinates[TP_AXIS]
shard_config = (
ShardConfig(tensor_parallel_process_group=tp_group, enable_tensor_parallelism=args.tp_size > 1)
if args.tp_size > 1
else None
)
inference_data = {}
dataset_classes = {}
debug_args = {}
few_shot_args = {}
multiturn_args = {}
config = utils.jload(args.config)
model_parameters = config["model"]
dataset_parameters = config["dataset"]
for dataset_parameter in dataset_parameters:
path = dataset_parameter["path"]
save_path = dataset_parameter["save_path"]
dataset_name = dataset_parameter["name"]
debug_args[dataset_name] = dataset_parameter["debug"]
few_shot_args[dataset_name] = dataset_parameter["few_shot"]
forward_only = dataset_parameter.get("forward_only", False)
load_train = dataset_parameter.get("load_train", False)
load_reference = dataset_parameter.get("load_reference", False)
if not args.load_dataset:
if os.path.exists(save_path):
dataset_ = utils.jload(save_path)
inference_data[dataset_name] = dataset_["test"]
else:
raise Exception(
"Can't find the converted dataset. You may set load_dataset True to store the dataset first."
)
continue
dataset_classes[dataset_name] = dataset_parameter["dataset_class"]
dataset_class = eval(f"dataset.{dataset_parameter['dataset_class']}")
if not issubclass(dataset_class, dataset.BaseDataset):
raise ValueError(f"Dataset class {dataset_parameter['dataset_class']} is not a subclass of BaseDataset.")
dataset_ = dataset_class(path, logger, dataset_parameter["few_shot"], forward_only, load_train, load_reference)
dataset_.save(save_path)
if hasattr(dataset_, "multiturn") and dataset_.multiturn:
multiturn_args[dataset_name] = True
logger.info(f"{dataset_parameter['dataset_class']} is a multiturn dataset.")
else:
multiturn_args[dataset_name] = False
inference_data[dataset_name] = dataset_.dataset["test"]
if load_train and "train" in dataset_.dataset:
new_dataset_name = f"{dataset_name}_train"
debug_args[new_dataset_name] = dataset_parameter["debug"]
few_shot_args[new_dataset_name] = dataset_parameter["few_shot"]
inference_data[new_dataset_name] = dataset_.dataset["train"]
dataset_classes[new_dataset_name] = dataset_parameter["dataset_class"]
if load_reference and "reference" in dataset_.dataset:
new_dataset_name = f"{dataset_name}_reference"
debug_args[new_dataset_name] = dataset_parameter["debug"]
few_shot_args[new_dataset_name] = dataset_parameter["few_shot"]
inference_data[new_dataset_name] = dataset_.dataset["reference"]
dataset_classes[new_dataset_name] = dataset_parameter["dataset_class"]
if rank == 0:
logger.info(f"Dataset for inference are: {list(inference_data.keys())}")
for model_parameter in model_parameters:
model_name = model_parameter["name"]
model_class = eval(f"models.{model_parameter['model_class']}")
        parameters = model_parameter["parameters"]
        parameters.update({"logger": logger})
        parameters.update({"prompt_template": utils.prompt_templates[parameters["prompt_template"]]})
        parameters.update({"shard_config": shard_config})
        model_ = model_class(**parameters)
if not issubclass(model_class, models.BaseModel):
raise ValueError(f"Model class {model_parameter['model_class']} is not a subclass of BaseModel.")
for dataset_name, split_data in inference_data.items():
start = 0
prev_questions = None
for category, category_data in split_data.items():
num_turn = category_data["inference_kwargs"].get("turns", 1)
if few_shot_args[dataset_name] and category_data["inference_kwargs"].get("few_shot_data", None) is None:
raise Exception(f"Dataset {dataset_name} doesn't have few-shot data for category {category}!")
answers_to_dump = copy.deepcopy(category_data)
partition_size = len(category_data["data"]) // dp_size
redundant = len(category_data["data"]) % dp_size
# Ensure that the amount of data for inference is as consistent as possible across different processes.
lengths = [partition_size for _ in range(dp_size)]
for j in range(redundant):
lengths[(j + start) % dp_size] += 1
start = (start + redundant) % dp_size
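                # For example (illustrative): 10 samples with dp_size = 4 gives
                # lengths = [3, 3, 2, 2]; `start` rotates so that the extra samples are
                # spread across ranks over successive categories.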
for turn in range(num_turn):
if turn == 0:
questions = category_data["data"][
sum(lengths[0:dp_rank]) : sum(lengths[0:dp_rank]) + lengths[dp_rank]
]
else:
questions = prev_questions
answers_per_rank = model_.inference(
questions, inference_kwargs=category_data["inference_kwargs"], debug=debug_args[dataset_name]
)
prev_questions = answers_per_rank
answers_to_dump["data"] = answers_per_rank
if tp_rank == 0:
utils.jdump(
answers_to_dump,
os.path.join(
args.inference_save_path,
model_name,
f"{dataset_name}_{category}_inference_results_dp_rank{dp_rank}.json",
),
)
logger.info(f"Rank {rank} peak CUDA mem: {torch.cuda.max_memory_allocated()/1024**3:.3f} GB")
del model_
torch.cuda.empty_cache()
dist.barrier()
if rank == 0:
model_names = [model_parameter["name"] for model_parameter in model_parameters]
dataset_names = {key: list(inference_data[key].keys()) for key in inference_data}
rm_and_merge(dp_size, args.inference_save_path, model_names, dataset_names, dataset_classes)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ColossalEval inference process.")
parser.add_argument("--config", type=str, default=None, required=True, help="path to config file")
parser.add_argument("--load_dataset", default=False, action="store_true")
parser.add_argument("--inference_save_path", type=str, default=None, help="path to save inference results")
parser.add_argument("--tp_size", type=int, default=1, help="tensor parallel size, used for large model inference")
args = parser.parse_args()
main(args)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalEval/examples/gpt_evaluation/eval.py | applications/ColossalEval/examples/gpt_evaluation/eval.py | import argparse
import os
import openai
from colossal_eval.evaluate.evaluator import Evaluator
from colossal_eval.utils import jload
def main(args):
assert len(args.answer_file_list) == len(
args.model_name_list
), "The number of answer files and model names should be equal!"
# load config
config = jload(args.config_file)
if config["language"] in ["cn", "en"]:
# get metric settings for all categories
metrics_per_category = {}
for category in config["category"].keys():
metrics_all = {}
for metric_type, metrics in config["category"][category].items():
metrics_all[metric_type] = metrics
metrics_per_category[category] = metrics_all
battle_prompt = None
if args.battle_prompt_file:
battle_prompt = jload(args.battle_prompt_file)
gpt_evaluation_prompt = None
if args.gpt_evaluation_prompt_file:
gpt_evaluation_prompt = jload(args.gpt_evaluation_prompt_file)
if len(args.model_name_list) == 2 and not battle_prompt:
raise Exception("No prompt file for battle provided. Please specify the prompt file for battle!")
if len(args.model_name_list) == 1 and not gpt_evaluation_prompt:
raise Exception(
"No prompt file for gpt evaluation provided. Please specify the prompt file for gpt evaluation!"
)
if args.gpt_model == "text-davinci-003" and args.gpt_with_reference:
raise Exception(
"GPT evaluation with reference is not supported for text-davinci-003. You should specify chat models such as gpt-3.5-turbo or gpt-4."
)
# initialize evaluator
evaluator = Evaluator(
metrics_per_category,
battle_prompt,
gpt_evaluation_prompt,
args.gpt_model,
config["language"],
args.gpt_with_reference,
)
if len(args.model_name_list) == 2:
answers_1 = jload(args.answer_file_list[0])
answers_2 = jload(args.answer_file_list[1])
answers1 = []
for category, value in answers_1.items():
answers1.extend(value["data"])
answers2 = []
for category, value in answers_2.items():
answers2.extend(value["data"])
assert len(answers1) == len(answers2), "The number of answers for two models should be equal!"
evaluator.battle(answers1=answers1, answers2=answers2)
evaluator.save(args.save_path, args.model_name_list)
elif len(args.model_name_list) == 1:
targets = jload(args.target_file)
answers = jload(args.answer_file_list[0])
references = []
for category, value in targets["test"].items():
references.extend(value["data"])
predictions = []
for category, value in answers.items():
predictions.extend(value["data"])
assert len(references) == len(
predictions
), "The number of target answers and model answers should be equal!"
evaluator.evaluate(
answers=predictions, targets=references, save_path=args.save_path, model_name=args.model_name_list[0]
)
evaluator.save(args.save_path, args.model_name_list)
else:
raise ValueError("Unsupported number of answer files and model names!")
else:
raise ValueError(f'Unsupported language {config["language"]}!')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ColossalAI LLM evaluation pipeline.")
parser.add_argument(
"--config_file", type=str, default=None, required=True, help="path to the file of target results"
)
parser.add_argument("--battle_prompt_file", type=str, default=None, help="path to the prompt file for battle")
parser.add_argument(
"--gpt_evaluation_prompt_file", type=str, default=None, help="path to the prompt file for gpt evaluation"
)
parser.add_argument("--target_file", type=str, default=None, help="path to the target answer (ground truth) file")
parser.add_argument(
"--answer_file_list",
type=str,
nargs="+",
default=[],
required=True,
help="path to the answer files of at most 2 models",
)
parser.add_argument(
"--model_name_list", type=str, nargs="+", default=[], required=True, help="the names of at most 2 models"
)
parser.add_argument(
"--gpt_model",
default="gpt-3.5-turbo-16k",
choices=["text-davinci-003", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4"],
help="which GPT model to use for evaluation",
)
parser.add_argument(
"--gpt_with_reference",
default=False,
action="store_true",
help="whether to include reference answer in gpt evaluation",
)
parser.add_argument("--save_path", type=str, default="results", help="path to save evaluation results")
parser.add_argument("--openai_key", type=str, default=None, required=True, help="Your openai key")
args = parser.parse_args()
if args.openai_key is not None:
os.environ["OPENAI_API_KEY"] = args.openai_key
openai.api_key = os.getenv("OPENAI_API_KEY")
main(args)
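# A hypothetical single-model GPT-evaluation invocation (not part of the original file); every
# path below is a placeholder that should point to your own config, prompt, target and answer files:
#
#   python eval.py \
#       --config_file config/evaluation/config.json \
#       --gpt_evaluation_prompt_file config/evaluation/gpt_prompt.json \
#       --target_file data/targets.json \
#       --answer_file_list outputs/model_a_answers.json \
#       --model_name_list model_a \
#       --gpt_model gpt-3.5-turbo-16k \
#       --save_path results \
#       --openai_key $OPENAI_API_KEY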
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/setup.py | applications/ColossalQA/setup.py | from setuptools import find_packages, setup
def fetch_requirements(path):
with open(path, "r") as fd:
return [r.strip() for r in fd.readlines()]
def fetch_readme():
with open("README.md", encoding="utf-8") as f:
return f.read()
def fetch_version():
with open("version.txt", "r") as f:
return f.read().strip()
print(find_packages(exclude=("tests", "*.egg-info", "data", "examples")))
setup(
name="colossalqa",
version=fetch_version(),
packages=find_packages(exclude=("tests", "*.egg-info", "data", "examples")),
description="Colossal-AI powered retrieval QA",
long_description=fetch_readme(),
long_description_content_type="text/markdown",
license="Apache Software License 2.0",
url="https://github.com/hpcaitech/Coati",
install_requires=fetch_requirements("requirements.txt"),
python_requires=">=3.6",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Environment :: GPU :: NVIDIA CUDA",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: System :: Distributed Computing",
],
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/mylogging.py | applications/ColossalQA/colossalqa/mylogging.py | """
Class for logging with extra control for debugging
"""
import logging
class ColossalQALogger:
"""This is a distributed event logger class essentially based on :class:`logging`.
Args:
name (str): The name of the logger.
Note:
Logging types: ``info``, ``warning``, ``debug`` and ``error``
"""
__instances = dict()
def __init__(self, name):
if name in ColossalQALogger.__instances:
raise ValueError("Logger with the same name has been created")
else:
self._name = name
self._logger = logging.getLogger(name)
ColossalQALogger.__instances[name] = self
@staticmethod
def get_instance(name: str):
"""Get the unique single logger instance based on name.
Args:
name (str): The name of the logger.
Returns:
            ColossalQALogger: A ColossalQALogger object
"""
if name in ColossalQALogger.__instances:
return ColossalQALogger.__instances[name]
else:
logger = ColossalQALogger(name=name)
return logger
def info(self, message: str, verbose: bool = False) -> None:
"""Log an info message.
Args:
message (str): The message to be logged.
verbose (bool): Whether to print the message to stdout.
"""
if verbose:
logging.basicConfig(level=logging.INFO)
self._logger.info(message)
def warning(self, message: str, verbose: bool = False) -> None:
"""Log a warning message.
Args:
message (str): The message to be logged.
verbose (bool): Whether to print the message to stdout.
"""
if verbose:
self._logger.warning(message)
def debug(self, message: str, verbose: bool = False) -> None:
"""Log a debug message.
Args:
message (str): The message to be logged.
verbose (bool): Whether to print the message to stdout.
"""
if verbose:
self._logger.debug(message)
def error(self, message: str) -> None:
"""Log an error message.
Args:
message (str): The message to be logged.
"""
self._logger.error(message)
def get_logger(name: str = None, level=logging.INFO) -> ColossalQALogger:
"""
Get the logger by name, if name is None, return the default logger
"""
if name:
logger = ColossalQALogger.get_instance(name=name)
else:
logger = ColossalQALogger.get_instance(name="colossalqa")
return logger
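# A minimal usage sketch (not part of the original module): obtain a named logger and emit a few
# messages; the logger name below is an arbitrary example. Note that info/debug calls only log
# when verbose=True, while error is always logged.
if __name__ == "__main__":
    demo_logger = get_logger("colossalqa_demo")
    demo_logger.info("Loading retrieval documents...", verbose=True)
    demo_logger.error("This error message is always logged")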
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/retrieval_conversation_en.py | applications/ColossalQA/colossalqa/retrieval_conversation_en.py | """
Script for English retrieval based conversation system
"""
from typing import Tuple
from colossalqa.chain.retrieval_qa.base import RetrievalQA
from colossalqa.local.llm import ColossalAPI, ColossalLLM
from colossalqa.memory import ConversationBufferWithSummary
from colossalqa.mylogging import get_logger
from colossalqa.prompt.prompt import PROMPT_DISAMBIGUATE_EN, PROMPT_RETRIEVAL_QA_EN
from colossalqa.retriever import CustomRetriever
from langchain import LLMChain
logger = get_logger()
class EnglishRetrievalConversation:
"""
    Wrapper class for English retrieval conversation system
"""
def __init__(self, retriever: CustomRetriever, model_path: str, model_name: str) -> None:
"""
        Setup retrieval qa chain for English retrieval based QA
"""
logger.info(f"model_name: {model_name}; model_path: {model_path}", verbose=True)
colossal_api = ColossalAPI.get_api(model_name, model_path)
self.llm = ColossalLLM(n=1, api=colossal_api)
# Define the retriever
self.retriever = retriever
# Define the chain to preprocess the input
# Disambiguate the input. e.g. "What is the capital of that country?" -> "What is the capital of France?"
        # Uses the disambiguation prompt
self.llm_chain_disambiguate = LLMChain(
llm=self.llm,
prompt=PROMPT_DISAMBIGUATE_EN,
llm_kwargs={"max_new_tokens": 30, "temperature": 0.6, "do_sample": True},
)
self.retriever.set_rephrase_handler(self.disambiguity)
# Define memory with summarization ability
self.memory = ConversationBufferWithSummary(
llm=self.llm, llm_kwargs={"max_new_tokens": 50, "temperature": 0.6, "do_sample": True}
)
self.memory.initiate_document_retrieval_chain(
self.llm,
PROMPT_RETRIEVAL_QA_EN,
self.retriever,
chain_type_kwargs={
"chat_history": "",
},
)
self.retrieval_chain = RetrievalQA.from_chain_type(
llm=self.llm,
verbose=False,
chain_type="stuff",
retriever=self.retriever,
chain_type_kwargs={"prompt": PROMPT_RETRIEVAL_QA_EN, "memory": self.memory},
llm_kwargs={"max_new_tokens": 50, "temperature": 0.75, "do_sample": True},
)
def disambiguity(self, input: str):
out = self.llm_chain_disambiguate.run(input=input, chat_history=self.memory.buffer, stop=["\n"])
return out.split("\n")[0]
@classmethod
def from_retriever(
cls, retriever: CustomRetriever, model_path: str, model_name: str
) -> "EnglishRetrievalConversation":
return cls(retriever, model_path, model_name)
def run(self, user_input: str, memory: ConversationBufferWithSummary) -> Tuple[str, ConversationBufferWithSummary]:
if memory:
# TODO add translation chain here
self.memory.buffered_history.messages = memory.buffered_history.messages
self.memory.summarized_history_temp.messages = memory.summarized_history_temp.messages
return (
self.retrieval_chain.run(
query=user_input,
stop=[self.memory.human_prefix + ": "],
rejection_trigger_keywords=["cannot answer the question"],
rejection_answer="Sorry, this question cannot be answered based on the information provided.",
).split("\n")[0],
self.memory,
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/memory.py | applications/ColossalQA/colossalqa/memory.py | """
Implement a memory class for storing conversation history
Support long term and short term memory
"""
from typing import Any, Dict, List
from colossalqa.chain.memory.summary import ConversationSummaryMemory
from colossalqa.chain.retrieval_qa.load_chain import load_qa_chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
from langchain.schema import BaseChatMessageHistory
from langchain.schema.messages import BaseMessage
from langchain.schema.retriever import BaseRetriever
from pydantic import Field
class ConversationBufferWithSummary(ConversationSummaryMemory):
"""Memory class for storing information about entities."""
# Define dictionary to store information about entities.
# Store the most recent conversation history
buffered_history: BaseChatMessageHistory = Field(default_factory=ChatMessageHistory)
# Temp buffer
summarized_history_temp: BaseChatMessageHistory = Field(default_factory=ChatMessageHistory)
human_prefix: str = "Human"
ai_prefix: str = "Assistant"
buffer: str = "" # Formated conversation in str
existing_summary: str = "" # Summarization of stale converstion in str
# Define key to pass information about entities into prompt.
memory_key: str = "chat_history"
input_key: str = "question"
retriever: BaseRetriever = None
max_tokens: int = 2000
chain: BaseCombineDocumentsChain = None
    input_chain_type_kwargs: Dict = {}
@property
def buffer(self) -> Any:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
self.buffer = self.format_dialogue()
return self.buffer
@property
def buffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return self.buffered_history.messages
def clear(self):
"""Clear all the memory"""
self.buffered_history.clear()
self.summarized_history_temp.clear()
def initiate_document_retrieval_chain(
self, llm: Any, prompt_template: Any, retriever: Any, chain_type_kwargs: Dict[str, Any] = {}
) -> None:
"""
        Initiate a document retrieval chain so that the length of the full prompt
        (including the retrieved documents) can be calculated.
Args:
llm: the language model for the retrieval chain (we won't actually return the output)
prompt_template: the prompt template for constructing the retrieval chain
retriever: the retriever for the retrieval chain
            max_tokens: the max length of the prompt (not including the output)
chain_type_kwargs: the kwargs for the retrieval chain
memory_key: the key for the chat history
input_key: the key for the input query
"""
self.retriever = retriever
input_chain_type_kwargs = {k: v for k, v in chain_type_kwargs.items() if k not in [self.memory_key]}
self.input_chain_type_kwargs = input_chain_type_kwargs
self.chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt_template, **self.input_chain_type_kwargs)
@property
def memory_variables(self) -> List[str]:
"""Define the variables we are providing to the prompt."""
return [self.memory_key]
def format_dialogue(self, lang: str = "en") -> str:
"""Format memory into two parts--- summarization of historical conversation and most recent conversation"""
if len(self.summarized_history_temp.messages) != 0:
for i in range(int(len(self.summarized_history_temp.messages) / 2)):
self.existing_summary = (
self.predict_new_summary(
self.summarized_history_temp.messages[i * 2 : i * 2 + 2], self.existing_summary, stop=["\n\n"]
)
.strip()
.split("\n")[0]
.strip()
)
for i in range(int(len(self.summarized_history_temp.messages) / 2)):
self.summarized_history_temp.messages.pop(0)
self.summarized_history_temp.messages.pop(0)
conversation_buffer = []
for t in self.buffered_history.messages:
if t.type == "human":
prefix = self.human_prefix
else:
prefix = self.ai_prefix
conversation_buffer.append(prefix + ": " + t.content)
conversation_buffer = "\n".join(conversation_buffer)
if len(self.existing_summary) > 0:
if lang == "en":
message = f"A summarization of historical conversation:\n{self.existing_summary}\nMost recent conversation:\n{conversation_buffer}"
elif lang == "zh":
message = f"历史对话概要:\n{self.existing_summary}\n最近的对话:\n{conversation_buffer}"
else:
raise ValueError("Unsupported language")
return message
else:
message = conversation_buffer
return message
def get_conversation_length(self):
"""Get the length of the formatted conversation"""
prompt = self.format_dialogue()
length = self.llm.get_num_tokens(prompt)
return length
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load the memory variables.
        Summarize oversized conversation history to fit into the length constraint defined by max_tokens
Args:
inputs: the kwargs of the chain of your definition
Returns:
            a dict that maps from memory key to the formatted dialogue
            the formatted dialogue has the following format
if conversation is too long:
A summarization of historical conversation:
{summarization}
Most recent conversation:
Human: XXX
Assistant: XXX
...
otherwise
Human: XXX
Assistant: XXX
...
"""
# Calculate remain length
if "input_documents" in inputs:
# Run in a retrieval qa chain
docs = inputs["input_documents"]
else:
# For test
docs = self.retriever.get_relevant_documents(inputs[self.input_key])
inputs[self.memory_key] = ""
inputs = {k: v for k, v in inputs.items() if k in [self.chain.input_key, self.input_key, self.memory_key]}
prompt_length = self.chain.prompt_length(docs, **inputs)
remain = self.max_tokens - prompt_length
while self.get_conversation_length() > remain:
if len(self.buffered_history.messages) <= 2:
raise RuntimeError("Exceed max_tokens, trunk size of retrieved documents is too large")
temp = self.buffered_history.messages.pop(0)
self.summarized_history_temp.messages.append(temp)
temp = self.buffered_history.messages.pop(0)
self.summarized_history_temp.messages.append(temp)
return {self.memory_key: self.format_dialogue()}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.buffered_history.add_user_message(input_str.strip())
self.buffered_history.add_ai_message(output_str.strip())
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/retrieval_conversation_zh.py | applications/ColossalQA/colossalqa/retrieval_conversation_zh.py | """
Script for Chinese retrieval based conversation system backed by ChatGLM
"""
from typing import Tuple
from colossalqa.chain.retrieval_qa.base import RetrievalQA
from colossalqa.local.llm import ColossalAPI, ColossalLLM
from colossalqa.memory import ConversationBufferWithSummary
from colossalqa.mylogging import get_logger
from colossalqa.prompt.prompt import PROMPT_DISAMBIGUATE_ZH, PROMPT_RETRIEVAL_QA_ZH, SUMMARY_PROMPT_ZH
from colossalqa.retriever import CustomRetriever
from langchain import LLMChain
logger = get_logger()
class ChineseRetrievalConversation:
"""
Wrapper class for Chinese retrieval conversation system
"""
def __init__(self, retriever: CustomRetriever, model_path: str, model_name: str) -> None:
"""
Setup retrieval qa chain for Chinese retrieval based QA
"""
# Local coati api
logger.info(f"model_name: {model_name}; model_path: {model_path}", verbose=True)
colossal_api = ColossalAPI.get_api(model_name, model_path)
self.llm = ColossalLLM(n=1, api=colossal_api)
# Define the retriever
self.retriever = retriever
# Define the chain to preprocess the input
# Disambiguate the input. e.g. "What is the capital of that country?" -> "What is the capital of France?"
        # Uses the disambiguation prompt
self.llm_chain_disambiguate = LLMChain(
llm=self.llm,
prompt=PROMPT_DISAMBIGUATE_ZH,
llm_kwargs={"max_new_tokens": 30, "temperature": 0.6, "do_sample": True},
)
self.retriever.set_rephrase_handler(self.disambiguity)
# Define memory with summarization ability
self.memory = ConversationBufferWithSummary(
llm=self.llm,
prompt=SUMMARY_PROMPT_ZH,
human_prefix="用户",
ai_prefix="Assistant",
max_tokens=2000,
llm_kwargs={"max_new_tokens": 50, "temperature": 0.6, "do_sample": True},
)
self.memory.initiate_document_retrieval_chain(
self.llm,
PROMPT_RETRIEVAL_QA_ZH,
self.retriever,
chain_type_kwargs={
"chat_history": "",
},
)
self.retrieval_chain = RetrievalQA.from_chain_type(
llm=self.llm,
verbose=False,
chain_type="stuff",
retriever=self.retriever,
chain_type_kwargs={"prompt": PROMPT_RETRIEVAL_QA_ZH, "memory": self.memory},
llm_kwargs={"max_new_tokens": 150, "temperature": 0.9, "do_sample": True},
)
def disambiguity(self, input: str):
out = self.llm_chain_disambiguate.run(input=input, chat_history=self.memory.buffer, stop=["\n"])
return out.split("\n")[0]
@classmethod
def from_retriever(
cls, retriever: CustomRetriever, model_path: str, model_name: str
) -> "ChineseRetrievalConversation":
return cls(retriever, model_path, model_name)
def run(self, user_input: str, memory: ConversationBufferWithSummary) -> Tuple[str, ConversationBufferWithSummary]:
if memory:
# TODO add translation chain here
self.memory.buffered_history.messages = memory.buffered_history.messages
self.memory.summarized_history_temp.messages = memory.summarized_history_temp.messages
return (
self.retrieval_chain.run(
query=user_input,
stop=["</答案>"],
doc_prefix="支持文档",
rejection_trigger_keywords=["无法回答该问题"],
rejection_answer="抱歉,根据提供的信息无法回答该问题。",
).split("\n")[0],
self.memory,
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/utils.py | applications/ColossalQA/colossalqa/utils.py | import re
from typing import Union
from colossalqa.mylogging import get_logger
from sqlalchemy import Engine, MetaData, create_engine
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.declarative import declarative_base
logger = get_logger()
def drop_table(engine: Engine) -> None:
"""
    Drop all existing tables
"""
Base = declarative_base()
metadata = MetaData()
metadata.reflect(bind=engine)
for key in metadata.tables:
table = metadata.tables[key]
if table is not None:
Base.metadata.drop_all(engine, [table], checkfirst=True)
def create_empty_sql_database(database_uri):
try:
# Create an SQLAlchemy engine to connect to the database
engine = create_engine(database_uri)
# Create the database
engine.connect()
logger.info(f"Database created at {database_uri}")
except SQLAlchemyError as e:
logger.error(f"Error creating database: {str(e)}")
return engine, database_uri
def destroy_sql_database(sql_engine: Union[Engine, str]) -> None:
"""
    Destroy an SQL database
"""
if isinstance(sql_engine, str):
sql_engine = create_engine(sql_engine)
drop_table(sql_engine)
sql_engine.dispose()
sql_engine = None
def detect_lang_naive(s):
"""
Naive function for language detection, should be replaced by an independent layer
"""
remove_nota = "[’·°–!\"#$%&'()*+,-./:;<=>?@,。?★、…【】()《》?“”‘’![\\]^_`{|}~]+"
s = re.sub(remove_nota, "", s)
s = re.sub("[0-9]", "", s).strip()
res = re.sub("[a-zA-Z]", "", s).strip()
if len(res) <= 0:
return "en"
else:
return "zh"
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/__init__.py | applications/ColossalQA/colossalqa/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/retriever.py | applications/ColossalQA/colossalqa/retriever.py | """
Code for custom retriever with incremental update
"""
import copy
import hashlib
import os
from collections import defaultdict
from typing import Any, Callable, Dict, List
from colossalqa.mylogging import get_logger
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.embeddings.base import Embeddings
from langchain.indexes import SQLRecordManager, index
from langchain.schema.retriever import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.chroma import Chroma
logger = get_logger()
class CustomRetriever(BaseRetriever):
"""
Custom retriever class with support for incremental update of indexes
"""
vector_stores: Dict[str, VectorStore] = {}
sql_index_database: Dict[str, str] = {}
record_managers: Dict[str, SQLRecordManager] = {}
sql_db_chains = []
k = 3
rephrase_handler: Callable = None
    buffer: List = []
buffer_size: int = 5
verbose: bool = False
sql_file_path: str = None
@classmethod
def from_documents(
cls,
documents: List[Document],
embeddings: Embeddings,
**kwargs: Any,
) -> BaseRetriever:
k = kwargs.pop("k", 3)
cleanup = kwargs.pop("cleanup", "incremental")
mode = kwargs.pop("mode", "by_source")
ret = cls(k=k)
ret.add_documents(documents, embedding=embeddings, cleanup=cleanup, mode=mode)
return ret
def add_documents(
self,
docs: Dict[str, Document] = [],
cleanup: str = "incremental",
mode: str = "by_source",
embedding: Embeddings = None,
) -> None:
"""
Add documents to retriever
Args:
docs: the documents to add
cleanup: choose from "incremental" (update embeddings, skip existing embeddings) and "full" (destroy and rebuild retriever)
mode: choose from "by source" (documents are grouped by source) and "merge" (documents are merged into one vector store)
"""
if cleanup == "full":
# Cleanup
for source in self.vector_stores:
os.remove(self.sql_index_database[source])
# Add documents
data_by_source = defaultdict(list)
if mode == "by_source":
for doc in docs:
data_by_source[doc.metadata["source"]].append(doc)
elif mode == "merge":
data_by_source["merged"] = docs
for source in data_by_source:
if source not in self.vector_stores:
hash_encoding = hashlib.sha3_224(source.encode()).hexdigest()
if os.path.exists(f"{self.sql_file_path}/{hash_encoding}.db"):
# Remove the stale file
os.remove(f"{self.sql_file_path}/{hash_encoding}.db")
# Create a new sql database to store indexes, sql files are stored in the same directory as the source file
sql_path = f"sqlite:///{self.sql_file_path}/{hash_encoding}.db"
                # Record the sql database file path, keyed by its source
self.sql_index_database[source] = f"{self.sql_file_path}/{hash_encoding}.db"
self.vector_stores[source] = Chroma(embedding_function=embedding, collection_name=hash_encoding)
self.record_managers[source] = SQLRecordManager(source, db_url=sql_path)
self.record_managers[source].create_schema()
index(
data_by_source[source],
self.record_managers[source],
self.vector_stores[source],
cleanup=cleanup,
source_id_key="source",
)
def clear_documents(self):
"""Clear all document vectors from database"""
for source in self.vector_stores:
index([], self.record_managers[source], self.vector_stores[source], cleanup="full", source_id_key="source")
self.vector_stores = {}
self.sql_index_database = {}
self.record_managers = {}
def __del__(self):
for source in self.sql_index_database:
if os.path.exists(self.sql_index_database[source]):
os.remove(self.sql_index_database[source])
def set_sql_database_chain(self, db_chains) -> None:
"""
        Set the SQL agent chains used to retrieve information from SQL databases.
        Not used in this version
"""
self.sql_db_chains = db_chains
def set_rephrase_handler(self, handler: Callable = None) -> None:
"""
        Set a handler to preprocess the input str before it is fed into the retriever
"""
self.rephrase_handler = handler
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun = None,
score_threshold: float = None,
return_scores: bool = False,
) -> List[Document]:
"""
This function is called by the retriever to get the relevant documents.
        Recently visited queries are stored in the buffer; if the query is in the buffer, the buffered documents are returned directly.
Args:
query: the query to be searched
run_manager: the callback manager for retriever run
Returns:
documents: the relevant documents
"""
for buffered_doc in self.buffer:
if buffered_doc[0] == query:
return buffered_doc[1]
query_ = str(query)
# Use your existing retriever to get the documents
if self.rephrase_handler:
query = self.rephrase_handler(query)
documents = []
for k in self.vector_stores:
# Retrieve documents from each retriever
vectorstore = self.vector_stores[k]
documents.extend(vectorstore.similarity_search_with_score(query, self.k, score_threshold=score_threshold))
# print(documents)
# Return the top k documents among all retrievers
documents = sorted(documents, key=lambda x: x[1], reverse=False)[: self.k]
if return_scores:
# Return score
documents = copy.deepcopy(documents)
for doc in documents:
doc[0].metadata["score"] = doc[1]
documents = [doc[0] for doc in documents]
# Retrieve documents from sql database (not applicable for the local chains)
for sql_chain in self.sql_db_chains:
documents.append(
Document(
page_content=f"Query: {query} Answer: {sql_chain.run(query)}", metadata={"source": "sql_query"}
)
)
if len(self.buffer) < self.buffer_size:
self.buffer.append([query_, documents])
else:
self.buffer.pop(0)
self.buffer.append([query_, documents])
logger.info(f"retrieved documents:\n{str(documents)}", verbose=self.verbose)
return documents
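# A minimal usage sketch (not part of the original module). FakeEmbeddings and the temporary
# directory are only there to keep the example self-contained; in practice you would pass a real
# embedding model (e.g. HuggingFaceEmbeddings) and a persistent sql_file_path, as done in
# retrieval_conversation_universal.py.
if __name__ == "__main__":
    import tempfile

    from langchain.embeddings import FakeEmbeddings

    example_docs = [
        Document(page_content="ColossalQA supports bilingual retrieval QA.", metadata={"source": "demo.txt"}),
        Document(page_content="Indexes can be updated incrementally.", metadata={"source": "demo.txt"}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_retriever = CustomRetriever(k=1, sql_file_path=tmp_dir, verbose=True)
        demo_retriever.add_documents(
            docs=example_docs, cleanup="incremental", mode="by_source", embedding=FakeEmbeddings(size=16)
        )
        print(demo_retriever.get_relevant_documents("What does ColossalQA support?"))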
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/retrieval_conversation_universal.py | applications/ColossalQA/colossalqa/retrieval_conversation_universal.py | """
Multilingual retrieval based conversation system
"""
from typing import List
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.mylogging import get_logger
from colossalqa.retrieval_conversation_en import EnglishRetrievalConversation
from colossalqa.retrieval_conversation_zh import ChineseRetrievalConversation
from colossalqa.retriever import CustomRetriever
from colossalqa.text_splitter import ChineseTextSplitter
from colossalqa.utils import detect_lang_naive
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
logger = get_logger()
class UniversalRetrievalConversation:
"""
Wrapper class for bilingual retrieval conversation system
"""
def __init__(
self,
embedding_model_path: str = "moka-ai/m3e-base",
embedding_model_device: str = "cpu",
zh_model_path: str = None,
zh_model_name: str = None,
en_model_path: str = None,
en_model_name: str = None,
sql_file_path: str = None,
files_zh: List[List[str]] = None,
files_en: List[List[str]] = None,
text_splitter_chunk_size=100,
text_splitter_chunk_overlap=10,
) -> None:
"""
Wrapper for multilingual retrieval qa class (Chinese + English)
Args:
embedding_model_path: local or huggingface embedding model
            embedding_model_device: device on which the embedding model runs, e.g. "cpu" or "cuda"
            files_zh: [{"data_path": ..., "name": ...}, ...] defines the files used as supporting documents for Chinese retrieval QA
            files_en: [{"data_path": ..., "name": ...}, ...] defines the files used as supporting documents for English retrieval QA
"""
self.embedding = HuggingFaceEmbeddings(
model_name=embedding_model_path,
model_kwargs={"device": embedding_model_device},
encode_kwargs={"normalize_embeddings": False},
)
print("Select files for constructing Chinese retriever")
docs_zh = self.load_supporting_docs(
files=files_zh,
text_splitter=ChineseTextSplitter(
chunk_size=text_splitter_chunk_size, chunk_overlap=text_splitter_chunk_overlap
),
)
# Create retriever
self.information_retriever_zh = CustomRetriever(
k=3, sql_file_path=sql_file_path.replace(".db", "_zh.db"), verbose=True
)
self.information_retriever_zh.add_documents(
docs=docs_zh, cleanup="incremental", mode="by_source", embedding=self.embedding
)
print("Select files for constructing English retriever")
docs_en = self.load_supporting_docs(
files=files_en,
text_splitter=RecursiveCharacterTextSplitter(
chunk_size=text_splitter_chunk_size, chunk_overlap=text_splitter_chunk_overlap
),
)
# Create retriever
self.information_retriever_en = CustomRetriever(
k=3, sql_file_path=sql_file_path.replace(".db", "_en.db"), verbose=True
)
self.information_retriever_en.add_documents(
docs=docs_en, cleanup="incremental", mode="by_source", embedding=self.embedding
)
self.chinese_retrieval_conversation = ChineseRetrievalConversation.from_retriever(
self.information_retriever_zh, model_path=zh_model_path, model_name=zh_model_name
)
self.english_retrieval_conversation = EnglishRetrievalConversation.from_retriever(
self.information_retriever_en, model_path=en_model_path, model_name=en_model_name
)
self.memory = None
def load_supporting_docs(self, files: List[List[str]] = None, text_splitter: TextSplitter = None):
"""
        Load supporting documents. Currently, all documents will be stored in one vector store.
"""
documents = []
if files:
for file in files:
retriever_data = DocumentLoader([[file["data_path"], file["name"]]]).all_data
splits = text_splitter.split_documents(retriever_data)
documents.extend(splits)
else:
while True:
file = input("Select a file to load or press Enter to exit:")
if file == "":
break
data_name = input("Enter a short description of the data:")
separator = input(
"Enter a separator to force separating text into chunks, if no separator is given, the default separator is '\\n\\n', press ENTER directly to skip:"
)
separator = separator if separator != "" else "\n\n"
retriever_data = DocumentLoader([[file, data_name.replace(" ", "_")]]).all_data
# Split
splits = text_splitter.split_documents(retriever_data)
documents.extend(splits)
return documents
def start_test_session(self):
"""
        Simple multilingual session for testing purposes, with a naive language selection mechanism
"""
while True:
user_input = input("User: ")
lang = detect_lang_naive(user_input)
if "END" == user_input:
print("Agent: Happy to chat with you :)")
break
agent_response = self.run(user_input, which_language=lang)
print(f"Agent: {agent_response}")
    def run(self, user_input: str, which_language: str):
"""
        Generate the response given the user input and a string indicating the required language of the output
"""
assert which_language in ["zh", "en"]
if which_language == "zh":
agent_response, self.memory = self.chinese_retrieval_conversation.run(user_input, self.memory)
else:
agent_response, self.memory = self.english_retrieval_conversation.run(user_input, self.memory)
return agent_response.split("\n")[0]
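# A hypothetical end-to-end setup (not part of the original module); every path and model name
# below is a placeholder and must be replaced with your own checkpoints and documents.
if __name__ == "__main__":
    session = UniversalRetrievalConversation(
        embedding_model_path="moka-ai/m3e-base",
        embedding_model_device="cpu",
        zh_model_path="/path/to/your/chinese/llm",  # placeholder
        zh_model_name="your_zh_model_name",  # placeholder
        en_model_path="/path/to/your/english/llm",  # placeholder
        en_model_name="your_en_model_name",  # placeholder
        sql_file_path="./retrieval_index.db",
        files_zh=[{"data_path": "./docs/zh_faq.txt", "name": "zh_faq"}],
        files_en=[{"data_path": "./docs/en_faq.txt", "name": "en_faq"}],
    )
    print(session.run("What documents have been loaded?", which_language="en"))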
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/chain/__init__.py | applications/ColossalQA/colossalqa/chain/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/chain/memory/summary.py | applications/ColossalQA/colossalqa/chain/memory/summary.py | """
Custom SummarizerMixin base class and ConversationSummaryMemory class
Modified from Original Source
This code is based on LangChain Ai's langchain, which can be found at
https://github.com/langchain-ai/langchain
The original code is licensed under the MIT license.
"""
from __future__ import annotations
from typing import Any, Dict, List, Type
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema import BaseChatMessageHistory, BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import BaseMessage, SystemMessage, get_buffer_string
class SummarizerMixin(BaseModel):
"""
Mixin for summarizer.
"""
human_prefix: str = "Human"
ai_prefix: str = "Assistant"
llm: BaseLanguageModel
prompt: BasePromptTemplate = SUMMARY_PROMPT
summary_message_cls: Type[BaseMessage] = SystemMessage
llm_kwargs: Dict = {}
def predict_new_summary(self, messages: List[BaseMessage], existing_summary: str, stop: List = []) -> str:
"""
Recursively summarize a conversation by generating a new summary using
the last round of conversation and the existing summary.
"""
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt, llm_kwargs=self.llm_kwargs)
return chain.predict(summary=existing_summary, new_lines=new_lines, stop=stop)
class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
"""Conversation summarizer to chat memory."""
buffer: str = ""
memory_key: str = "history"
@classmethod
def from_messages(
cls,
llm: BaseLanguageModel,
chat_memory: BaseChatMessageHistory,
summarize_step: int = 2,
**kwargs: Any,
) -> ConversationSummaryMemory:
obj = cls(llm=llm, chat_memory=chat_memory, **kwargs)
for i in range(0, len(obj.chat_memory.messages), summarize_step):
obj.buffer = obj.predict_new_summary(obj.chat_memory.messages[i : i + summarize_step], obj.buffer)
return obj
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables."""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
if self.return_messages:
buffer: Any = [self.summary_message_cls(content=self.buffer)]
else:
buffer = self.buffer
return {self.memory_key: buffer}
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
return values
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.buffer = self.predict_new_summary(self.chat_memory.messages[-2:], self.buffer)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.buffer = ""
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/chain/memory/__init__.py | applications/ColossalQA/colossalqa/chain/memory/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/chain/retrieval_qa/stuff.py | applications/ColossalQA/colossalqa/chain/retrieval_qa/stuff.py | """
Chain that combines documents by stuffing into context
Modified from Original Source
This code is based on LangChain Ai's langchain, which can be found at
https://github.com/langchain-ai/langchain
The original code is licensed under the MIT license.
"""
import copy
from typing import Any, List
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.docstore.document import Document
from langchain.schema import format_document
class CustomStuffDocumentsChain(StuffDocumentsChain):
"""Chain that combines documents by stuffing into context.
This chain takes a list of documents and first combines them into a single string.
It does this by formatting each document into a string with the `document_prompt`
and then joining them together with `document_separator`. It then adds that new
string to the inputs with the variable name set by `document_variable_name`.
Those inputs are then passed to the `llm_chain`.
Example:
.. code-block:: python
from langchain.chains import StuffDocumentsChain, LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
# This controls how each document will be formatted. Specifically,
# it will be passed to `format_document` - see that function for more
# details.
document_prompt = PromptTemplate(
input_variables=["page_content"],
template="{page_content}"
)
document_variable_name = "context"
llm = OpenAI()
# The prompt here should take as an input variable the
# `document_variable_name`
prompt = PromptTemplate.from_template(
"Summarize this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
)
"""
def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict:
"""Construct inputs from kwargs and docs.
        Format and then join all the documents together into one input with the name
        `self.document_variable_name`. Then pluck any additional variables
from **kwargs.
Args:
docs: List of documents to format and then join into single input
**kwargs: additional inputs to chain, will pluck any other required
arguments from here.
Returns:
dictionary of inputs to LLMChain
"""
# Format each document according to the prompt
        # If the document is in key-value format (its metadata has 'is_key_value_mapping'=True and contains 'value'),
        # use the value in place of the original page content
doc_prefix = kwargs.get("doc_prefix", "Supporting Document")
docs_ = []
for id, doc in enumerate(docs):
doc_ = copy.deepcopy(doc)
if doc_.metadata.get("is_key_value_mapping", False) and "value" in doc_.metadata:
doc_.page_content = str(doc_.metadata["value"])
prefix = doc_prefix + str(id)
doc_.page_content = str(prefix + ":" + (" " if doc_.page_content[0] != " " else "") + doc_.page_content)
docs_.append(doc_)
doc_strings = [format_document(doc, self.document_prompt) for doc in docs_]
arg_list = ["stop", "temperature", "top_k", "top_p", "max_new_tokens"]
arg_list.extend(self.llm_chain.prompt.input_variables)
# Join the documents together to put them in the prompt.
inputs = {k: v for k, v in kwargs.items() if k in arg_list}
inputs[self.document_variable_name] = self.document_separator.join(doc_strings)
return inputs
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/chain/retrieval_qa/__init__.py | applications/ColossalQA/colossalqa/chain/retrieval_qa/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py | applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py | """
Chain for question-answering against a vector database.
Modified from Original Source
This code is based on LangChain Ai's langchain, which can be found at
https://github.com/langchain-ai/langchain
The original code is licensed under the MIT license.
"""
from __future__ import annotations
import copy
import inspect
from typing import Any, Dict, List, Optional
from colossalqa.chain.retrieval_qa.load_chain import load_qa_chain
from colossalqa.chain.retrieval_qa.stuff import CustomStuffDocumentsChain
from langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, Callbacks
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
from langchain.chains.retrieval_qa.base import BaseRetrievalQA
from langchain.prompts import PromptTemplate
from langchain.pydantic_v1 import Field
from langchain.schema import BaseRetriever, Document
from langchain.schema.language_model import BaseLanguageModel
class CustomBaseRetrievalQA(BaseRetrievalQA):
"""Base class for question-answering chains."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseRetrievalQA:
"""Initialize from LLM."""
llm_kwargs = kwargs.pop("llm_kwargs", {})
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
llm_chain = LLMChain(llm=llm, prompt=_prompt, callbacks=callbacks, llm_kwargs=llm_kwargs)
document_prompt = kwargs.get(
"document_prompt", PromptTemplate(input_variables=["page_content"], template="Context:\n{page_content}")
)
combine_documents_chain = CustomStuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name="context",
document_prompt=document_prompt,
callbacks=callbacks,
)
return cls(
combine_documents_chain=combine_documents_chain,
callbacks=callbacks,
**kwargs,
)
@classmethod
def from_chain_type(
cls,
llm: BaseLanguageModel,
chain_type: str = "stuff",
chain_type_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> BaseRetrievalQA:
"""Load chain from chain type."""
llm_kwargs = kwargs.pop("llm_kwargs", {})
_chain_type_kwargs = chain_type_kwargs or {}
combine_documents_chain = load_qa_chain(llm, chain_type=chain_type, **_chain_type_kwargs, llm_kwargs=llm_kwargs)
return cls(combine_documents_chain=combine_documents_chain, **kwargs)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run get_relevant_text and llm on input query.
If chain has 'return_source_documents' as 'True', returns
the retrieved documents as well under the key 'source_documents'.
Example:
.. code-block:: python
res = indexqa({'query': 'This is my query'})
answer, docs = res['result'], res['source_documents']
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
accepts_run_manager = "run_manager" in inspect.signature(self._get_docs).parameters
if accepts_run_manager:
docs = self._get_docs(question, run_manager=_run_manager)
else:
docs = self._get_docs(question) # type: ignore[call-arg]
kwargs = {
k: v
for k, v in inputs.items()
if k in ["stop", "temperature", "top_k", "top_p", "max_new_tokens", "doc_prefix"]
}
if self.combine_documents_chain.memory is not None:
buffered_history_backup, summarized_history_temp_backup = copy.deepcopy(
self.combine_documents_chain.memory.buffered_history
), copy.deepcopy(self.combine_documents_chain.memory.summarized_history_temp)
else:
buffered_history_backup = None
summarized_history_temp_backup = None
answer = self.combine_documents_chain.run(
input_documents=docs, question=question, callbacks=_run_manager.get_child(), **kwargs
)
if summarized_history_temp_backup is not None and buffered_history_backup is not None:
(
self.combine_documents_chain.memory.buffered_history,
self.combine_documents_chain.memory.summarized_history_temp,
) = copy.deepcopy(buffered_history_backup), copy.deepcopy(summarized_history_temp_backup)
# if rejection_trigger_keywords is not given, return the response from LLM directly
rejection_trigger_keywords = inputs.get("rejection_trigger_keywords", [])
answer = answer if all([rej not in answer for rej in rejection_trigger_keywords]) else None
if answer is None:
answer = inputs.get("rejection_answer", "抱歉,根据提供的信息无法回答该问题。")
if self.combine_documents_chain.memory is not None:
self.combine_documents_chain.memory.save_context({"question": question}, {"output": answer})
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run get_relevant_text and llm on input query.
If chain has 'return_source_documents' as 'True', returns
the retrieved documents as well under the key 'source_documents'.
Example:
.. code-block:: python
res = indexqa({'query': 'This is my query'})
answer, docs = res['result'], res['source_documents']
"""
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
accepts_run_manager = "run_manager" in inspect.signature(self._aget_docs).parameters
if accepts_run_manager:
docs = await self._aget_docs(question, run_manager=_run_manager)
else:
docs = await self._aget_docs(question) # type: ignore[call-arg]
kwargs = {
k: v
for k, v in inputs.items()
if k in ["stop", "temperature", "top_k", "top_p", "max_new_tokens", "doc_prefix"]
}
answer = await self.combine_documents_chain.arun(
input_documents=docs, question=question, callbacks=_run_manager.get_child(), **kwargs
)
# if rejection_trigger_keywords is not given, return the response from LLM directly
rejection_trigger_keywords = inputs.get("rejection_trigger_keywords", [])
answer = (
answer
if all([rej not in answer for rej in rejection_trigger_keywords]) or len(rejection_trigger_keywords) == 0
else None
)
if answer is None:
answer = inputs.get("rejection_answer", "抱歉,根据提供的信息无法回答该问题。")
self.combine_documents_chain.memory.save_context({"question": question}, {"output": answer})
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
class RetrievalQA(CustomBaseRetrievalQA):
"""Chain for question-answering against an index.
Example:
.. code-block:: python
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.faiss import FAISS
from langchain.vectorstores.base import VectorStoreRetriever
retriever = VectorStoreRetriever(vectorstore=FAISS(...))
retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever)
"""
retriever: BaseRetriever = Field(exclude=True)
def _get_docs(
self,
question: str,
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
return self.retriever.get_relevant_documents(question, callbacks=run_manager.get_child())
async def _aget_docs(
self,
question: str,
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
return await self.retriever.aget_relevant_documents(question, callbacks=run_manager.get_child())
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "retrieval_qa"
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/chain/retrieval_qa/load_chain.py | applications/ColossalQA/colossalqa/chain/retrieval_qa/load_chain.py | """
Load question answering chains.
For now, only the stuffed chain is modified
Modified from Original Source
This code is based on LangChain Ai's langchain, which can be found at
https://github.com/langchain-ai/langchain
The original code is licensed under the MIT license.
"""
import copy
from typing import Any, Mapping, Optional, Protocol
from colossalqa.chain.retrieval_qa.stuff import CustomStuffDocumentsChain
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import stuff_prompt
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.prompt_template import BasePromptTemplate
class LoadingCallable(Protocol):
"""Interface for loading the combine documents chain."""
def __call__(self, llm: BaseLanguageModel, **kwargs: Any) -> BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
def _load_stuff_chain(
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
document_variable_name: str = "context",
verbose: Optional[bool] = None,
callback_manager: Optional[BaseCallbackManager] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> CustomStuffDocumentsChain:
_prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm)
if "llm_kwargs" in kwargs:
llm_kwargs = copy.deepcopy(kwargs["llm_kwargs"])
del kwargs["llm_kwargs"]
else:
llm_kwargs = {}
llm_chain = LLMChain(
llm=llm,
prompt=_prompt,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
llm_kwargs=llm_kwargs,
)
return CustomStuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name=document_variable_name,
verbose=verbose,
callback_manager=callback_manager,
callbacks=callbacks,
**kwargs,
)
def load_qa_chain(
llm: BaseLanguageModel,
chain_type: str = "stuff",
verbose: Optional[bool] = None,
callback_manager: Optional[BaseCallbackManager] = None,
**kwargs: Any,
) -> BaseCombineDocumentsChain:
"""Load question answering chain.
Args:
llm: Language Model to use in the chain.
        chain_type: Type of document combining chain to use. Currently only "stuff" is supported.
verbose: Whether chains should be run in verbose mode or not. Note that this
applies to all chains that make up the final chain.
callback_manager: Callback manager to use for the chain.
Returns:
A chain to use for question answering.
"""
loader_mapping: Mapping[str, LoadingCallable] = {"stuff": _load_stuff_chain}
if chain_type not in loader_mapping:
raise ValueError(f"Got unsupported chain type: {chain_type}. " f"Should be one of {loader_mapping.keys()}")
return loader_mapping[chain_type](llm, verbose=verbose, callback_manager=callback_manager, **kwargs)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/prompt/prompt.py | applications/ColossalQA/colossalqa/prompt/prompt.py | """
All custom prompt templates are defined here.
"""
from langchain.prompts.prompt import PromptTemplate
# Below are Chinese retrieval qa prompts
_CUSTOM_SUMMARIZER_TEMPLATE_ZH = """请递进式地总结所提供的当前对话,将当前对话的摘要内容添加到先前已有的摘要上,返回一个融合了当前对话的新的摘要。
例1:
已有的摘要:
人类问Assistant对人工智能的看法。人工智能认为人工智能是一种善的力量。
新的对话内容:
人类: 为什么你认为人工智能是一种好的力量?
Assistant: 因为人工智能将帮助人类充分发挥潜力。
新的摘要:
人类问Assistant对人工智能的看法。人工智能认为人工智能是一种积极的力量,因为它将帮助人类充分发挥潜力。
示例结束
已有的摘要:
{summary}
新的对话内容:
{new_lines}
新的摘要:"""
_ZH_RETRIEVAL_QA_PROMPT = """<指令>根据下列支持文档和对话历史,简洁和专业地来回答问题。如果无法从支持文档中得到答案,请说 “根据已知信息无法回答该问题”。回答中请不要涉及支持文档中没有提及的信息,答案请使用中文。 </指令>
{context}
<对话历史>
{chat_history}
</对话历史>
<问题>{question}</问题>
答案:"""
ZH_RETRIEVAL_QA_TRIGGER_KEYWORDS = ["无法回答该问题"]
ZH_RETRIEVAL_QA_REJECTION_ANSWER = "抱歉,根据提供的信息无法回答该问题。"
_ZH_RETRIEVAL_CLASSIFICATION_USE_CASE = """使用提供的参考案例判断客户遇到的故障所属的故障原因分类。
背景信息:
{context}
客服记录:
{question}
故障原因分类:"""
_ZH_DISAMBIGUATION_PROMPT = """你是一个乐于助人、恭敬而诚实的助手。你总是按照指示去做。
请用聊天记录中提到的具体名称或实体名称替换给定句子中的任何模糊或有歧义的指代,如果没有提供聊天记录或句子中不包含模糊或有歧义的指代,则只输出原始句子。您的输出应该是消除歧义的句子本身(与“消除歧义的句子:”在同一行中),并且不包含任何其他内容。
下面是一个例子:
聊天记录:
用户: 我有一个朋友,张三。你认识他吗?
Assistant: 我认识一个叫张三的人
句子: 他最喜欢的食物是什么?
消除歧义的句子: 张三最喜欢的食物是什么?
聊天记录:
{chat_history}
句子: {input}
消除歧义的句子:"""
# Below are English retrieval qa prompts
_EN_RETRIEVAL_QA_PROMPT = """[INST] <<SYS>>Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist content.
If the answer cannot be inferred based on the given context, please say "I cannot answer the question based on the information given.".<</SYS>>
Use the context and chat history to answer the question.
context:
{context}
chat history
{chat_history}
question: {question}
answer:"""
EN_RETRIEVAL_QA_TRIGGER_KEYWORDS = ["cannot answer the question"]
EN_RETRIEVAL_QA_REJECTION_ANSWER = "Sorry, this question cannot be answered based on the information provided."
_EN_DISAMBIGUATION_PROMPT = """[INST] <<SYS>>You are a helpful, respectful and honest assistant. You always follow the instruction.<</SYS>>
Please replace any ambiguous references in the given sentence with the specific names or entities mentioned in the chat history or just output the original sentence if no chat history is provided or if the sentence doesn't contain ambiguous references. Your output should be the disambiguated sentence itself (in the same line as "disambiguated sentence:") and contain nothing else.
Here is an example:
Chat history:
Human: I have a friend, Mike. Do you know him?
Assistant: Yes, I know a person named Mike
sentence: What's his favorite food?
disambiguated sentence: What's Mike's favorite food?
[/INST]
Chat history:
{chat_history}
sentence: {input}
disambiguated sentence:"""
# Prompt templates
# English retrieval prompt, the model generates answer based on this prompt
PROMPT_RETRIEVAL_QA_EN = PromptTemplate(
template=_EN_RETRIEVAL_QA_PROMPT, input_variables=["question", "chat_history", "context"]
)
# English disambiguation prompt, which replaces any ambiguous references in the user's input with the specific names or entities mentioned in the chat history
PROMPT_DISAMBIGUATE_EN = PromptTemplate(template=_EN_DISAMBIGUATION_PROMPT, input_variables=["chat_history", "input"])
# Chinese summary prompt, which summarizes the chat history
SUMMARY_PROMPT_ZH = PromptTemplate(input_variables=["summary", "new_lines"], template=_CUSTOM_SUMMARIZER_TEMPLATE_ZH)
# Chinese disambiguation prompt, which replaces any ambiguous references in the user's input with the specific names or entities mentioned in the chat history
PROMPT_DISAMBIGUATE_ZH = PromptTemplate(template=_ZH_DISAMBIGUATION_PROMPT, input_variables=["chat_history", "input"])
# Chinese retrieval prompt; the model generates an answer based on this prompt
PROMPT_RETRIEVAL_QA_ZH = PromptTemplate(
template=_ZH_RETRIEVAL_QA_PROMPT, input_variables=["question", "chat_history", "context"]
)
# Chinese retrieval prompt for a use case to analyze fault causes
PROMPT_RETRIEVAL_CLASSIFICATION_USE_CASE_ZH = PromptTemplate(
template=_ZH_RETRIEVAL_CLASSIFICATION_USE_CASE, input_variables=["question", "context"]
)
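# Illustrative sketch (not part of the original module): it shows how one of the templates above
# is rendered with langchain's PromptTemplate.format. The context, chat history and question
# strings below are made up purely for demonstration.
if __name__ == "__main__":
    rendered = PROMPT_RETRIEVAL_QA_EN.format(
        context="Example Corp runs several hotels in Singapore.",  # hypothetical retrieved document
        chat_history="",
        question="Which industry does Example Corp operate in?",
    )
    print(rendered)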
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/text_splitter/utils.py | applications/ColossalQA/colossalqa/text_splitter/utils.py | import re
def remove_format(text: str) -> str:
# If the text contains more than 3 whitespace characters other than plain spaces (\t, \n, \r, \v, \f), it is probably a table row, so keep it unchanged; otherwise normalize them to spaces
if len(re.findall(r"\s", text.replace(" ", ""))) > 3:
# in case this is a line of a table
return text
return re.sub(r"\s", " ", text)
# Collapse redundant newlines and return the cleaned lines of a paragraph
def get_cleaned_paragraph(s: str) -> list:
text = str(s)
text = re.sub(r"\n{3,}", r"\n", text) # replace \n\n\n... with \n
text = re.sub("\n\n", "", text)
lines = text.split("\n")
lines_remove_format = [remove_format(line) for line in lines]
return lines_remove_format
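# Minimal usage sketch (added for illustration): the sample string below is made up; it shows how
# get_cleaned_paragraph collapses redundant newlines and normalizes stray whitespace, returning one
# cleaned string per remaining line.
if __name__ == "__main__":
    sample = "第一段。\n\n\n第二段,带有\t一个制表符。"
    print(get_cleaned_paragraph(sample))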
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/text_splitter/chinese_text_splitter.py | applications/ColossalQA/colossalqa/text_splitter/chinese_text_splitter.py | """
Code for Chinese text splitter
"""
from typing import Any, List, Optional
from colossalqa.text_splitter.utils import get_cleaned_paragraph
from langchain.text_splitter import RecursiveCharacterTextSplitter
class ChineseTextSplitter(RecursiveCharacterTextSplitter):
def __init__(self, separators: Optional[List[str]] = None, is_separator_regrx: bool = False, **kwargs: Any):
self._separators = separators or ["\n\n", "\n", ",", "。", "!", "?", "?"]
if "chunk_size" not in kwargs:
kwargs["chunk_size"] = 50
if "chunk_overlap" not in kwargs:
kwargs["chunk_overlap"] = 10
super().__init__(separators=separators, keep_separator=True, **kwargs)
self._is_separator_regex = is_separator_regrx
def split_text(self, text: str) -> List[str]:
"""Return the list of separated text chunks"""
cleaned_paragraph = get_cleaned_paragraph(text)
splitted = []
for paragraph in cleaned_paragraph:
segs = super().split_text(paragraph)
for i in range(len(segs) - 1):
if segs[i][-1] not in self._separators:
pos = text.find(segs[i])
pos_end = pos + len(segs[i])
if i > 0:
last_sentence_start = max([text.rfind(m, 0, pos) for m in ["。", "!", "?"]])
pos = last_sentence_start + 1
segs[i] = str(text[pos:pos_end])
if i != len(segs) - 1:
next_sentence_end = max([text.find(m, pos_end) for m in ["。", "!", "?"]])
segs[i] = str(text[pos : next_sentence_end + 1])
splitted.append(segs[i])
if len(splitted) <= 1:
return splitted
splitted_text = []
i = 1
if splitted[0] not in splitted[1]:
splitted_text.append([splitted[0], 0])
if splitted[-1] not in splitted[-2]:
splitted_text.append([splitted[-1], len(splitted) - 1])
while i < len(splitted) - 1:
if splitted[i] not in splitted[i + 1] and splitted[i] not in splitted[i - 1]:
splitted_text.append([splitted[i], i])
i += 1
splitted_text = sorted(splitted_text, key=lambda x: x[1])
splitted_text = [splitted_text[i][0] for i in range(len(splitted_text))]
ret = []
for s in splitted_text:
if s not in ret:
ret.append(s)
return ret
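# Usage sketch (not in the original file): splits a made-up Chinese passage with a small chunk_size
# so the sentence-boundary behaviour is visible. The chunk sizes are arbitrary demo values.
if __name__ == "__main__":
    splitter = ChineseTextSplitter(chunk_size=30, chunk_overlap=0)
    for chunk in splitter.split_text("今天天气很好。我们决定去公园散步,顺便讨论一下新项目的计划。会议定在下周一举行。"):
        print(chunk)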
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/text_splitter/__init__.py | applications/ColossalQA/colossalqa/text_splitter/__init__.py | from .chinese_text_splitter import ChineseTextSplitter
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/local/llm.py | applications/ColossalQA/colossalqa/local/llm.py | """
API and LLM wrapper classes for running LLMs locally
Usage:
import os
model_path = os.environ.get("ZH_MODEL_PATH")
model_name = "chatglm2"
colossal_api = ColossalAPI(model_name, model_path)
llm = ColossalLLM(n=1, api=colossal_api)
TEST_PROMPT_CHATGLM="续写文章:惊蛰一过,春寒加剧。先是料料峭峭,继而雨季开始,"
logger.info(llm(TEST_PROMPT_CHATGLM, max_new_tokens=100), verbose=True)
"""
from typing import Any, List, Mapping, Optional
import torch
from colossalqa.local.utils import get_response, post_http_request
from colossalqa.mylogging import get_logger
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from transformers import AutoModelForCausalLM, AutoTokenizer
logger = get_logger()
class ColossalAPI:
"""
API for calling LLM.generate
"""
__instances = dict()
def __init__(self, model_type: str, model_path: str, ckpt_path: str = None) -> None:
"""
Configure model
"""
if model_type + model_path + (ckpt_path or "") in ColossalAPI.__instances:
return
else:
ColossalAPI.__instances[model_type + model_path + (ckpt_path or "")] = self
self.model_type = model_type
self.model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, trust_remote_code=True)
if ckpt_path is not None:
state_dict = torch.load(ckpt_path)
self.model.load_state_dict(state_dict)
self.model.to(torch.cuda.current_device())
# Configure tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
self.model.eval()
@staticmethod
def get_api(model_type: str, model_path: str, ckpt_path: str = None):
if model_type + model_path + (ckpt_path or "") in ColossalAPI.__instances:
return ColossalAPI.__instances[model_type + model_path + (ckpt_path or "")]
else:
return ColossalAPI(model_type, model_path, ckpt_path)
def generate(self, input: str, **kwargs) -> str:
"""
Generate response given the prompt
Args:
input: input string
**kwargs: language model keyword type arguments, such as top_k, top_p, temperature, max_new_tokens...
Returns:
output: output string
"""
if self.model_type in ["chatglm", "chatglm2"]:
inputs = {
k: v.to(torch.cuda.current_device()) for k, v in self.tokenizer(input, return_tensors="pt").items()
}
else:
inputs = {
"input_ids": self.tokenizer(input, return_tensors="pt")["input_ids"].to(torch.cuda.current_device())
}
output = self.model.generate(**inputs, **kwargs)
output = output.cpu()
prompt_len = inputs["input_ids"].size(1)
response = output[0, prompt_len:]
output = self.tokenizer.decode(response, skip_special_tokens=True)
return output
class VllmAPI:
def __init__(self, host: str = "localhost", port: int = 8077) -> None:
# Configure api for model served through web
self.host = host
self.port = port
self.url = f"http://{self.host}:{self.port}/generate"
def generate(self, input: str, **kwargs):
output = get_response(post_http_request(input, self.url, **kwargs))[0]
return output[len(input) :]
class ColossalLLM(LLM):
"""
Langchain LLM wrapper for a local LLM
"""
n: int
api: Any
kwargs = {"max_new_tokens": 100}
@property
def _llm_type(self) -> str:
return "custom"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
logger.info(f"kwargs:{kwargs}\nstop:{stop}\nprompt:{prompt}", verbose=self.verbose)
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
generate_args = {k: kwargs[k] for k in kwargs if k not in ["stop", "n"]}
out = self.api.generate(prompt, **generate_args)
if isinstance(stop, list) and len(stop) != 0:
for stopping_words in stop:
if stopping_words in out:
out = out.split(stopping_words)[0]
logger.info(f"{prompt}{out}", verbose=self.verbose)
return out
@property
def _identifying_params(self) -> Mapping[str, int]:
"""Get the identifying parameters."""
return {"n": self.n}
def get_token_ids(self, text: str) -> List[int]:
"""Return the ordered ids of the tokens in a text.
Args:
text: The string input to tokenize.
Returns:
A list of ids corresponding to the tokens in the text, in order they occur
in the text.
"""
# use the colossal llm's tokenizer instead of langchain's cached GPT2 tokenizer
return self.api.tokenizer.encode(text)
class VllmLLM(LLM):
"""
Langchain LLM wrapper for a local LLM
"""
n: int
api: Any
kwargs = {"max_new_tokens": 100}
@property
def _llm_type(self) -> str:
return "custom"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
logger.info(f"kwargs:{kwargs}\nstop:{stop}\nprompt:{prompt}", verbose=self.verbose)
generate_args = {k: kwargs[k] for k in kwargs if k in ["n", "max_tokens", "temperature", "stream"]}
out = self.api.generate(prompt, **generate_args)
if isinstance(stop, list) and len(stop) != 0:
for stopping_words in stop:
if stopping_words in out:
out = out.split(stopping_words)[0]
logger.info(f"{prompt}{out}", verbose=self.verbose)
return out
def set_host_port(self, host: str = "localhost", port: int = 8077, **kwargs) -> None:
if "max_tokens" not in kwargs:
kwargs["max_tokens"] = 100
self.kwargs = kwargs
self.api = VllmAPI(host=host, port=port)
@property
def _identifying_params(self) -> Mapping[str, int]:
"""Get the identifying parameters."""
return {"n": self.n}
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/local/pangu_llm.py | applications/ColossalQA/colossalqa/local/pangu_llm.py | """
LLM wrapper for Pangu
Usage:
    # URL: in "Pangu large model suite management", click "Service Management" -> "Model List" -> "Copy Path" for the model you want to use
    # USERNAME: in the Huawei Cloud console, "My Credentials" -> "API Credentials", the "IAM User Name", i.e. the name you use to log in to the IAM account
    # PASSWORD: the IAM user's password
    # DOMAIN_NAME: in the Huawei Cloud console, "My Credentials" -> "API Credentials", the "Account Name", i.e. the top-level account that manages the IAM users
    os.environ["URL"] = ""
    os.environ["USERNAME"] = ""
    os.environ["PASSWORD"] = ""
    os.environ["DOMAIN_NAME"] = ""
pg = Pangu(id=1)
pg.set_auth_config()
res = pg('你是谁') # 您好,我是华为盘古大模型。我能够通过和您对话互动为您提供帮助。请问您有什么想问我的吗?
"""
import http.client
import json
from typing import Any, List, Mapping, Optional
import requests
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class Pangu(LLM):
"""
A custom LLM class that integrates pangu models
"""
n: int
gen_config: dict = None
auth_config: dict = None
def __init__(self, gen_config=None, **kwargs):
super(Pangu, self).__init__(**kwargs)
if gen_config is None:
self.gen_config = {"user": "User", "max_tokens": 50, "temperature": 0.95, "n": 1}
else:
self.gen_config = gen_config
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"n": self.n}
@property
def _llm_type(self) -> str:
return "pangu"
def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
"""
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered
Returns:
The string generated by the model
"""
# Update the generation arguments
for key, value in kwargs.items():
if key in self.gen_config:
self.gen_config[key] = value
response = self.text_completion(prompt, self.gen_config, self.auth_config)
text = response["choices"][0]["text"]
if stop is not None:
for stopping_words in stop:
if stopping_words in text:
text = text.split(stopping_words)[0]
return text
def set_auth_config(self, **kwargs):
url = get_from_dict_or_env(kwargs, "url", "URL")
username = get_from_dict_or_env(kwargs, "username", "USERNAME")
password = get_from_dict_or_env(kwargs, "password", "PASSWORD")
domain_name = get_from_dict_or_env(kwargs, "domain_name", "DOMAIN_NAME")
region = url.split(".")[1]
auth_config = {}
auth_config["endpoint"] = url[url.find("https://") + 8 : url.find(".com") + 4]
auth_config["resource_path"] = url[url.find(".com") + 4 :]
auth_config["auth_token"] = self.get_latest_auth_token(region, username, password, domain_name)
self.auth_config = auth_config
def get_latest_auth_token(self, region, username, password, domain_name):
url = f"https://iam.{region}.myhuaweicloud.com/v3/auth/tokens"
payload = json.dumps(
{
"auth": {
"identity": {
"methods": ["password"],
"password": {"user": {"name": username, "password": password, "domain": {"name": domain_name}}},
},
"scope": {"project": {"name": region}},
}
}
)
headers = {"Content-Type": "application/json"}
response = requests.request("POST", url, headers=headers, data=payload)
return response.headers["X-Subject-Token"]
def text_completion(self, text, gen_config, auth_config):
conn = http.client.HTTPSConnection(auth_config["endpoint"])
payload = json.dumps(
{
"prompt": text,
"user": gen_config["user"],
"max_tokens": gen_config["max_tokens"],
"temperature": gen_config["temperature"],
"n": gen_config["n"],
}
)
headers = {
"X-Auth-Token": auth_config["auth_token"],
"Content-Type": "application/json",
}
conn.request("POST", auth_config["resource_path"], payload, headers)
res = conn.getresponse()
data = res.read()
data = json.loads(data.decode("utf-8"))
return data
def chat_model(self, messages, gen_config, auth_config):
conn = http.client.HTTPSConnection(auth_config["endpoint"])
payload = json.dumps(
{
"messages": messages,
"user": gen_config["user"],
"max_tokens": gen_config["max_tokens"],
"temperature": gen_config["temperature"],
"n": gen_config["n"],
}
)
headers = {
"X-Auth-Token": auth_config["auth_token"],
"Content-Type": "application/json",
}
conn.request("POST", auth_config["resource_path"], payload, headers)
res = conn.getresponse()
data = res.read()
data = json.loads(data.decode("utf-8"))
return data
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/local/utils.py | applications/ColossalQA/colossalqa/local/utils.py | """
Generation utilities
"""
import json
from typing import List
import requests
def post_http_request(
prompt: str, api_url: str, n: int = 1, max_tokens: int = 100, temperature: float = 0.0, stream: bool = False
) -> requests.Response:
headers = {"User-Agent": "Test Client"}
pload = {
"prompt": prompt,
"n": 1,
"use_beam_search": False,
"temperature": temperature,
"max_tokens": max_tokens,
"stream": stream,
}
response = requests.post(api_url, headers=headers, json=pload, stream=True, timeout=3)
return response
def get_response(response: requests.Response) -> List[str]:
data = json.loads(response.content)
output = data["text"]
return output
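# Minimal sketch (added for illustration): how the two helpers above compose against a vLLM-style
# server exposing a /generate route. The URL is an assumption and nothing useful happens unless such
# a server is already listening there.
if __name__ == "__main__":
    api_url = "http://localhost:8077/generate"  # hypothetical endpoint
    response = post_http_request("你好,请介绍一下你自己。", api_url, max_tokens=64)
    print(get_response(response))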
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/local/colossalcloud_llm.py | applications/ColossalQA/colossalqa/local/colossalcloud_llm.py | """
LLM wrapper for LLMs running on ColossalCloud Platform
Usage:
os.environ['URL'] = ""
os.environ['HOST'] = ""
gen_config = {
'max_new_tokens': 100,
# 'top_k': 2,
'top_p': 0.9,
'temperature': 0.5,
'repetition_penalty': 2,
}
llm = ColossalCloudLLM(n=1)
llm.set_auth_config()
resp = llm(prompt='What do you call a three-ton kangaroo?', **gen_config)
print(resp) # super-heavyweight awesome-natured yawning Australian creature!
"""
import json
from typing import Any, Mapping
import requests
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class ColossalCloudLLM(LLM):
"""
A custom LLM class that integrates LLMs running on the ColossalCloud Platform
"""
n: int
gen_config: dict = None
auth_config: dict = None
valid_gen_para: list = ["max_new_tokens", "top_k", "top_p", "temperature", "repetition_penalty"]
def __init__(self, gen_config=None, **kwargs):
"""
Args:
gen_config: config for generation,
max_new_tokens: 50 by default
top_k: (1, vocab_size)
top_p: (0, 1) if not None
temperature: (0, inf) if not None
repetition_penalty: (1, inf) if not None
"""
super(ColossalCloudLLM, self).__init__(**kwargs)
if gen_config is None:
self.gen_config = {"max_new_tokens": 50}
else:
assert "max_new_tokens" in gen_config, "max_new_tokens is a compulsory key in the gen config"
self.gen_config = gen_config
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"n": self.n}
@property
def _llm_type(self) -> str:
return "ColossalCloudLLM"
def set_auth_config(self, **kwargs):
url = get_from_dict_or_env(kwargs, "url", "URL")
host = get_from_dict_or_env(kwargs, "host", "HOST")
auth_config = {}
auth_config["endpoint"] = url
auth_config["Host"] = host
self.auth_config = auth_config
def _call(self, prompt: str, stop=None, **kwargs: Any) -> str:
"""
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered
Returns:
The string generated by the model
"""
# Update the generation arguments
for key, value in kwargs.items():
if key not in self.valid_gen_para:
raise KeyError(
f"Invalid generation parameter: '{key}'. Valid keys are: {', '.join(self.valid_gen_para)}"
)
if key in self.gen_config:
self.gen_config[key] = value
resp_text = self.text_completion(prompt, self.gen_config, self.auth_config)
# TODO: This may cause an excessive token count
if stop is not None:
for stopping_words in stop:
if stopping_words in resp_text:
resp_text = resp_text.split(stopping_words)[0]
return resp_text
def text_completion(self, prompt, gen_config, auth_config):
# Required Parameters
endpoint = auth_config.pop("endpoint")
max_new_tokens = gen_config.pop("max_new_tokens")
# Optional Parameters
optional_params = ["top_k", "top_p", "temperature", "repetition_penalty"] # Self.optional
gen_config = {key: gen_config[key] for key in optional_params if key in gen_config}
# Define the data payload
data = {"max_new_tokens": max_new_tokens, "history": [{"instruction": prompt, "response": ""}], **gen_config}
headers = {"Content-Type": "application/json", **auth_config} # 'Host',
# Make the POST request
response = requests.post(endpoint, headers=headers, data=json.dumps(data))
response.raise_for_status()  # raise an error if the status code is not 200 (success)
# Check the response
return response.text
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/local/__init__.py | applications/ColossalQA/colossalqa/local/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/data_loader/document_loader.py | applications/ColossalQA/colossalqa/data_loader/document_loader.py | """
Class for loading document type data
"""
import glob
from typing import List
from colossalqa.mylogging import get_logger
from langchain.document_loaders import (
JSONLoader,
PyPDFLoader,
TextLoader,
UnstructuredHTMLLoader,
UnstructuredMarkdownLoader,
)
from langchain.document_loaders.csv_loader import CSVLoader
logger = get_logger()
SUPPORTED_DATA_FORMAT = [".csv", ".json", ".html", ".md", ".pdf", ".txt", ".jsonl"]
class DocumentLoader:
"""
Load documents from different files into a list of langchain Documents
"""
def __init__(self, files: List, **kwargs) -> None:
"""
Args:
files: list of files (list[file path, name])
**kwargs: keyword type arguments, useful for certain document types
"""
self.data = {}
self.kwargs = kwargs
for item in files:
path = item[0] if isinstance(item, list) else item
logger.info(f"Loading data from {path}")
self.load_data(path)
logger.info("Data loaded")
self.all_data = []
for key in self.data:
if isinstance(self.data[key], list):
for item in self.data[key]:
if isinstance(item, list):
self.all_data.extend(item)
else:
self.all_data.append(item)
def load_data(self, path: str) -> None:
"""
Load data. Please refer to https://python.langchain.com/docs/modules/data_connection/document_loaders/
for specific format requirements.
Args:
path: path to a file
To load files with a glob path, here are some examples.
Load all files from a directory: folder1/folder2/*
Load all pdf files from a directory: folder1/folder2/*.pdf
"""
files = []
# Handle glob expression
try:
files = glob.glob(path)
except Exception as e:
logger.error(e)
if len(files) == 0:
raise ValueError("Unsupported file/directory format. For directories, please use glob expression")
elif len(files) == 1:
path = files[0]
else:
for file in files:
self.load_data(file)
return
# Load data if the path is a file
logger.info(f"load {path}", verbose=True)
if path.endswith(".csv"):
# Load csv
loader = CSVLoader(file_path=path, encoding="utf8")
data = loader.load()
self.data[path] = data
elif path.endswith(".txt"):
# Load txt
loader = TextLoader(path, encoding="utf8")
data = loader.load()
self.data[path] = data
elif path.endswith("html"):
# Load html
loader = UnstructuredHTMLLoader(path, encoding="utf8")
data = loader.load()
self.data[path] = data
elif path.endswith("json"):
# Load json
loader = JSONLoader(
file_path=path,
jq_schema=self.kwargs.get("jq_schema", ".data[]"),
content_key=self.kwargs.get("content_key", "content"),
metadata_func=self.kwargs.get("metadata_func", None),
)
data = loader.load()
self.data[path] = data
elif path.endswith("jsonl"):
# Load jsonl
loader = JSONLoader(
file_path=path, jq_schema=self.kwargs.get("jq_schema", ".data[].content"), json_lines=True
)
data = loader.load()
self.data[path] = data
elif path.endswith(".md"):
# Load markdown
loader = UnstructuredMarkdownLoader(path)
data = loader.load()
self.data[path] = data
elif path.endswith(".pdf"):
# Load pdf
loader = PyPDFLoader(path)
data = loader.load_and_split()
self.data[path] = data
else:
if "." in path.split("/")[-1]:
raise ValueError(f"Unsupported file format {path}. Supported formats: {SUPPORTED_DATA_FORMAT}")
else:
# May be a directory; we strictly follow the glob path and do not load files in subdirectories
pass
def clear(self):
"""
Clear loaded data.
"""
self.data = {}
self.kwargs = {}
self.all_data = []
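# Usage sketch (not part of the original class): loads every .txt file matching a hypothetical glob
# path and prints where each langchain Document came from. The path and data name are made up.
if __name__ == "__main__":
    loader = DocumentLoader([["./data/*.txt", "sample_docs"]])  # hypothetical path
    for doc in loader.all_data[:3]:
        print(doc.metadata.get("source"), len(doc.page_content))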
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/data_loader/table_dataloader.py | applications/ColossalQA/colossalqa/data_loader/table_dataloader.py | """
Class for loading table-type data. Please refer to the pandas Input/Output documentation for file format details.
"""
import glob
import os
import pandas as pd
from colossalqa.mylogging import get_logger
from colossalqa.utils import drop_table
from sqlalchemy import create_engine
logger = get_logger()
SUPPORTED_DATA_FORMAT = [".csv", ".xlsx", ".xls", ".json", ".html", ".h5", ".hdf5", ".parquet", ".feather", ".dta"]
class TableLoader:
"""
Load tables from different files and serve an SQL database for database operations
"""
def __init__(self, files: str, sql_path: str = "sqlite:///mydatabase.db", verbose=False, **kwargs) -> None:
"""
Args:
files: list of files (list[file path, name])
sql_path: how to serve the sql database
**kwargs: keyword type arguments, useful for certain document types
"""
self.data = {}
self.verbose = verbose
self.sql_path = sql_path
self.kwargs = kwargs
self.sql_engine = create_engine(self.sql_path)
drop_table(self.sql_engine)
self.sql_engine = create_engine(self.sql_path)
for item in files:
path = item[0]
dataset_name = item[1]
if not os.path.exists(path):
raise FileNotFoundError(f"{path} doesn't exist")
if not any([path.endswith(i) for i in SUPPORTED_DATA_FORMAT]):
raise TypeError(f"{path} not supported. Supported types: {SUPPORTED_DATA_FORMAT}")
logger.info("loading data", verbose=self.verbose)
self.load_data(path)
logger.info("data loaded", verbose=self.verbose)
self.to_sql(path, dataset_name)
def load_data(self, path):
"""
Load data and serve it as an SQL database.
Data must be in a format that pandas can read
"""
files = []
# Handle glob expression
try:
files = glob.glob(path)
except Exception as e:
logger.error(e)
if len(files) == 0:
raise ValueError("Unsupported file/directory format. For directories, please use glob expression")
elif len(files) == 1:
path = files[0]
else:
for file in files:
self.load_data(file)
return  # return after recursing into each glob match, mirroring DocumentLoader.load_data
if path.endswith(".csv"):
# Load csv
self.data[path] = pd.read_csv(path)
elif path.endswith(".xlsx") or path.endswith(".xls"):
# Load excel
self.data[path] = pd.read_excel(path) # You can adjust the sheet_name as needed
elif path.endswith(".json"):
# Load json
self.data[path] = pd.read_json(path)
elif path.endswith(".html"):
# Load html
html_tables = pd.read_html(path)
# Choose the desired table from the list of DataFrame objects
self.data[path] = html_tables[0] # You may need to adjust this index
elif path.endswith(".h5") or path.endswith(".hdf5"):
# Load h5
self.data[path] = pd.read_hdf(path, key=self.kwargs.get("key", "data")) # You can adjust the key as needed
elif path.endswith(".parquet"):
# Load parquet
self.data[path] = pd.read_parquet(path, engine="fastparquet")
elif path.endswith(".feather"):
# Load feather
self.data[path] = pd.read_feather(path)
elif path.endswith(".dta"):
# Load dta
self.data[path] = pd.read_stata(path)
else:
raise ValueError("Unsupported file format")
def to_sql(self, path, table_name):
"""
Serve the data as sql database.
"""
self.data[path].to_sql(table_name, con=self.sql_engine, if_exists="replace", index=False)
logger.info(f"Loaded to Sqlite3\nPath: {path}", verbose=self.verbose)
return self.sql_path
def get_sql_path(self):
return self.sql_path
def __del__(self):
if self.sql_engine:
drop_table(self.sql_engine)
self.sql_engine.dispose()
del self.data
del self.sql_engine
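# Usage sketch (not part of the original class): serves a hypothetical CSV file as a SQLite table and
# prints the resulting database URI. The file name, table name and database path are all made up.
if __name__ == "__main__":
    table_loader = TableLoader([["./data/sales.csv", "sales"]], sql_path="sqlite:///demo.db")
    print(table_loader.get_sql_path())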
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/colossalqa/data_loader/__init__.py | applications/ColossalQA/colossalqa/data_loader/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/tests/test_retrieval_qa.py | applications/ColossalQA/tests/test_retrieval_qa.py | import os
from colossalqa.retrieval_conversation_universal import UniversalRetrievalConversation
def test_en_retrievalQA():
data_path_en = os.environ.get("TEST_DATA_PATH_EN")
data_path_zh = os.environ.get("TEST_DATA_PATH_ZH")
en_model_path = os.environ.get("EN_MODEL_PATH")
zh_model_path = os.environ.get("ZH_MODEL_PATH")
zh_model_name = os.environ.get("ZH_MODEL_NAME")
en_model_name = os.environ.get("EN_MODEL_NAME")
sql_file_path = os.environ.get("SQL_FILE_PATH")
qa_session = UniversalRetrievalConversation(
files_en=[{"data_path": data_path_en, "name": "company information", "separator": "\n"}],
files_zh=[{"data_path": data_path_zh, "name": "company information", "separator": "\n"}],
zh_model_path=zh_model_path,
en_model_path=en_model_path,
zh_model_name=zh_model_name,
en_model_name=en_model_name,
sql_file_path=sql_file_path,
)
ans = qa_session.run("which company runs business in hotel industry?", which_language="en")
print(ans)
def test_zh_retrievalQA():
data_path_en = os.environ.get("TEST_DATA_PATH_EN")
data_path_zh = os.environ.get("TEST_DATA_PATH_ZH")
en_model_path = os.environ.get("EN_MODEL_PATH")
zh_model_path = os.environ.get("ZH_MODEL_PATH")
zh_model_name = os.environ.get("ZH_MODEL_NAME")
en_model_name = os.environ.get("EN_MODEL_NAME")
sql_file_path = os.environ.get("SQL_FILE_PATH")
qa_session = UniversalRetrievalConversation(
files_en=[{"data_path": data_path_en, "name": "company information", "separator": "\n"}],
files_zh=[{"data_path": data_path_zh, "name": "company information", "separator": "\n"}],
zh_model_path=zh_model_path,
en_model_path=en_model_path,
zh_model_name=zh_model_name,
en_model_name=en_model_name,
sql_file_path=sql_file_path,
)
ans = qa_session.run("哪家公司在经营酒店业务?", which_language="zh")
print(ans)
if __name__ == "__main__":
test_en_retrievalQA()
test_zh_retrievalQA()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/tests/test_text_splitter.py | applications/ColossalQA/tests/test_text_splitter.py | from colossalqa.text_splitter.chinese_text_splitter import ChineseTextSplitter
def test_text_splitter():
# unit test
spliter = ChineseTextSplitter(chunk_size=30, chunk_overlap=0)
out = spliter.split_text(
"移动端语音唤醒模型,检测关键词为“小云小云”。模型主体为4层FSMN结构,使用CTC训练准则,参数量750K,适用于移动端设备运行。模型输入为Fbank特征,输出为基于char建模的中文全集token预测,测试工具根据每一帧的预测数据进行后处理得到输入音频的实时检测结果。模型训练采用“basetrain + finetune”的模式,basetrain过程使用大量内部移动端数据,在此基础上,使用1万条设备端录制安静场景“小云小云”数据进行微调,得到最终面向业务的模型。后续用户可在basetrain模型基础上,使用其他关键词数据进行微调,得到新的语音唤醒模型,但暂时未开放模型finetune功能。"
)
print(len(out))
assert len(out) == 4  # ChineseTextSplitter does not break sentences, so the actual chunk size may exceed 30
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/tests/test_memory.py | applications/ColossalQA/tests/test_memory.py | import os
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.local.llm import ColossalAPI, ColossalLLM
from colossalqa.memory import ConversationBufferWithSummary
from colossalqa.prompt.prompt import PROMPT_RETRIEVAL_QA_ZH
from colossalqa.retriever import CustomRetriever
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
def test_memory_long():
model_path = os.environ.get("EN_MODEL_PATH")
data_path = os.environ.get("TEST_DATA_PATH_EN")
model_name = os.environ.get("EN_MODEL_NAME")
sql_file_path = os.environ.get("SQL_FILE_PATH")
if not os.path.exists(sql_file_path):
os.makedirs(sql_file_path)
colossal_api = ColossalAPI.get_api(model_name, model_path)
llm = ColossalLLM(n=4, api=colossal_api)
memory = ConversationBufferWithSummary(
llm=llm, max_tokens=600, llm_kwargs={"max_new_tokens": 50, "temperature": 0.6, "do_sample": True}
)
retriever_data = DocumentLoader([[data_path, "company information"]]).all_data
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
splits = text_splitter.split_documents(retriever_data)
embedding = HuggingFaceEmbeddings(
model_name="moka-ai/m3e-base", model_kwargs={"device": "cpu"}, encode_kwargs={"normalize_embeddings": False}
)
# Create retriever
information_retriever = CustomRetriever(k=3, sql_file_path=sql_file_path)
information_retriever.add_documents(docs=splits, cleanup="incremental", mode="by_source", embedding=embedding)
memory.initiate_document_retrieval_chain(
llm,
PROMPT_RETRIEVAL_QA_ZH,
information_retriever,
chain_type_kwargs={
"chat_history": "",
},
)
# This keeps the prompt length (excluding dialogues) the same
docs = information_retriever.get_relevant_documents("this is a test input.")
prompt_length = memory.chain.prompt_length(docs, **{"question": "this is a test input.", "chat_history": ""})
remain = 600 - prompt_length
have_summarization_flag = False
for i in range(40):
chat_history = memory.load_memory_variables({"question": "this is a test input.", "input_documents": docs})[
"chat_history"
]
assert memory.get_conversation_length() <= remain
memory.save_context({"question": "this is a test input."}, {"output": "this is a test output."})
if "A summarization of historical conversation:" in chat_history:
have_summarization_flag = True
assert have_summarization_flag
def test_memory_short():
model_path = os.environ.get("EN_MODEL_PATH")
data_path = os.environ.get("TEST_DATA_PATH_EN")
model_name = os.environ.get("EN_MODEL_NAME")
sql_file_path = os.environ.get("SQL_FILE_PATH")
if not os.path.exists(sql_file_path):
os.makedirs(sql_file_path)
colossal_api = ColossalAPI.get_api(model_name, model_path)
llm = ColossalLLM(n=4, api=colossal_api)
memory = ConversationBufferWithSummary(
llm=llm, llm_kwargs={"max_new_tokens": 50, "temperature": 0.6, "do_sample": True}
)
retriever_data = DocumentLoader([[data_path, "company information"]]).all_data
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
splits = text_splitter.split_documents(retriever_data)
embedding = HuggingFaceEmbeddings(
model_name="moka-ai/m3e-base", model_kwargs={"device": "cpu"}, encode_kwargs={"normalize_embeddings": False}
)
# create retriever
information_retriever = CustomRetriever(k=3, sql_file_path=sql_file_path)
information_retriever.add_documents(docs=splits, cleanup="incremental", mode="by_source", embedding=embedding)
memory.initiate_document_retrieval_chain(
llm,
PROMPT_RETRIEVAL_QA_ZH,
information_retriever,
chain_type_kwargs={
"chat_history": "",
},
)
# This keeps the prompt length (excluding dialogues) the same
docs = information_retriever.get_relevant_documents("this is a test input.", return_scores=True)
for i in range(4):
chat_history = memory.load_memory_variables({"question": "this is a test input.", "input_documents": docs})[
"chat_history"
]
assert chat_history.count("Assistant: this is a test output.") == i
assert chat_history.count("Human: this is a test input.") == i
memory.save_context({"question": "this is a test input."}, {"output": "this is a test output."})
if __name__ == "__main__":
test_memory_short()
test_memory_long()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/tests/__init__.py | applications/ColossalQA/tests/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/tests/test_document_loader.py | applications/ColossalQA/tests/test_document_loader.py | import os
from colossalqa.data_loader.document_loader import DocumentLoader
def test_add_document():
PATH = os.environ.get("TEST_DOCUMENT_LOADER_DATA_PATH")
files = [[PATH, "all data"]]
document_loader = DocumentLoader(files)
documents = document_loader.all_data
all_files = []
for doc in documents:
assert isinstance(doc.page_content, str)
if doc.metadata["source"] not in all_files:
all_files.append(doc.metadata["source"])
print(all_files)
assert len(all_files) == 6
if __name__ == "__main__":
test_add_document()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/retrieval_conversation_en.py | applications/ColossalQA/examples/retrieval_conversation_en.py | """
Script for English retrieval based conversation system backed by LLaMa2
"""
import argparse
import os
from colossalqa.chain.retrieval_qa.base import RetrievalQA
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.local.llm import ColossalAPI, ColossalLLM
from colossalqa.memory import ConversationBufferWithSummary
from colossalqa.prompt.prompt import (
EN_RETRIEVAL_QA_REJECTION_ANSWER,
EN_RETRIEVAL_QA_TRIGGER_KEYWORDS,
PROMPT_DISAMBIGUATE_EN,
PROMPT_RETRIEVAL_QA_EN,
)
from colossalqa.retriever import CustomRetriever
from langchain import LLMChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description="English retrieval based conversation system backed by LLaMa2")
parser.add_argument("--model_path", type=str, default=None, help="path to the model")
parser.add_argument("--model_name", type=str, default=None, help="name of the model")
parser.add_argument(
"--sql_file_path", type=str, default=None, help="path to the a empty folder for storing sql files for indexing"
)
args = parser.parse_args()
if not os.path.exists(args.sql_file_path):
os.makedirs(args.sql_file_path)
colossal_api = ColossalAPI.get_api(args.model_name, args.model_path)
llm = ColossalLLM(n=1, api=colossal_api)
# Define the retriever
information_retriever = CustomRetriever(k=3, sql_file_path=args.sql_file_path, verbose=True)
# Setup embedding model locally
embedding = HuggingFaceEmbeddings(
model_name="moka-ai/m3e-base", model_kwargs={"device": "cpu"}, encode_kwargs={"normalize_embeddings": False}
)
# Define memory with summarization ability
memory = ConversationBufferWithSummary(
llm=llm, max_tokens=2000, llm_kwargs={"max_new_tokens": 50, "temperature": 0.6, "do_sample": True}
)
# Define the chain to preprocess the input
# Disambiguate the input. e.g. "What is the capital of that country?" -> "What is the capital of France?"
llm_chain_disambiguate = LLMChain(
llm=llm, prompt=PROMPT_DISAMBIGUATE_EN, llm_kwargs={"max_new_tokens": 30, "temperature": 0.6, "do_sample": True}
)
def disambiguity(input):
out = llm_chain_disambiguate.run(input=input, chat_history=memory.buffer, stop=["\n"])
return out.split("\n")[0]
# Load data to vector store
print("Select files for constructing retriever")
documents = []
while True:
file = input("Enter a file path or press Enter directory without input to exit:").strip()
if file == "":
break
data_name = input("Enter a short description of the data:")
separator = input(
"Enter a separator to force separating text into chunks, if no separator is given, the default separator is '\\n\\n'. Note that"
+ "we use neural text spliter to split texts into chunks, the seperator only serves as a delimiter to force split long passage into"
+ " chunks before passing to the neural network. Press ENTER directly to skip:"
)
separator = separator if separator != "" else "\n\n"
retriever_data = DocumentLoader([[file, data_name.replace(" ", "_")]]).all_data
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
splits = text_splitter.split_documents(retriever_data)
documents.extend(splits)
# Create retriever
information_retriever.add_documents(docs=documents, cleanup="incremental", mode="by_source", embedding=embedding)
# Set document retrieval chain, we need this chain to calculate prompt length
memory.initiate_document_retrieval_chain(
llm,
PROMPT_RETRIEVAL_QA_EN,
information_retriever,
chain_type_kwargs={
"chat_history": "",
},
)
# Define retrieval chain
retrieval_chain = RetrievalQA.from_chain_type(
llm=llm,
verbose=False,
chain_type="stuff",
retriever=information_retriever,
chain_type_kwargs={"prompt": PROMPT_RETRIEVAL_QA_EN, "memory": memory},
llm_kwargs={"max_new_tokens": 50, "temperature": 0.75, "do_sample": True},
)
# Set disambiguity handler
information_retriever.set_rephrase_handler(disambiguity)
# Start conversation
while True:
user_input = input("User: ")
if "END" == user_input:
print("Agent: Happy to chat with you :)")
break
agent_response = retrieval_chain.run(
query=user_input,
stop=["Human: "],
rejection_trigger_keywords=EN_RETRIEVAL_QA_TRIGGER_KEYWORDS,
rejection_answer=EN_RETRIEVAL_QA_REJECTION_ANSWER,
)
agent_response = agent_response.split("\n")[0]
print(f"Agent: {agent_response}")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/retrieval_intent_classification_zh_customer_service.py | applications/ColossalQA/examples/retrieval_intent_classification_zh_customer_service.py | """
Script for English retrieval based conversation system backed by LLaMa2
"""
import argparse
import os
from colossalqa.chain.retrieval_qa.base import RetrievalQA
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.local.llm import ColossalAPI, ColossalLLM
from colossalqa.prompt.prompt import PROMPT_RETRIEVAL_CLASSIFICATION_USE_CASE_ZH
from colossalqa.retriever import CustomRetriever
from colossalqa.text_splitter import ChineseTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description="English retrieval based conversation system backed by LLaMa2")
parser.add_argument("--model_path", type=str, default=None, help="path to the model")
parser.add_argument("--model_name", type=str, default=None, help="name of the model")
parser.add_argument(
"--sql_file_path", type=str, default=None, help="path to the a empty folder for storing sql files for indexing"
)
args = parser.parse_args()
if not os.path.exists(args.sql_file_path):
os.makedirs(args.sql_file_path)
colossal_api = ColossalAPI.get_api(args.model_name, args.model_path)
llm = ColossalLLM(n=1, api=colossal_api)
# Define the retriever
information_retriever = CustomRetriever(k=2, sql_file_path=args.sql_file_path, verbose=True)
# Setup embedding model locally
embedding = HuggingFaceEmbeddings(
model_name="moka-ai/m3e-base", model_kwargs={"device": "cpu"}, encode_kwargs={"normalize_embeddings": False}
)
# Load data to vector store
print("Select files for constructing retriever")
documents = []
# Define a metadata function, which is used to format the prompt with the value stored in metadata instead of the key
# (the latter is langchain's default behavior)
def metadata_func(data_sample, additional_fields):
"""
metadata_func (Callable[Dict, Dict]): A function that takes in the JSON
object extracted by the jq_schema and the default metadata and returns
a dict of the updated metadata.
To use key-value format, the metadata_func should be defined as follows:
metadata = {'value': 'a string to be used to format the prompt', 'is_key_value_mapping': True}
"""
metadata = {}
metadata["value"] = f"Question: {data_sample['key']}\nAnswer:{data_sample['value']}"
metadata["is_key_value_mapping"] = True
assert "value" not in additional_fields
assert "is_key_value_mapping" not in additional_fields
metadata.update(additional_fields)
return metadata
retriever_data = DocumentLoader(
[["../data/data_sample/custom_service_classification.json", "CustomerServiceDemo"]],
content_key="key",
metadata_func=metadata_func,
).all_data
# Split
text_splitter = ChineseTextSplitter()
splits = text_splitter.split_documents(retriever_data)
documents.extend(splits)
# Create retriever
information_retriever.add_documents(docs=documents, cleanup="incremental", mode="by_source", embedding=embedding)
# Define retrieval chain
retrieval_chain = RetrievalQA.from_chain_type(
llm=llm,
verbose=True,
chain_type="stuff",
retriever=information_retriever,
chain_type_kwargs={"prompt": PROMPT_RETRIEVAL_CLASSIFICATION_USE_CASE_ZH},
llm_kwargs={"max_new_tokens": 50, "temperature": 0.75, "do_sample": True},
)
# Set disambiguity handler
# Start conversation
while True:
user_input = input("User: ")
if "END" == user_input:
print("Agent: Happy to chat with you :)")
break
# Use sentences similar to the keys in custom_service_classification.json as the input
agent_response = retrieval_chain.run(query=user_input, stop=["Human: "])
agent_response = agent_response.split("\n")[0]
print(f"Agent: {agent_response}")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/retrieval_conversation_en_customer_service.py | applications/ColossalQA/examples/retrieval_conversation_en_customer_service.py | """
Script for English retrieval based conversation system backed by LLaMa2
"""
import argparse
import json
import os
from colossalqa.chain.retrieval_qa.base import RetrievalQA
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.local.llm import ColossalAPI, ColossalLLM
from colossalqa.memory import ConversationBufferWithSummary
from colossalqa.prompt.prompt import (
EN_RETRIEVAL_QA_REJECTION_ANSWER,
EN_RETRIEVAL_QA_TRIGGER_KEYWORDS,
PROMPT_DISAMBIGUATE_EN,
PROMPT_RETRIEVAL_QA_EN,
)
from colossalqa.retriever import CustomRetriever
from langchain import LLMChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description="English retrieval based conversation system backed by LLaMa2")
parser.add_argument("--model_path", type=str, default=None, help="path to the model")
parser.add_argument("--model_name", type=str, default=None, help="name of the model")
parser.add_argument(
"--sql_file_path", type=str, default=None, help="path to the a empty folder for storing sql files for indexing"
)
args = parser.parse_args()
if not os.path.exists(args.sql_file_path):
os.makedirs(args.sql_file_path)
colossal_api = ColossalAPI.get_api(args.model_name, args.model_path)
llm = ColossalLLM(n=1, api=colossal_api)
# Define the retriever
information_retriever = CustomRetriever(k=3, sql_file_path=args.sql_file_path, verbose=True)
# Setup embedding model locally
embedding = HuggingFaceEmbeddings(
model_name="moka-ai/m3e-base", model_kwargs={"device": "cpu"}, encode_kwargs={"normalize_embeddings": False}
)
# Define memory with summarization ability
memory = ConversationBufferWithSummary(
llm=llm, max_tokens=2000, llm_kwargs={"max_new_tokens": 50, "temperature": 0.6, "do_sample": True}
)
# Define the chain to preprocess the input
# Disambiguate the input. e.g. "What is the capital of that country?" -> "What is the capital of France?"
llm_chain_disambiguate = LLMChain(
llm=llm, prompt=PROMPT_DISAMBIGUATE_EN, llm_kwargs={"max_new_tokens": 30, "temperature": 0.6, "do_sample": True}
)
def disambiguity(input):
out = llm_chain_disambiguate.run(input=input, chat_history=memory.buffer, stop=["\n"])
return out.split("\n")[0]
# Load data to vector store
print("Select files for constructing retriever")
documents = []
# preprocess data
if not os.path.exists("../data/data_sample/custom_service_preprocessed.json"):
if not os.path.exists("../data/data_sample/custom_service.json"):
raise ValueError(
"custom_service.json not found, please download the data from HuggingFace Datasets: qgyd2021/e_commerce_customer_service"
)
data = json.load(open("../data/data_sample/custom_service.json", "r", encoding="utf8"))
preprocessed = []
for row in data["rows"]:
preprocessed.append({"key": row["row"]["query"], "value": row["row"]["response"]})
data = {}
data["data"] = preprocessed
with open("../data/data_sample/custom_service_preprocessed.json", "w", encoding="utf8") as f:
json.dump(data, f, ensure_ascii=False)
# Define a metadata function, which is used to format the prompt with the value stored in metadata instead of the key
# (the latter is langchain's default behavior)
def metadata_func(data_sample, additional_fields):
"""
metadata_func (Callable[Dict, Dict]): A function that takes in the JSON
object extracted by the jq_schema and the default metadata and returns
a dict of the updated metadata.
To use key-value format, the metadata_func should be defined as follows:
metadata = {'value': 'a string to be used to format the prompt', 'is_key_value_mapping': True}
"""
metadata = {}
metadata["value"] = f"Question: {data_sample['key']}\nAnswer:{data_sample['value']}"
metadata["is_key_value_mapping"] = True
assert "value" not in additional_fields
assert "is_key_value_mapping" not in additional_fields
metadata.update(additional_fields)
return metadata
retriever_data = DocumentLoader(
[["../data/data_sample/custom_service_preprocessed.json", "CustomerServiceDemo"]],
content_key="key",
metadata_func=metadata_func,
).all_data
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
splits = text_splitter.split_documents(retriever_data)
documents.extend(splits)
# Create retriever
information_retriever.add_documents(docs=documents, cleanup="incremental", mode="by_source", embedding=embedding)
# Set document retrieval chain, we need this chain to calculate prompt length
memory.initiate_document_retrieval_chain(
llm,
PROMPT_RETRIEVAL_QA_EN,
information_retriever,
chain_type_kwargs={
"chat_history": "",
},
)
# Define retrieval chain
retrieval_chain = RetrievalQA.from_chain_type(
llm=llm,
verbose=False,
chain_type="stuff",
retriever=information_retriever,
chain_type_kwargs={"prompt": PROMPT_RETRIEVAL_QA_EN, "memory": memory},
llm_kwargs={"max_new_tokens": 50, "temperature": 0.75, "do_sample": True},
)
# Set disambiguity handler
information_retriever.set_rephrase_handler(disambiguity)
# Start conversation
while True:
user_input = input("User: ")
if "END" == user_input:
print("Agent: Happy to chat with you :)")
break
agent_response = retrieval_chain.run(
query=user_input,
stop=["Human: "],
rejection_trigger_keywords=EN_RETRIEVAL_QA_TRIGGER_KEYWORDS,
rejection_answer=EN_RETRIEVAL_QA_REJECTION_ANSWER,
)
agent_response = agent_response.split("\n")[0]
print(f"Agent: {agent_response}")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/retrieval_conversation_chatgpt.py | applications/ColossalQA/examples/retrieval_conversation_chatgpt.py | """
Multilingual retrieval based conversation system backed by ChatGPT
"""
import argparse
import os
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.memory import ConversationBufferWithSummary
from colossalqa.retriever import CustomRetriever
from langchain import LLMChain
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import OpenAI
from langchain.prompts.prompt import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Multilingual retrieval based conversation system backed by ChatGPT")
parser.add_argument("--open_ai_key_path", type=str, default=None, help="path to the model")
parser.add_argument(
"--sql_file_path", type=str, default=None, help="path to the a empty folder for storing sql files for indexing"
)
args = parser.parse_args()
if not os.path.exists(args.sql_file_path):
os.makedirs(args.sql_file_path)
# Setup openai key
# Set env var OPENAI_API_KEY or load from a file
openai_key = open(args.open_ai_key_path).read()
os.environ["OPENAI_API_KEY"] = openai_key
llm = OpenAI(temperature=0.6)
information_retriever = CustomRetriever(k=3, sql_file_path=args.sql_file_path, verbose=True)
# VectorDB
embedding = HuggingFaceEmbeddings(
model_name="moka-ai/m3e-base", model_kwargs={"device": "cpu"}, encode_kwargs={"normalize_embeddings": False}
)
# Define memory with summarization ability
memory = ConversationBufferWithSummary(llm=llm)
# Load data to vector store
print("Select files for constructing retriever")
documents = []
while True:
file = input("Enter a file path or press Enter directory without input to exit:").strip()
if file == "":
break
data_name = input("Enter a short description of the data:")
retriever_data = DocumentLoader([[file, data_name.replace(" ", "_")]]).all_data
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=0)
splits = text_splitter.split_documents(retriever_data)
documents.extend(splits)
# Create retriever
information_retriever.add_documents(docs=documents, cleanup="incremental", mode="by_source", embedding=embedding)
prompt_template = """Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If the answer cannot be inferred based on the given context, please don't share false information.
Use the context and chat history to respond to the human's input at the end or carry on the conversation. You should generate one response only. No following up is needed.
context:
{context}
chat history
{chat_history}
Human: {question}
Assistant:"""
prompt_template_disambiguate = """You are a helpful, respectful and honest assistant. You always follow the instruction.
Please replace any ambiguous references in the given sentence with the specific names or entities mentioned in the chat history or just output the original sentence if no chat history is provided or if the sentence doesn't contain ambiguous references. Your output should be the disambiguated sentence itself (in the same line as "disambiguated sentence:") and contain nothing else.
Here is an example:
Chat history:
Human: I have a friend, Mike. Do you know him?
Assistant: Yes, I know a person named Mike
sentence: What's his favorite food?
disambiguated sentence: What's Mike's favorite food?
END OF EXAMPLE
Chat history:
{chat_history}
sentence: {input}
disambiguated sentence:"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["question", "chat_history", "context"])
memory.initiate_document_retrieval_chain(
llm,
PROMPT,
information_retriever,
chain_type_kwargs={
"chat_history": "",
},
)
PROMPT_DISAMBIGUATE = PromptTemplate(
template=prompt_template_disambiguate, input_variables=["chat_history", "input"]
)
llm_chain = RetrievalQA.from_chain_type(
llm=llm,
verbose=False,
chain_type="stuff",
retriever=information_retriever,
chain_type_kwargs={"prompt": PROMPT, "memory": memory},
)
llm_chain_disambiguate = LLMChain(llm=llm, prompt=PROMPT_DISAMBIGUATE)
def disambiguity(input):
out = llm_chain_disambiguate.run({"input": input, "chat_history": memory.buffer})
return out.split("\n")[0]
information_retriever.set_rephrase_handler(disambiguity)
while True:
user_input = input("User: ")
if " end " in user_input:
print("Agent: Happy to chat with you :)")
break
agent_response = llm_chain.run(user_input)
agent_response = agent_response.split("\n")[0]
print(f"Agent: {agent_response}")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/retrieval_conversation_zh.py | applications/ColossalQA/examples/retrieval_conversation_zh.py | """
Script for Chinese retrieval based conversation system backed by ChatGLM
"""
import argparse
import os
from colossalqa.chain.retrieval_qa.base import RetrievalQA
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.local.llm import ColossalAPI, ColossalLLM
from colossalqa.memory import ConversationBufferWithSummary
from colossalqa.prompt.prompt import (
PROMPT_DISAMBIGUATE_ZH,
PROMPT_RETRIEVAL_QA_ZH,
SUMMARY_PROMPT_ZH,
ZH_RETRIEVAL_QA_REJECTION_ANSWER,
ZH_RETRIEVAL_QA_TRIGGER_KEYWORDS,
)
from colossalqa.retriever import CustomRetriever
from colossalqa.text_splitter import ChineseTextSplitter
from langchain import LLMChain
from langchain.embeddings import HuggingFaceEmbeddings
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description="Chinese retrieval based conversation system backed by ChatGLM2")
parser.add_argument("--model_path", type=str, default=None, help="path to the model")
parser.add_argument("--model_name", type=str, default=None, help="name of the model")
parser.add_argument(
"--sql_file_path", type=str, default=None, help="path to the a empty folder for storing sql files for indexing"
)
args = parser.parse_args()
if not os.path.exists(args.sql_file_path):
os.makedirs(args.sql_file_path)
colossal_api = ColossalAPI.get_api(args.model_name, args.model_path)
llm = ColossalLLM(n=1, api=colossal_api)
# Setup embedding model locally
embedding = HuggingFaceEmbeddings(
model_name="moka-ai/m3e-base", model_kwargs={"device": "cpu"}, encode_kwargs={"normalize_embeddings": False}
)
# Define the retriever
information_retriever = CustomRetriever(k=3, sql_file_path=args.sql_file_path, verbose=True)
# Define memory with summarization ability
memory = ConversationBufferWithSummary(
llm=llm,
prompt=SUMMARY_PROMPT_ZH,
human_prefix="用户",
ai_prefix="Assistant",
max_tokens=2000,
llm_kwargs={"max_new_tokens": 50, "temperature": 0.6, "do_sample": True},
)
# Define the chain to preprocess the input
# Disambiguate the input. e.g. "What is the capital of that country?" -> "What is the capital of France?"
llm_chain_disambiguate = LLMChain(
llm=llm, prompt=PROMPT_DISAMBIGUATE_ZH, llm_kwargs={"max_new_tokens": 30, "temperature": 0.6, "do_sample": True}
)
def disambiguity(input: str):
out = llm_chain_disambiguate.run(input=input, chat_history=memory.buffer, stop=["\n"])
return out.split("\n")[0]
# Load data to vector store
print("Select files for constructing retriever")
documents = []
while True:
file = input("Enter a file path or press Enter directory without input to exit:").strip()
if file == "":
break
data_name = input("Enter a short description of the data:")
retriever_data = DocumentLoader([[file, data_name.replace(" ", "_")]]).all_data
# Split
text_splitter = ChineseTextSplitter()
splits = text_splitter.split_documents(retriever_data)
documents.extend(splits)
# Create retriever
information_retriever.add_documents(docs=documents, cleanup="incremental", mode="by_source", embedding=embedding)
# Set the document retrieval chain; we need this chain to calculate the prompt length
memory.initiate_document_retrieval_chain(llm, PROMPT_RETRIEVAL_QA_ZH, information_retriever)
# Define retrieval chain
llm_chain = RetrievalQA.from_chain_type(
llm=llm,
verbose=False,
chain_type="stuff",
retriever=information_retriever,
chain_type_kwargs={"prompt": PROMPT_RETRIEVAL_QA_ZH, "memory": memory},
llm_kwargs={"max_new_tokens": 150, "temperature": 0.6, "do_sample": True},
)
# Set disambiguity handler
information_retriever.set_rephrase_handler(disambiguity)
# Start conversation
while True:
user_input = input("User: ")
if "END" == user_input:
print("Agent: Happy to chat with you :)")
break
agent_response = llm_chain.run(
query=user_input,
stop=["</答案>"],
doc_prefix="支持文档",
rejection_trigger_keywords=ZH_RETRIEVAL_QA_TRIGGER_KEYWORDS,
rejection_answer=ZH_RETRIEVAL_QA_REJECTION_ANSWER,
)
print(f"Agent: {agent_response}")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/conversation_agent_chatgpt.py | applications/ColossalQA/examples/conversation_agent_chatgpt.py | """
Script for the multilingual conversation-based experimental AI agent.
ChatGPT is used as the language model.
An OpenAI API key is required to run this script.
"""
import argparse
import os
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.data_loader.table_dataloader import TableLoader
from langchain import LLMChain, OpenAI
from langchain.agents import Tool, ZeroShotAgent
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.memory.chat_memory import ChatMessageHistory
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.utilities import SQLDatabase
from langchain.vectorstores import Chroma
from langchain_experimental.sql import SQLDatabaseChain
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Experimental AI agent powered by ChatGPT")
parser.add_argument("--open_ai_key_path", type=str, default=None, help="path to the plain text open_ai_key file")
args = parser.parse_args()
# Setup openai key
# Set env var OPENAI_API_KEY or load from a file
openai_key = open(args.open_ai_key_path).read()
os.environ["OPENAI_API_KEY"] = openai_key
# Load data served on sql
print("Select files for constructing sql database")
tools = []
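# Use temperature 0.0 so the SQL chain's query generation is as deterministic as possible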
llm = OpenAI(temperature=0.0)
while True:
file = input("Select a file to load or press Enter to exit:")
if file == "":
break
data_name = input("Enter a short description of the data:")
table_loader = TableLoader(
[[file, data_name.replace(" ", "_")]], sql_path=f"sqlite:///{data_name.replace(' ', '_')}.db"
)
sql_path = table_loader.get_sql_path()
# Create sql database
db = SQLDatabase.from_uri(sql_path)
print(db.get_table_info())
db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)
name = f"Query the SQL database regarding {data_name}"
description = (
f"useful for when you need to answer questions based on data stored on a SQL database regarding {data_name}"
)
tools.append(
Tool(
name=name,
func=db_chain.run,
description=description,
)
)
print(f"Added sql dataset\n\tname={name}\n\tdescription:{description}")
# VectorDB
embedding = OpenAIEmbeddings()
# Load data to be served via the vector store retriever
print("Select files for constructing retriever")
while True:
file = input("Select a file to load or press Enter to exit:")
if file == "":
break
data_name = input("Enter a short description of the data:")
retriever_data = DocumentLoader([[file, data_name.replace(" ", "_")]]).all_data
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=0)
splits = text_splitter.split_documents(retriever_data)
# Create vector store
vectordb = Chroma.from_documents(documents=splits, embedding=embedding)
# Create retriever
retriever = vectordb.as_retriever(
search_type="similarity_score_threshold", search_kwargs={"score_threshold": 0.5, "k": 5}
)
# Add to tool chain
name = f"Searches and returns documents regarding {data_name}."
tools.append(create_retriever_tool(retriever, data_name, name))
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools. If none of the tools can be used to answer the question. Do not share uncertain answer unless you think answering the question doesn't need any background information. In that case, try to answer the question directly."""
suffix = """You are provided with the following background knowledge:
Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
memory = ConversationBufferMemory(memory_key="chat_history", chat_memory=ChatMessageHistory())
llm_chain = LLMChain(llm=OpenAI(temperature=0.7), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)
while True:
user_input = input("User: ")
if " end " in user_input:
print("Agent: Happy to chat with you :)")
break
agent_response = agent_chain.run(user_input)
print(f"Agent: {agent_response}")
table_loader.sql_engine.dispose()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/retrieval_conversation_universal.py | applications/ColossalQA/examples/retrieval_conversation_universal.py | import argparse
from colossalqa.retrieval_conversation_universal import UniversalRetrievalConversation
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--en_model_path", type=str, default=None)
parser.add_argument("--zh_model_path", type=str, default=None)
parser.add_argument("--zh_model_name", type=str, default=None)
parser.add_argument("--en_model_name", type=str, default=None)
parser.add_argument(
"--sql_file_path", type=str, default=None, help="path to the a empty folder for storing sql files for indexing"
)
args = parser.parse_args()
# Will ask for document paths at runtime
session = UniversalRetrievalConversation(
files_en=None,
files_zh=None,
zh_model_path=args.zh_model_path,
en_model_path=args.en_model_path,
zh_model_name=args.zh_model_name,
en_model_name=args.en_model_name,
sql_file_path=args.sql_file_path,
)
session.start_test_session()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/webui_demo/RAG_ChatBot.py | applications/ColossalQA/examples/webui_demo/RAG_ChatBot.py | import os
from typing import Dict, Tuple
from colossalqa.chain.retrieval_qa.base import RetrievalQA
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.memory import ConversationBufferWithSummary
from colossalqa.mylogging import get_logger
from colossalqa.prompt.prompt import ZH_RETRIEVAL_QA_REJECTION_ANSWER, ZH_RETRIEVAL_QA_TRIGGER_KEYWORDS
from colossalqa.retriever import CustomRetriever
from langchain import LLMChain
from langchain.embeddings import HuggingFaceEmbeddings
logger = get_logger()
class RAG_ChatBot:
def __init__(
self,
llm,
rag_config,
) -> None:
self.llm = llm
self.rag_config = rag_config
self.set_embed_model(**self.rag_config["embed"])
self.set_text_splitter(**self.rag_config["splitter"])
self.set_memory(**self.rag_config["chain"])
self.set_info_retriever(**self.rag_config["retrieval"])
self.set_rag_chain(**self.rag_config["chain"])
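# Optionally set up question disambiguation when a disambiguation prompt is configured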
if self.rag_config["chain"].get("disambig_prompt", None):
self.set_disambig_retriv(**self.rag_config["chain"])
self.documents = []
self.docs_names = []
def set_embed_model(self, **kwargs):
self.embed_model = HuggingFaceEmbeddings(
model_name=kwargs["embed_model_name_or_path"],
model_kwargs=kwargs["embed_model_device"],
encode_kwargs={"normalize_embeddings": False},
)
def set_text_splitter(self, **kwargs):
# Initialize text_splitter
self.text_splitter = kwargs["name"]()
def set_memory(self, **kwargs):
params = {"llm_kwargs": kwargs["mem_llm_kwargs"]} if kwargs.get("mem_llm_kwargs", None) else {}
# Initialize memory with summarization ability
self.memory = ConversationBufferWithSummary(
llm=self.llm,
prompt=kwargs["mem_summary_prompt"],
human_prefix=kwargs["mem_human_prefix"],
ai_prefix=kwargs["mem_ai_prefix"],
max_tokens=kwargs["mem_max_tokens"],
**params,
)
def set_info_retriever(self, **kwargs):
self.info_retriever = CustomRetriever(
k=kwargs["retri_top_k"], sql_file_path=kwargs["retri_kb_file_path"], verbose=kwargs["verbose"]
)
def set_rag_chain(self, **kwargs):
params = {"llm_kwargs": kwargs["gen_llm_kwargs"]} if kwargs.get("gen_llm_kwargs", None) else {}
self.rag_chain = RetrievalQA.from_chain_type(
llm=self.llm,
verbose=kwargs["verbose"],
chain_type="stuff",
retriever=self.info_retriever,
chain_type_kwargs={"prompt": kwargs["gen_qa_prompt"], "memory": self.memory},
**params,
)
def set_disambig_retriv(self, **kwargs):
params = {"llm_kwargs": kwargs["disambig_llm_kwargs"]} if kwargs.get("disambig_llm_kwargs", None) else {}
self.llm_chain_disambiguate = LLMChain(llm=self.llm, prompt=kwargs["disambig_prompt"], **params)
def disambiguity(input: str):
out = self.llm_chain_disambiguate.run(input=input, chat_history=self.memory.buffer, stop=["\n"])
return out.split("\n")[0]
self.info_retriever.set_rephrase_handler(disambiguity)
def load_doc_from_console(self, json_parse_args: Dict = {}):
print("Select files for constructing the retriever")
while True:
file = input("Enter a file path or press Enter directly without input to exit:").strip()
if file == "":
break
data_name = input("Enter a short description of the data:")
docs = DocumentLoader([[file, data_name.replace(" ", "_")]], **json_parse_args).all_data
self.documents.extend(docs)
self.docs_names.append(data_name)
self.split_docs_and_add_to_mem(**self.rag_config["chain"])
def load_doc_from_files(self, files, data_name="default_kb", json_parse_args: Dict = {}):
for file in files:
docs = DocumentLoader([[file, data_name.replace(" ", "_")]], **json_parse_args).all_data
self.documents.extend(docs)
self.docs_names.append(os.path.basename(file))
self.split_docs_and_add_to_mem(**self.rag_config["chain"])
def split_docs_and_add_to_mem(self, **kwargs):
doc_splits = self.split_docs(self.documents)
self.info_retriever.add_documents(
docs=doc_splits, cleanup="incremental", mode="by_source", embedding=self.embed_model
)
self.memory.initiate_document_retrieval_chain(self.llm, kwargs["gen_qa_prompt"], self.info_retriever)
def split_docs(self, documents):
doc_splits = self.text_splitter.split_documents(documents)
return doc_splits
def clear_docs(self, **kwargs):
self.documents = []
self.docs_names = []
self.info_retriever.clear_documents()
self.memory.initiate_document_retrieval_chain(self.llm, kwargs["gen_qa_prompt"], self.info_retriever)
def reset_config(self, rag_config):
self.rag_config = rag_config
self.set_embed_model(**self.rag_config["embed"])
self.set_text_splitter(**self.rag_config["splitter"])
self.set_memory(**self.rag_config["chain"])
self.set_info_retriever(**self.rag_config["retrieval"])
self.set_rag_chain(**self.rag_config["chain"])
if self.rag_config["chain"].get("disambig_prompt", None):
self.set_disambig_retriv(**self.rag_config["chain"])
def run(self, user_input: str, memory: ConversationBufferWithSummary) -> Tuple[str, ConversationBufferWithSummary]:
if memory:
memory.buffered_history.messages = memory.buffered_history.messages
memory.summarized_history_temp.messages = memory.summarized_history_temp.messages
result = self.rag_chain.run(
query=user_input,
stop=[memory.human_prefix + ": "],
rejection_trigger_keywords=ZH_RETRIEVAL_QA_TRIGGER_KEYWORDS,
rejection_answer=ZH_RETRIEVAL_QA_REJECTION_ANSWER,
)
return result, memory
def start_test_session(self):
"""
Simple session for testing purpose
"""
while True:
user_input = input("User: ")
if "END" == user_input:
print("Agent: Happy to chat with you :)")
break
agent_response, self.memory = self.run(user_input, self.memory)
print(f"Agent: {agent_response}")
if __name__ == "__main__":
# Initialize a LangChain LLM (here we use ChatGPT as an example)
import config
from langchain.llms import OpenAI
# you need to: export OPENAI_API_KEY="YOUR_OPENAI_API_KEY"
llm = OpenAI(openai_api_key=os.getenv("OPENAI_API_KEY"))
# ChatGPT cannot control temperature, do_sample, etc., so these kwargs are set to None
all_config = config.ALL_CONFIG
all_config["chain"]["mem_llm_kwargs"] = None
all_config["chain"]["disambig_llm_kwargs"] = None
all_config["chain"]["gen_llm_kwargs"] = None
rag = RAG_ChatBot(llm, all_config)
rag.load_doc_from_console()
rag.start_test_session()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/webui_demo/webui.py | applications/ColossalQA/examples/webui_demo/webui.py | import argparse
import json
import os
import gradio as gr
import requests
from utils import DocAction
def parseArgs():
parser = argparse.ArgumentParser()
parser.add_argument("--http_host", default="0.0.0.0")
parser.add_argument("--http_port", type=int, default=13666)
return parser.parse_args()
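# POST a JSON payload to the backend server and return the parsed JSON response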
def get_response(data, url):
headers = {"Content-type": "application/json"}
response = requests.post(url, json=data, headers=headers)
response = json.loads(response.content)
return response
def add_text(history, text):
history = history + [(text, None)]
return history, gr.update(value=None, interactive=True)
def add_file(history, files):
files_string = "\n".join([os.path.basename(file.name) for file in files])
doc_files = [file.name for file in files]
data = {"doc_files": doc_files, "action": DocAction.ADD}
response = get_response(data, update_url)["response"]
history = history + [(files_string, response)]
return history
def bot(history):
data = {"user_input": history[-1][0].strip()}
response = get_response(data, gen_url)
if response["error"] != "":
raise gr.Error(response["error"])
history[-1][1] = response["response"]
yield history
def restart(chatbot, txt):
# Reset the conversation state and clear the chat history
data = {"doc_files": "", "action": DocAction.CLEAR}
get_response(data, update_url)
return gr.update(value=None), gr.update(value=None, interactive=True)
CSS = """
.contain { display: flex; flex-direction: column; height: 100vh }
#component-0 { height: 100%; }
#chatbot { flex-grow: 1; }
"""
header_html = """
<div style="background: linear-gradient(to right, #2a0cf4, #7100ed, #9800e6, #b600df, #ce00d9, #dc0cd1, #e81bca, #f229c3, #f738ba, #f946b2, #fb53ab, #fb5fa5); padding: 20px; text-align: left;">
<h1 style="color: white;">ColossalQA</h1>
<h4 style="color: white;">A powerful Q&A system with knowledge bases</h4>
</div>
"""
with gr.Blocks(css=CSS) as demo:
html = gr.HTML(header_html)
chatbot = gr.Chatbot(
[],
elem_id="chatbot",
bubble_full_width=False,
avatar_images=(
(os.path.join(os.path.dirname(__file__), "img/avatar_user.png")),
(os.path.join(os.path.dirname(__file__), "img/avatar_ai.png")),
),
)
with gr.Row():
btn = gr.UploadButton("📁", file_types=["file"], file_count="multiple", size="sm")
restart_btn = gr.Button(str("\u21BB"), elem_id="restart-btn", scale=1)
txt = gr.Textbox(
scale=8,
show_label=False,
placeholder="Enter text and press enter, or use 📁 to upload files, click \u21BB to clear loaded files and restart chat",
container=True,
autofocus=True,
)
txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(bot, chatbot, chatbot)
# Clear the original textbox
txt_msg.then(lambda: gr.update(value=None, interactive=True), None, [txt], queue=False)
# Click Upload Button: 1. upload files 2. send config to backend, initialize model 3. get response "conversation_ready" = True/False
file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False)
# restart
restart_msg = restart_btn.click(restart, [chatbot, txt], [chatbot, txt], queue=False)
if __name__ == "__main__":
args = parseArgs()
update_url = f"http://{args.http_host}:{args.http_port}/update"
gen_url = f"http://{args.http_host}:{args.http_port}/generate"
demo.queue()
demo.launch(share=True)  # share=True creates a public shareable link for the demo
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/webui_demo/utils.py | applications/ColossalQA/examples/webui_demo/utils.py | from enum import Enum
class DocAction(str, Enum):
ADD = "add"
CLEAR = "clear"
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/webui_demo/config.py | applications/ColossalQA/examples/webui_demo/config.py | from colossalqa.prompt.prompt import PROMPT_DISAMBIGUATE_ZH, PROMPT_RETRIEVAL_QA_ZH, SUMMARY_PROMPT_ZH
from colossalqa.text_splitter import ChineseTextSplitter
ALL_CONFIG = {
"embed": {
"embed_name": "m3e", # embedding model name
"embed_model_name_or_path": "moka-ai/m3e-base", # path to embedding model, could be a local path or a huggingface path
"embed_model_device": {"device": "cpu"},
},
"model": {
"mode": "api", # "local" for loading models, "api" for using model api
"model_name": "chatgpt_api", # local model name, "chatgpt_api" or "pangu_api"
"model_path": "", # path to the model, could be a local path or a huggingface path. don't need if using an api
"device": {"device": "cuda"},
},
"splitter": {"name": ChineseTextSplitter},
"retrieval": {"retri_top_k": 3, "retri_kb_file_path": "./", "verbose": True}, # path to store database files
"chain": {
"mem_summary_prompt": SUMMARY_PROMPT_ZH, # summary prompt template
"mem_human_prefix": "用户",
"mem_ai_prefix": "Assistant",
"mem_max_tokens": 2000,
"mem_llm_kwargs": {"max_new_tokens": 50, "temperature": 1, "do_sample": True},
"disambig_prompt": PROMPT_DISAMBIGUATE_ZH, # disambiguate prompt template
"disambig_llm_kwargs": {"max_new_tokens": 30, "temperature": 1, "do_sample": True},
"gen_llm_kwargs": {"max_new_tokens": 100, "temperature": 1, "do_sample": True},
"gen_qa_prompt": PROMPT_RETRIEVAL_QA_ZH, # generation prompt template
"verbose": True,
},
}
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalQA/examples/webui_demo/server.py | applications/ColossalQA/examples/webui_demo/server.py | import argparse
from typing import List, Union
import config
import uvicorn
from colossalqa.local.llm import ColossalAPI, ColossalLLM
from colossalqa.mylogging import get_logger
from fastapi import FastAPI, Request
from pydantic import BaseModel
from RAG_ChatBot import RAG_ChatBot
from utils import DocAction
logger = get_logger()
def parseArgs():
parser = argparse.ArgumentParser()
parser.add_argument("--http_host", default="0.0.0.0")
parser.add_argument("--http_port", type=int, default=13666)
return parser.parse_args()
app = FastAPI()
class DocUpdateReq(BaseModel):
doc_files: Union[List[str], str, None] = None
action: DocAction = DocAction.ADD
class GenerationTaskReq(BaseModel):
user_input: str
@app.post("/update")
def update_docs(data: DocUpdateReq, request: Request):
if data.action == "add":
if isinstance(data.doc_files, str):
data.doc_files = [data.doc_files]
chatbot.load_doc_from_files(files=data.doc_files)
all_docs = ""
for doc in chatbot.docs_names:
all_docs += f"\t{doc}\n\n"
return {"response": f"文件上传完成,所有数据库文件:\n\n{all_docs}让我们开始对话吧!"}
elif data.action == "clear":
chatbot.clear_docs(**all_config["chain"])
return {"response": f"已清空数据库。"}
@app.post("/generate")
def generate(data: GenerationTaskReq, request: Request):
try:
chatbot_response, chatbot.memory = chatbot.run(data.user_input, chatbot.memory)
return {"response": chatbot_response, "error": ""}
except Exception as e:
return {"response": "模型生成回答有误", "error": f"Error in generating answers, details: {e}"}
if __name__ == "__main__":
args = parseArgs()
all_config = config.ALL_CONFIG
model_name = all_config["model"]["model_name"]
# initialize chatbot
logger.info(f"Initialize the chatbot from {model_name}")
if all_config["model"]["mode"] == "local":
colossal_api = ColossalAPI(model_name, all_config["model"]["model_path"])
llm = ColossalLLM(n=1, api=colossal_api)
elif all_config["model"]["mode"] == "api":
if model_name == "pangu_api":
from colossalqa.local.pangu_llm import Pangu
gen_config = {
"user": "User",
"max_tokens": all_config["chain"]["disambig_llm_kwargs"]["max_new_tokens"],
"temperature": all_config["chain"]["disambig_llm_kwargs"]["temperature"],
"n": 1, # the number of responses generated
}
llm = Pangu(gen_config=gen_config)
llm.set_auth_config() # verify user's auth info here
elif model_name == "chatgpt_api":
from langchain.llms import OpenAI
llm = OpenAI()
else:
raise ValueError("Unsupported mode.")
# initialize chatbot
chatbot = RAG_ChatBot(llm, all_config)
app_config = uvicorn.Config(app, host=args.http_host, port=args.http_port)
server = uvicorn.Server(config=app_config)
server.run()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalMoE/train.py | applications/ColossalMoE/train.py | import argparse
import torch
import torch.distributed as dist
from torch.utils.data import Dataset
from tqdm import tqdm
from transformers import AutoTokenizer
from transformers.models.mixtral import MixtralForCausalLM
from utils import load_checkpoint, move_to_cuda, save_checkpoint
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin
from colossalai.cluster import DistCoordinator
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.nn.optimizer import HybridAdam
from colossalai.utils import get_current_device
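# Average the loss across the data-parallel group (all-reduce sum, then divide by dp size)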
@torch.no_grad()
def get_global_loss(loss, booster):
global_loss = loss.clone().detach()
dist.all_reduce(tensor=global_loss, op=dist.ReduceOp.SUM, group=booster.plugin.dp_group)
global_loss.div_(booster.plugin.dp_size)
return global_loss
class RandomDataset(Dataset):
def __init__(self, num_samples: int = 1000, max_length: int = 2048, vocab_size: int = 100, tokenizer=None):
self.num_samples = num_samples
self.max_length = max_length
self.input_ids = torch.randint(0, vocab_size, (num_samples, max_length), device=get_current_device())
self.attention_mask = torch.ones_like(self.input_ids)
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
return {
"input_ids": self.input_ids[idx],
"attention_mask": self.attention_mask[idx],
"labels": self.input_ids[idx],
}
def parse_args():
# basic settings
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="mistralai/Mixtral-8x7B-v0.1",
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument("--load_checkpoint", type=str, default=None, help="Load checkpoint")
parser.add_argument(
"--plugin",
type=str,
default="hybrid",
choices=["hybrid"],
help="Parallel methods.",
)
parser.add_argument(
"--output_path",
type=str,
default="./outputs",
help="The path of your saved model after finetuning.",
)
parser.add_argument("--num_epoch", type=int, default=1, help="Number of epochs.")
parser.add_argument(
"--batch_size",
type=int,
default=1,
help="Batch size (per dp group) for the training dataloader.",
)
parser.add_argument(
"--save_interval",
type=int,
default=1000,
help=" The interval (steps) of saving checkpoints.",
)
parser.add_argument(
"--precision",
type=str,
default="bf16",
choices=["fp32", "bf16", "fp16"],
help="The mixed precision training.",
)
parser.add_argument("--max_length", type=int, default=2048, help="Max sequence length.")
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
# optim
parser.add_argument("--lr", type=float, default=1e-5, help="Learning rate.")
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
# lr scheduler
parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
parser.add_argument("--warmup_steps", type=int, default=None, help="Warmup steps")
# zero stage for all plugins
parser.add_argument("--zero_stage", type=int, default=2, help="zero stage.")
# hybrid plugin
parser.add_argument("--pp_size", type=int, default=2, help="pp size for hybrid plugin")
parser.add_argument("--dp_size", type=int, default=1, help="dp size for hybrid plugin")
parser.add_argument("--ep_size", type=int, default=2, help="ep size for hybrid plugin")
parser.add_argument("--microbatch_size", type=int, default=1, help="Microbatch size in pipeline for hybrid plugin")
# kernel
parser.add_argument(
"--use_kernel",
action="store_true",
help="Use kernel optim. Need to install flash attention and triton to enable all kernel optimizations. Skip if not installed.",
)
parser.add_argument(
"--use_layernorm_kernel",
action="store_true",
help="Use layernorm kernel. Need to install apex. Raise error if not installed.",
)
# load balance
parser.add_argument(
"--load_balance", action="store_true", help="Expert load balance. Defaults to False. Recommend to enable."
)
parser.add_argument("--load_balance_interval", type=int, default=1000, help="Expert load balance interval.")
# communicate overlap
parser.add_argument(
"--comm_overlap",
action="store_true",
help="Use communication overlap for MoE. Recommended to enable for multi-node training.",
)
# hierarchical all-to-all
parser.add_argument(
"--hierarchical_alltoall",
action="store_true",
help="Use hierarchical all-to-all for MoE. Recommended to enable for multi-node training.",
)
args = parser.parse_args()
return args
def main():
args = parse_args()
# Launch ColossalAI
colossalai.launch_from_torch(seed=args.seed)
coordinator = DistCoordinator()
# Set plugin
if args.plugin == "hybrid":
plugin = MoeHybridParallelPlugin(
tp_size=1,
pp_size=args.pp_size,
ep_size=args.ep_size,
microbatch_size=args.microbatch_size,
enable_fused_normalization=args.use_layernorm_kernel,
enable_jit_fused=args.use_kernel,
precision=args.precision,
zero_stage=args.zero_stage,
)
else:
raise ValueError(f"Invalid plugin {args.plugin}")
coordinator.print_on_master(f"Set plugin as {plugin.__class__.__name__}")
# Build Mixtral model
model = MixtralForCausalLM.from_pretrained(args.model_name)
coordinator.print_on_master(f"Finish init model")
# Enable gradient checkpointing
model.gradient_checkpointing_enable()
# Prepare tokenizer and dataloader
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
dataset = RandomDataset(num_samples=100, tokenizer=tokenizer)
collate_fn = None
dataloader = plugin.prepare_dataloader(
dataset, batch_size=args.batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn
)
# Set optimizer
optimizer = HybridAdam(
model_params=model.parameters(),
lr=args.lr,
betas=(0.9, 0.95),
weight_decay=args.weight_decay,
adamw_mode=True,
)
# Set lr scheduler
lr_scheduler = CosineAnnealingWarmupLR(
optimizer=optimizer,
total_steps=args.num_epochs * len(dataloader),
warmup_steps=(
args.warmup_steps if args.warmup_steps is not None else int(args.num_epochs * len(dataloader) * 0.025)
),
eta_min=0.1 * args.lr,
)
# Set booster
booster = Booster(plugin=plugin)
model, optimizer, _, dataloader, lr_scheduler = booster.boost(
model=model,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
dataloader=dataloader,
)
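# With pipeline parallelism, the loss is only available on the last pipeline stage, which drives progress reporting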
use_pipeline = isinstance(booster.plugin, MoeHybridParallelPlugin) and booster.plugin.pp_size > 1
is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()
coordinator.print_on_master(f"Finish init booster")
# Load ckpt
if args.load_checkpoint is not None:
load_checkpoint(args.load_checkpoint, booster, model, optimizer, lr_scheduler)
coordinator.print_on_master(f"Finish load optimizer")
# Start finetuning
coordinator.print_on_master(f"Start finetuning")
for epoch in range(args.num_epoch):
model.train()
train_dataloader_iter = iter(dataloader)
total_len = len(train_dataloader_iter)
with tqdm(
range(total_len),
desc=f"Epoch [{epoch + 1}/{args.num_epoch}]",
disable=(not is_pp_last_stage) if use_pipeline else (not coordinator.is_master()),
) as pbar:
for step in pbar:
if use_pipeline:
# Forward pass
outputs = booster.execute_pipeline(
train_dataloader_iter,
model,
lambda x, y: x.loss,
optimizer,
return_loss=True,
)
# Backward and optimize
if is_pp_last_stage:
loss = outputs["loss"]
global_loss = get_global_loss(loss, booster)
if coordinator._local_rank == "0":
pbar.set_postfix({"Loss": global_loss.item()})
else:
# Forward pass
data = next(train_dataloader_iter)
data = move_to_cuda(data, torch.cuda.current_device())
outputs = model(**data)
loss = outputs["loss"]
# Backward
booster.backward(loss, optimizer)
pbar.set_postfix({"loss": loss.item()})
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Apply load balance
# if (
# args.load_balance
# and args.load_balance_interval > 0
# and (step + 1) % args.load_balance_interval == 0
# ):
# coordinator.print_on_master(f"Apply load balance")
# apply_load_balance(model, optimizer)
# save checkpoint
if (step + 1) % args.save_interval == 0:
coordinator.print_on_master(f"Saving model checkpoint to {args.output_path}")
save_checkpoint(
args.output_path,
booster,
model,
optimizer,
lr_scheduler,
epoch,
step,
args.batch_size,
coordinator,
)
# save checkpoint at the end of each epochs
booster.save_model(model, args.output_path, shard=True, size_per_shard=5120)
coordinator.print_on_master(f"Saving model checkpoint to {args.output_path}")
# Finish training
coordinator.print_on_master(f"Finish training")
if __name__ == "__main__":
main()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalMoE/infer.py | applications/ColossalMoE/infer.py | import argparse
import torch
import torch.distributed as dist
from transformers import AutoTokenizer
from transformers.models.mixtral import MixtralConfig, MixtralForCausalLM
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin
from colossalai.cluster import DistCoordinator
def parse_args():
# basic settings
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="mistralai/Mixtral-8x7B-v0.1",
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--plugin",
type=str,
default="ep",
choices=["ep"],
help="Parallel methos.",
)
parser.add_argument(
"--precision",
type=str,
default="bf16",
choices=["fp32", "bf16", "fp16"],
help="The mixed precision training.",
)
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
# kernel
parser.add_argument(
"--use_kernel",
action="store_true",
help="Use kernel optim. Need to install flash attention and triton to enable all kernel optimizations. Skip if not installed.",
)
parser.add_argument(
"--use_layernorm_kernel",
action="store_true",
help="Use layernorm kernel. Need to install apex. Raise error if not installed.",
)
args = parser.parse_args()
return args
def main():
args = parse_args()
# Launch ColossalAI
colossalai.launch_from_torch(seed=args.seed)
coordinator = DistCoordinator()
config = MixtralConfig.from_pretrained(args.model_name)
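# Expert parallel size cannot exceed the number of local experts in the Mixtral config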
ep_size = min(dist.get_world_size(), config.num_local_experts)
# Set plugin
if args.plugin == "ep":
plugin = MoeHybridParallelPlugin(
tp_size=1,
pp_size=1,
ep_size=ep_size,
zero_stage=1,
precision=args.precision,
enable_fused_normalization=args.use_layernorm_kernel,
enable_jit_fused=args.use_kernel,
)
else:
raise ValueError(f"Invalid plugin {args.plugin}")
coordinator.print_on_master(f"Set plugin as {plugin.__class__.__name__}")
# Build mixtral model
model = MixtralForCausalLM.from_pretrained(args.model_name)
coordinator.print_on_master(f"Finish load model")
# Prepare tokenizer and dataloader
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
# Set booster
booster = Booster(plugin=plugin)
model, _, _, _, _ = booster.boost(model=model)
coordinator.print_on_master(f"Finish init booster")
model.eval()
if coordinator.rank == 0:
text = ["Hello my name is"]
else:
text = [
"What's the largest country in the world?",
"How many people live in China?",
"帮我续写这首诗:离离原上草",
]
tokenizer.pad_token = tokenizer.unk_token
inputs = tokenizer(text, return_tensors="pt", padding=True).to(torch.cuda.current_device())
with torch.no_grad():
outputs = model.module.generate(**inputs, max_new_tokens=20)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
print(f"[{coordinator.rank}] {outputs}")
if __name__ == "__main__":
main()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalMoE/setup.py | applications/ColossalMoE/setup.py | from setuptools import find_packages, setup
def fetch_requirements(path):
with open(path, "r") as fd:
return [r.strip() for r in fd.readlines()]
def fetch_readme():
with open("README.md", encoding="utf-8") as f:
return f.read()
def fetch_version():
with open("version.txt", "r") as f:
return f.read().strip()
setup(
name="colossal_moe",
version=fetch_version(),
packages=find_packages(
exclude=(
"tests",
"benchmarks",
"*.egg-info",
)
),
description="Colossal-AI MoE",
long_description=fetch_readme(),
long_description_content_type="text/markdown",
license="Apache Software License 2.0",
url="https://github.com/hpcaitech",
install_requires=fetch_requirements("requirements.txt"),
python_requires=">=3.6",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Environment :: GPU :: NVIDIA CUDA",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: System :: Distributed Computing",
],
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalMoE/utils.py | applications/ColossalMoE/utils.py | import json
import os
from typing import Any, Dict, Tuple, Union
import torch
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from colossalai.booster import Booster
from colossalai.cluster import DistCoordinator
def move_to_cuda(batch, device):
return {k: v.to(device) for k, v in batch.items()}
def load_json(file_path: Union[str, os.PathLike]) -> Dict[str, Any]:
"""
Load file in JSON format
"""
with open(file=file_path, mode="r", encoding="utf-8") as fp:
return json.load(fp)
def save_json(data: Dict[str, Any], file_path: Union[str, os.PathLike]) -> None:
"""
Save as JSON format
"""
with open(file=file_path, mode="w", encoding="utf-8") as fp:
json.dump(data, fp=fp, ensure_ascii=False, indent=4)
def save_checkpoint(
save_dir: Union[str, os.PathLike],
booster: Booster,
model: torch.nn.Module,
optimizer: Optimizer,
lr_scheduler: _LRScheduler,
epoch: int,
step: int,
batch_size: int,
coordinator: DistCoordinator,
) -> None:
"""
Save model checkpoint, optimizer, LR scheduler and intermediate running states.
"""
save_dir = os.path.join(save_dir, f"epoch-{epoch}_step-{step}")
os.makedirs(os.path.join(save_dir, "modeling"), exist_ok=True)
booster.save_model(model, os.path.join(save_dir, "modeling"), shard=True)
booster.save_optimizer(optimizer, os.path.join(save_dir, "optimizer"), shard=True)
booster.save_lr_scheduler(lr_scheduler, os.path.join(save_dir, "lr_scheduler"))
running_states = {
"epoch": epoch,
"step": step,
"sample_start_index": step * batch_size,
}
if coordinator.is_master():
save_json(running_states, os.path.join(save_dir, "running_states.json"))
def load_checkpoint(
load_dir: Union[str, os.PathLike],
booster: Booster,
model: torch.nn.Module,
optimizer: Optimizer,
lr_scheduler: _LRScheduler,
) -> Tuple[int, int, int]:
"""
Load model checkpoint, optimizer, LR scheduler and intermediate running states.
"""
# Update booster params states.
booster.load_model(model, os.path.join(load_dir, "modeling"))
booster.load_optimizer(optimizer=optimizer, checkpoint=os.path.join(load_dir, "optimizer"))
booster.load_lr_scheduler(lr_scheduler=lr_scheduler, checkpoint=os.path.join(load_dir, "lr_scheduler"))
running_states = load_json(file_path=os.path.join(load_dir, "running_states.json"))
return (
running_states["epoch"],
running_states["step"],
running_states["sample_start_index"],
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalMoE/tests/__init__.py | applications/ColossalMoE/tests/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/train.py | applications/Colossal-LLaMA/train.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Continual Pre-training/Supervised fine-tuning of Colossal-LLaMA-2 developed by Colossal-AI Team
"""
import argparse
import json
import os
import resource
from contextlib import nullcontext
import torch
from colossal_llama.dataset.dummy_dataset import RandomDataset
from colossal_llama.dataset.loader import (
DataCollatorForSupervisedDataset,
StatefulDistributedSampler,
load_tokenized_dataset,
)
from colossal_llama.utils.ckpt_io import load_checkpoint, save_checkpoint
from colossal_llama.utils.froze import freeze_non_embeds_parameters
from colossal_llama.utils.neftune_patch import activate_neftune, deactivate_neftune
from colossal_llama.utils.utils import all_reduce_mean, format_numel_str, get_model_numel
from peft import LoraConfig
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin
from colossalai.cluster import DistCoordinator
from colossalai.lazy import LazyInitContext
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.nn.optimizer import HybridAdam
from colossalai.utils import get_current_device
def train(args) -> None:
# ==============================
# Initialize Distributed Training
# ==============================
colossalai.launch_from_torch()
accelerator = get_accelerator()
coordinator = DistCoordinator()
# ==============================
# Initialize Tensorboard and Save Config
# ==============================
if coordinator.is_master():
os.makedirs(args.tensorboard_dir, exist_ok=True)
writer = SummaryWriter(args.tensorboard_dir)
with open(args.config_file, "w") as f:
json.dump(args.__dict__, f, indent=4)
# ==============================
# Initialize Booster
# ==============================
if args.plugin == "ddp":
plugin = TorchDDPPlugin(find_unused_parameters=True if args.use_grad_checkpoint is False else False)
elif args.plugin == "gemini":
plugin = GeminiPlugin(
precision=args.mixed_precision,
initial_scale=2**16,
max_norm=args.grad_clip,
enable_gradient_accumulation=(args.accumulation_steps > 1),
enable_fused_normalization=get_accelerator().is_available(),
enable_flash_attention=args.use_flash_attn,
)
elif args.plugin == "gemini_auto":
plugin = GeminiPlugin(
precision=args.mixed_precision,
placement_policy="auto",
initial_scale=2**16,
max_norm=args.grad_clip,
enable_gradient_accumulation=(args.accumulation_steps > 1),
enable_fused_normalization=get_accelerator().is_available(),
enable_flash_attention=args.use_flash_attn,
)
elif args.plugin == "zero2":
plugin = LowLevelZeroPlugin(
stage=2,
precision=args.mixed_precision,
initial_scale=2**16,
max_norm=args.grad_clip,
)
elif args.plugin == "zero2_cpu":
plugin = LowLevelZeroPlugin(
stage=2,
precision=args.mixed_precision,
initial_scale=2**16,
cpu_offload=True,
max_norm=args.grad_clip,
)
elif args.plugin == "3d":
plugin = HybridParallelPlugin(
tp_size=args.tp,
pp_size=args.pp,
sp_size=args.sp,
sequence_parallelism_mode=args.sp_mode,
zero_stage=args.zero_stage,
enable_flash_attention=args.use_flash_attn,
enable_fused_normalization=get_accelerator().is_available(),
enable_sequence_parallelism=args.enable_sequence_parallelism,
cpu_offload=True if args.zero_stage >= 1 and args.zero_cpu_offload else False,
max_norm=args.grad_clip,
precision=args.mixed_precision,
microbatch_size=args.microbatch_size,
)
else:
raise ValueError(f"Unknown plugin {args.plugin}")
booster = Booster(plugin=plugin)
# ======================================================
# Initialize Tokenizer, Dataset, Collator and Dataloader
# ======================================================
tokenizer = AutoTokenizer.from_pretrained(args.pretrained, trust_remote_code=True)
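# Ensure the tokenizer has a pad token: reuse the EOS or UNK token according to --pad_token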
if args.pad_token == "eos":
try:
tokenizer.pad_token = tokenizer.eos_token
except AttributeError:
coordinator.print_on_master(f"pad_token can't be set")
elif args.pad_token == "unk":
try:
tokenizer.pad_token = tokenizer.unk_token
except AttributeError:
coordinator.print_on_master(f"pad_token can't be set")
tokenizer.add_bos_token = False
tokenizer.add_eos_token = False
coordinator.print_on_master(
f"Training Info:\nConfig file: {args.config_file} \nTensorboard logs: {args.tensorboard_dir} \nModel checkpoint: {args.save_dir}"
)
if args.benchmark:
coordinator.print_on_master(f"Run benchmark with {args.num_samples} random samples.")
dataset = RandomDataset(
num_samples=args.num_samples, max_length=args.max_length, vocab_size=tokenizer.vocab_size
)
dataloader = plugin.prepare_dataloader(
dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
seed=42,
distributed_sampler_cls=StatefulDistributedSampler,
)
else:
coordinator.print_on_master(f"Load dataset: {args.dataset}")
dataset = load_tokenized_dataset(dataset_paths=args.dataset, mode="train")
data_collator = DataCollatorForSupervisedDataset(
tokenizer=tokenizer, max_length=args.max_length, padding=args.padding_mode
)
dataloader = plugin.prepare_dataloader(
dataset=dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
collate_fn=data_collator,
distributed_sampler_cls=StatefulDistributedSampler,
)
coordinator.print_on_master(
f"Max device memory after data loader: {accelerator.max_memory_allocated() / 1024 ** 2:.2f} MB"
)
# ======================================================
# Initialize Model, Objective, Optimizer and LR Scheduler
# ======================================================
# When training the ChatGLM model, LoRA and gradient checkpointing are incompatible.
init_ctx = (
LazyInitContext(default_device=get_current_device())
if isinstance(plugin, (GeminiPlugin, HybridParallelPlugin)) and args.lora_rank == 0
else nullcontext()
)
with init_ctx:
model = AutoModelForCausalLM.from_pretrained(
args.pretrained,
torch_dtype=torch.bfloat16 if args.mixed_precision == "bf16" else torch.float16,
trust_remote_code=True,
)
# Freeze part of parameters.
if args.freeze_non_embeds_params:
freeze_non_embeds_parameters(model=model)
if args.lora_rank > 0:
lora_config = LoraConfig(task_type="CAUSAL_LM", r=args.lora_rank, lora_alpha=32, lora_dropout=0.1)
model = booster.enable_lora(model, lora_config=lora_config)
# this is essential, otherwise the grad checkpoint will not work.
model.train()
if args.use_grad_checkpoint:
model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
coordinator.print_on_master(msg="Gradient checkpointing enabled successfully")
model_numel = get_model_numel(model)
coordinator.print_on_master(f"Model params: {format_numel_str(model_numel)}")
optimizer = HybridAdam(
model_params=(
filter(lambda p: p.requires_grad, model.parameters())
if args.freeze_non_embeds_params
else model.parameters()
),
lr=args.lr,
betas=(0.9, 0.95),
weight_decay=args.weight_decay,
adamw_mode=True,
)
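# Default warmup to roughly 2.5% of the total number of optimizer steps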
if args.warmup_steps is None:
args.warmup_steps = int(args.num_epochs * 0.025 * (len(dataloader) // args.accumulation_steps))
coordinator.print_on_master(f"Warmup steps is set to {args.warmup_steps}")
lr_scheduler = CosineAnnealingWarmupLR(
optimizer=optimizer,
total_steps=args.num_epochs * (len(dataloader) // args.accumulation_steps),
warmup_steps=args.warmup_steps,
eta_min=0.1 * args.lr,
)
# Flash attention will be disabled because it does NOT support fp32.
default_dtype = torch.float16 if args.mixed_precision == "fp16" else torch.bfloat16
torch.set_default_dtype(default_dtype)
model, optimizer, _, dataloader, lr_scheduler = booster.boost(
model=model,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
dataloader=dataloader,
)
torch.set_default_dtype(torch.float)
coordinator.print_on_master(
f"Booster init max device memory: {accelerator.max_memory_allocated() / 1024 ** 2:.2f} MB"
)
coordinator.print_on_master(
f"Booster init max CPU memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024:.2f} MB"
)
start_epoch = 0
start_step = 0
sampler_start_idx = 0
if args.load_checkpoint is not None:
if "modeling" in args.load_checkpoint:
coordinator.print_on_master(f"Continued pretrain from checkpoint {args.load_checkpoint}")
booster.load_model(model, args.load_checkpoint)
else:
coordinator.print_on_master(f"Load model checkpoint from {args.load_checkpoint}")
start_epoch, start_step, sampler_start_idx = load_checkpoint(
load_dir=args.load_checkpoint,
booster=booster,
model=model,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
)
coordinator.print_on_master(
f"Loaded checkpoint {args.load_checkpoint} at epoch {start_epoch} step {start_step}"
)
coordinator.print_on_master(f"Loaded sample at index {sampler_start_idx}")
coordinator.print_on_master(
f"Checkpoint loaded max device memory: {accelerator.max_memory_allocated() / 1024 ** 2:.2f} MB"
)
coordinator.print_on_master(
f"Checkpoint loaded device memory: {accelerator.memory_allocated() / 1024 ** 2:.2f} MB"
)
coordinator.print_on_master(
f"Checkpoint loaded max CPU memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024:.2f} MB"
)
if args.use_neft:
coordinator.print_on_master("Activate NEFTune.")
model, handle = activate_neftune(model)
num_steps_per_epoch = len(dataloader) // args.accumulation_steps
# If resume training, set the sampler start index to the correct value
assert isinstance(dataloader.sampler, StatefulDistributedSampler)
dataloader.sampler.set_start_index(start_index=sampler_start_idx)
for epoch in range(start_epoch, args.num_epochs):
dataloader.sampler.set_epoch(epoch=epoch)
if isinstance(plugin, HybridParallelPlugin) and plugin.pp_size > 1:
data_iter = iter(dataloader)
step_bar = tqdm(
range(len(dataloader)),
desc="Step",
disable=not (coordinator._local_rank == coordinator._world_size - 1),
)
for step in step_bar:
outputs = booster.execute_pipeline(
data_iter,
model,
criterion=lambda outputs, inputs: outputs[0],
optimizer=optimizer,
return_loss=True,
)
loss = outputs["loss"]
if booster.plugin.stage_manager.is_last_stage():
global_loss = all_reduce_mean(loss, plugin)
if coordinator._local_rank == coordinator._world_size - 1:
step_bar.set_postfix({"train/loss": global_loss.item()})
optimizer.step()
optimizer.zero_grad()
# Save modeling.
save_model_condition = args.save_interval > 0 and (step + 1) % args.save_interval == 0
if not args.skip_save_each_epoch:
save_model_condition = save_model_condition or (step + 1) == len(dataloader)
if save_model_condition and not args.benchmark:
coordinator.print_on_master("\nStart saving model checkpoint with running states")
if args.use_neft:
coordinator.print_on_master("Deactivate NEFTune before saving model.")
deactivate_neftune(model, handle)
accelerator.empty_cache()
save_checkpoint(
save_dir=args.save_dir,
booster=booster,
model=model,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
epoch=epoch,
step=step + 1,
batch_size=args.batch_size,
coordinator=coordinator,
use_lora=(args.lora_rank > 0),
)
coordinator.print_on_master(
f"Saved checkpoint at epoch {epoch} step {step + 1} at folder {args.save_dir}"
)
if args.use_neft:
coordinator.print_on_master("Activate NEFTune.")
model, handle = activate_neftune(model)
else:
pbar = tqdm(
desc=f"Epoch {epoch}",
disable=not coordinator.is_master(),
total=num_steps_per_epoch,
initial=start_step // args.accumulation_steps,
)
total_loss = torch.tensor(0.0, device=get_current_device())
for step, batch in enumerate(dataloader, start=start_step):
batch = {k: v.to(get_current_device()) for k, v in batch.items() if isinstance(v, torch.Tensor)}
batch_output = model(**batch)
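# Scale the loss by the number of accumulation steps so accumulated gradients average correctly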
loss = batch_output.loss / args.accumulation_steps
total_loss.add_(loss.data)
booster.backward(loss=loss, optimizer=optimizer)
if (step + 1) % args.accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
all_reduce_mean(tensor=total_loss)
pbar.set_postfix({"Loss": f"{total_loss.item():.4f}"})
if coordinator.is_master():
global_step = (epoch * num_steps_per_epoch) + (step + 1) // args.accumulation_steps
writer.add_scalar(tag="Loss", scalar_value=total_loss.item(), global_step=global_step)
writer.add_scalar(
tag="Learning Rate",
scalar_value=lr_scheduler.get_last_lr()[0],
global_step=global_step,
)
total_loss.fill_(0.0)
pbar.update()
# Save modeling.
save_model_condition = (
args.save_interval > 0 and (step + 1) % (args.save_interval * args.accumulation_steps) == 0
)
if not args.skip_save_each_epoch:
save_model_condition = save_model_condition or (step + 1) == len(dataloader)
if save_model_condition and not args.benchmark:
coordinator.print_on_master("\nStart saving model checkpoint with running states")
if args.use_neft:
coordinator.print_on_master("Deactivate NEFTune before saving model.")
deactivate_neftune(model, handle)
accelerator.empty_cache()
save_checkpoint(
save_dir=args.save_dir,
booster=booster,
model=model,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
epoch=epoch,
step=step + 1,
batch_size=args.batch_size,
coordinator=coordinator,
use_lora=(args.lora_rank > 0),
)
coordinator.print_on_master(
f"Saved checkpoint at epoch {epoch} step {step + 1} at folder {args.save_dir}"
)
if args.use_neft:
coordinator.print_on_master("Activate NEFTune.")
model, handle = activate_neftune(model)
# Delete cache.
# del batch, batch_labels, batch_output, loss
accelerator.empty_cache()
# The following epochs are not resumed, so reset the sampler start index and start step
dataloader.sampler.set_start_index(start_index=0)
start_step = 0
if args.use_neft:
coordinator.print_on_master("Deactivate NEFTune.")
deactivate_neftune(model, handle)
# Final save.
if not args.benchmark:
coordinator.print_on_master("Start saving final model checkpoint")
booster.save_model(model, os.path.join(args.save_dir, "modeling"), shard=True)
coordinator.print_on_master(f"Saved final model checkpoint at epoch {epoch} at folder {args.save_dir}")
coordinator.print_on_master(f"Max device memory usage: {accelerator.max_memory_allocated()/1024**2:.2f} MB")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Basic training information.
parser.add_argument(
"--pretrained",
type=str,
default=None,
help="Address of the pre-trained model",
)
parser.add_argument("--load_checkpoint", type=str, default=None, help="Load checkpoint for continuous training.")
parser.add_argument("--dataset", nargs="+", default=[])
parser.add_argument(
"--plugin",
type=str,
default="gemini",
choices=["gemini", "gemini_auto", "zero2", "zero2_cpu", "3d", "ddp"],
help="Choose which plugin to use",
)
parser.add_argument("--save_interval", type=int, default=1000, help="Save interval")
parser.add_argument("--save_dir", type=str, default="checkpoint_dir", help="Checkpoint directory")
parser.add_argument("--tensorboard_dir", type=str, default="logs_dir", help="Tensorboard directory")
parser.add_argument("--config_file", type=str, default="config_file", help="Config file")
# Training parameters
parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
parser.add_argument("--accumulation_steps", type=int, default=1, help="Number of accumulation steps")
parser.add_argument("--batch_size", type=int, default=2, help="Global Batch size of each process")
parser.add_argument("--lr", type=float, default=3e-4, help="Learning rate")
parser.add_argument("--max_length", type=int, default=8192, help="Model max length")
parser.add_argument(
"--mixed_precision",
type=str,
default="fp16",
choices=["fp16", "bf16"],
help="Mixed precision",
)
parser.add_argument("--grad_clip", type=float, default=1.0, help="Gradient clipping value")
parser.add_argument("--weight_decay", type=float, default=0.1, help="Weight decay")
parser.add_argument("--warmup_steps", type=int, default=None, help="Warmup steps")
parser.add_argument(
"--use_grad_checkpoint",
action="store_true",
default=False,
help="Use gradient checkpointing",
)
parser.add_argument(
"--use_flash_attn",
action="store_true",
default=False,
help="Use flash-attention",
)
parser.add_argument(
"--use_neft",
action="store_true",
default=False,
help="Use NEFTune",
)
parser.add_argument(
"--freeze_non_embeds_params",
action="store_true",
default=False,
help="Freeze non embeddings parameters",
)
parser.add_argument("--pad_token", choices=["eos", "unk"], default="eos")
parser.add_argument("--padding_mode", choices=["max_length", "longest"], default="max_length")
parser.add_argument(
"--skip_save_each_epoch",
action="store_true",
default=False,
help="Skip saving the model checkpoint after each epoch is completed.",
)
# Additional arguments for 3d plugin.
parser.add_argument("--tp", type=int, default=1, help="TP size, used for 3d plugin.")
parser.add_argument("--pp", type=int, default=1, help="PP size, used for 3d plugin.")
parser.add_argument("--sp", type=int, default=1, help="SP size, used for 3d plugin.")
parser.add_argument("--zero_stage", type=int, default=0, help="Zero stage, used for 3d plugin.", choices=[0, 1, 2])
parser.add_argument(
"--sp_mode",
type=str,
default="split_gather",
choices=["split_gather", "ring", "all_to_all"],
help="SP mode, used for 3d plugin.",
)
parser.add_argument(
"--enable_sequence_parallelism",
default=False,
action="store_true",
help="Whether to enable SP, used for 3d plugin.",
)
parser.add_argument(
"--zero_cpu_offload", default=False, action="store_true", help="Whether to use offloading, used for 3d plugin."
)
parser.add_argument(
"--microbatch_size", type=int, default=1, help="Batch size for each process in PP, used for 3d plugin."
)
parser.add_argument("--lora_rank", type=int, default=0, help="lora rank when using lora to train.")
# Additional arguments for benchmark.
parser.add_argument("--num_samples", type=int, default=500, help="Number of samples for benchmarking.")
parser.add_argument(
"--benchmark", action="store_true", default=False, help="Benchmark performance using random dataset."
)
args = parser.parse_args()
train(args)
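# A hedged usage sketch (illustrative only): how the argument parser above could be
# exercised programmatically. All flag values below are assumptions, and the call to
# train() is commented out because it would start a real training run.
#
#   args = parser.parse_args(
#       ["--pretrained", "/path/to/llama", "--dataset", "/path/to/arrow/part-00000",
#        "--plugin", "zero2", "--mixed_precision", "bf16"]
#   )
#   # train(args)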
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/setup.py | applications/Colossal-LLaMA/setup.py | from setuptools import find_packages, setup
def fetch_requirements(path):
with open(path, "r") as fd:
return [r.strip() for r in fd.readlines()]
def fetch_readme():
with open("README.md", encoding="utf-8") as f:
return f.read()
def fetch_version():
with open("version.txt", "r") as f:
return f.read().strip()
setup(
name="colossal_llama",
version=fetch_version(),
packages=find_packages(exclude=("*.egg-info",)),
description="Continual Pre-training and SFT for LLaMA",
long_description=fetch_readme(),
long_description_content_type="text/markdown",
license="Apache Software License 2.0",
url="https://github.com/hpcaitech/ColossalAI/tree/main/applications/Colossal-LLaMA",
install_requires=fetch_requirements("requirements.txt"),
python_requires=">=3.7",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Environment :: GPU :: NVIDIA CUDA",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: System :: Distributed Computing",
],
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/dataset/prepare_sft_dataset.py | applications/Colossal-LLaMA/dataset/prepare_sft_dataset.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Prepare dataset for supervised fine-tuning (SFT)
"""
import argparse
import json
import math
import os
from multiprocessing import cpu_count
from colossal_llama.dataset.conversation import LLaMA2_Conv, LLaMA3_Conv
from colossal_llama.dataset.spliced_and_tokenized_dataset import supervised_tokenize_sft
from datasets import dataset_dict, load_dataset
from transformers import AddedToken, AutoTokenizer
from colossalai.logging import get_dist_logger
logger = get_dist_logger()
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_input_dirs",
type=str,
required=True,
default=None,
help="Comma(i.e., ',') separated list of all data directories containing `.jsonl` data files.",
)
parser.add_argument(
"--tokenizer_dir", type=str, required=True, default=None, help="A directory containing the tokenizer"
)
parser.add_argument("--data_output_dirs", type=str, default="data_output_dirs", help="Data output directory")
parser.add_argument("--max_length", type=int, default=8192, help="Max length of each spliced tokenized sequence")
parser.add_argument("--num_spliced_dataset_bins", type=int, default=10, help="Number of spliced dataset bins")
parser.add_argument("--llama_version", type=int, default=3, help="LLaMA version")
args = parser.parse_args()
if args.num_spliced_dataset_bins >= 100000:
raise ValueError("Too many spliced divisions, must be smaller than 100000")
args.data_cache_dir = os.path.join(args.data_output_dirs, "cache")
args.data_jsonl_output_dir = os.path.join(args.data_output_dirs, "jsonl")
args.data_arrow_output_dir = os.path.join(args.data_output_dirs, "arrow")
if not os.path.exists(args.data_cache_dir):
os.makedirs(args.data_cache_dir)
if not os.path.exists(args.data_jsonl_output_dir):
os.makedirs(args.data_jsonl_output_dir)
if not os.path.exists(args.data_arrow_output_dir):
os.makedirs(args.data_arrow_output_dir)
    # Collect all input dataset file paths.
input_data_paths = []
input_data_dirs = args.data_input_dirs.split(",")
for ds_dir in input_data_dirs:
ds_dir = os.path.abspath(ds_dir)
assert os.path.exists(ds_dir), f"Not find data dir {ds_dir}"
ds_files = [name for name in os.listdir(ds_dir) if name.endswith(".jsonl")]
ds_paths = [os.path.join(ds_dir, name) for name in ds_files]
input_data_paths.extend(ds_paths)
    # Prepare data splits.
train_splits = []
split_interval = math.ceil(100 / args.num_spliced_dataset_bins)
for i in range(0, 100, split_interval):
start = i
end = i + split_interval
if end > 100:
end = 100
train_splits.append(f"train[{start}%:{end}%]")
    # Prepare the tokenizer.
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
default_conversation = LLaMA3_Conv
# Fix </s> split issue: https://github.com/huggingface/transformers/issues/23833
if args.llama_version == 2:
tokenizer.add_tokens(AddedToken("</s>", normalized=False, special=True), special_tokens=True)
default_conversation = LLaMA2_Conv
tokenizer.add_bos_token = False
tokenizer.add_eos_token = False
if tokenizer.pad_token is None:
if tokenizer.unk_token is not None:
tokenizer.pad_token = tokenizer.unk_token
else:
tokenizer.pad_token = tokenizer.eos_token
tokenizer.unk_token = tokenizer.eos_token
list_dataset = load_dataset(
path="json",
data_files=input_data_paths,
cache_dir=os.path.join(args.data_cache_dir, "raw"),
keep_in_memory=False,
split=train_splits,
num_proc=cpu_count(),
)
for index, dataset in enumerate(list_dataset):
assert isinstance(dataset, dataset_dict.Dataset)
logger.info(f"Start to process part-{index}/{len(list_dataset)} of all original datasets.")
dataset = dataset.map(
function=supervised_tokenize_sft,
fn_kwargs={
"tokenizer": tokenizer,
"conversation_template": default_conversation,
"max_length": args.max_length,
},
keep_in_memory=False,
num_proc=min(len(dataset), cpu_count()),
)
dataset = dataset.filter(lambda data: data["labels"] is not None)
dataset = dataset.sort(column_names=("seq_category", "seq_length"), reverse=False, keep_in_memory=False)
# We don't concatenate data samples here.
spliced_dataset = dataset
# Save each jsonl spliced dataset.
output_index = "0" * (5 - len(str(index))) + str(index)
output_name = f"part-{output_index}"
output_jsonl_path = os.path.join(args.data_jsonl_output_dir, output_name + ".jsonl")
# st = time.time()
with open(file=output_jsonl_path, mode="w", encoding="utf-8") as fp_writer:
spliced_count = 0
for spliced_data_point in spliced_dataset:
if spliced_count % 500 == 0:
logger.info(f"processing {spliced_count} spliced data points for {fp_writer.name}")
spliced_count += 1
fp_writer.write(json.dumps(spliced_data_point, ensure_ascii=False) + "\n")
# Save each arrow spliced dataset
output_arrow_path = os.path.join(args.data_arrow_output_dir, output_name)
logger.info(f"Start to save {output_arrow_path}")
spliced_dataset = load_dataset(
path="json",
data_files=[output_jsonl_path],
cache_dir=os.path.join(args.data_cache_dir, "spliced_and_tokenized"),
keep_in_memory=False,
num_proc=cpu_count(),
split="train",
)
spliced_dataset.save_to_disk(dataset_path=output_arrow_path, num_proc=min(len(spliced_dataset), cpu_count()))
if __name__ == "__main__":
main()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/dataset/prepare_pretrain_dataset.py | applications/Colossal-LLaMA/dataset/prepare_pretrain_dataset.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Prepare dataset for continual pre-training
"""
import argparse
import json
import math
import os
import time
from multiprocessing import cpu_count
from colossal_llama.dataset.spliced_and_tokenized_dataset import (
ClosedToConstantLengthSplicedDataset,
supervised_tokenize_pretrain,
)
from datasets import dataset_dict, load_dataset
from transformers import AutoTokenizer
from colossalai.logging import get_dist_logger
logger = get_dist_logger()
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_input_dirs",
type=str,
required=True,
default=None,
help="Comma(i.e., ',') separated list of all data directories containing `.jsonl` data files.",
)
parser.add_argument(
"--tokenizer_dir", type=str, required=True, default=None, help="A directory containing the tokenizer"
)
parser.add_argument("--data_output_dirs", type=str, default="data_output_dirs", help="Data output directory")
parser.add_argument("--max_length", type=int, default=8192, help="Max length of each spliced tokenized sequence")
parser.add_argument("--num_spliced_dataset_bins", type=int, default=10, help="Number of spliced dataset bins")
args = parser.parse_args()
if args.num_spliced_dataset_bins >= 100000:
raise ValueError("Too many spliced divisions, must be smaller than 100000")
args.data_cache_dir = os.path.join(args.data_output_dirs, "cache")
args.data_jsonl_output_dir = os.path.join(args.data_output_dirs, "jsonl")
args.data_arrow_output_dir = os.path.join(args.data_output_dirs, "arrow")
if not os.path.exists(args.data_cache_dir):
os.makedirs(args.data_cache_dir)
if not os.path.exists(args.data_jsonl_output_dir):
os.makedirs(args.data_jsonl_output_dir)
if not os.path.exists(args.data_arrow_output_dir):
os.makedirs(args.data_arrow_output_dir)
    # Collect all input dataset file paths.
input_data_paths = []
input_data_dirs = args.data_input_dirs.split(",")
for ds_dir in input_data_dirs:
ds_dir = os.path.abspath(ds_dir)
assert os.path.exists(ds_dir), f"Not find data dir {ds_dir}"
ds_files = [name for name in os.listdir(ds_dir) if name.endswith(".jsonl")]
ds_paths = [os.path.join(ds_dir, name) for name in ds_files]
input_data_paths.extend(ds_paths)
    # Prepare data splits.
train_splits = []
split_interval = math.ceil(100 / args.num_spliced_dataset_bins)
for i in range(0, 100, split_interval):
start = i
end = i + split_interval
if end > 100:
end = 100
train_splits.append(f"train[{start}%:{end}%]")
    # Prepare the tokenizer.
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
tokenizer.add_bos_token = False
tokenizer.add_eos_token = False
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.unk_token
list_dataset = load_dataset(
path="json",
data_files=input_data_paths,
cache_dir=os.path.join(args.data_cache_dir, "raw"),
keep_in_memory=False,
split=train_splits,
num_proc=cpu_count(),
)
for index, dataset in enumerate(list_dataset):
assert isinstance(dataset, dataset_dict.Dataset)
logger.info(f"Start to process part-{index}/{len(list_dataset)} of all original datasets.")
dataset = dataset.map(
function=supervised_tokenize_pretrain,
fn_kwargs={"tokenizer": tokenizer, "max_length": args.max_length},
keep_in_memory=False,
num_proc=min(len(dataset), cpu_count()),
)
dataset = dataset.remove_columns(column_names=["source", "target", "category"])
dataset = dataset.sort(column_names=("seq_category", "seq_length"), reverse=False, keep_in_memory=False)
dataset = dataset.remove_columns(column_names=["seq_category", "seq_length"])
spliced_dataset = ClosedToConstantLengthSplicedDataset(
dataset=dataset, tokenizer=tokenizer, max_length=args.max_length, error_strict=False
)
# Save each jsonl spliced dataset.
output_index = "0" * (5 - len(str(index))) + str(index)
output_name = f"part-{output_index}"
output_jsonl_path = os.path.join(args.data_jsonl_output_dir, output_name + ".jsonl")
st = time.time()
with open(file=output_jsonl_path, mode="w", encoding="utf-8") as fp_writer:
spliced_count = 0
for spliced_data_point in spliced_dataset:
if spliced_count % 500 == 0:
logger.info(f"processing {spliced_count} spliced data points for {fp_writer.name}")
spliced_count += 1
fp_writer.write(json.dumps(spliced_data_point, ensure_ascii=False) + "\n")
logger.info(
f"Current file {fp_writer.name}; "
f"Data size: {len(spliced_dataset)}; "
f"Spliced data size: {spliced_dataset.current_size}; "
f"Splicing compression rate: {round(spliced_dataset.current_size / len(spliced_dataset), 6)}; "
f"Time cost: {round((time.time() - st) / 60, 6)} minutes."
)
# Save each arrow spliced dataset
output_arrow_path = os.path.join(args.data_arrow_output_dir, output_name)
logger.info(f"Start to save {output_arrow_path}")
spliced_dataset = load_dataset(
path="json",
data_files=[output_jsonl_path],
cache_dir=os.path.join(args.data_cache_dir, "spliced_and_tokenized"),
keep_in_memory=False,
num_proc=cpu_count(),
split="train",
)
spliced_dataset.save_to_disk(dataset_path=output_arrow_path, num_proc=min(len(spliced_dataset), cpu_count()))
if __name__ == "__main__":
main()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/__init__.py | applications/Colossal-LLaMA/colossal_llama/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/model/init_model.py | applications/Colossal-LLaMA/colossal_llama/model/init_model.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Initialize a new model with an updated tokenizer by averaging the original model's embeddings of each new token's sub-tokens
"""
import argparse
import numpy as np
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer
from colossalai.logging import get_dist_logger
logger = get_dist_logger()
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--source_model_and_tokenizer_path",
type=str,
required=True,
default=None,
help="Source path of model & tokenizer",
)
parser.add_argument("--target_tokenizer_path", type=str, required=True, default=None, help="Target tokenizer path")
parser.add_argument("--target_model_path", type=str, required=True, default=None, help="Target model path")
args = parser.parse_args()
source_tokenizer = LlamaTokenizer.from_pretrained(args.source_model_and_tokenizer_path)
source_tokenizer.add_bos_token = False
source_tokenizer.add_eos_token = False
if source_tokenizer.pad_token is None:
source_tokenizer.pad_token = source_tokenizer.unk_token
source_vocab = source_tokenizer.get_vocab()
target_tokenizer = LlamaTokenizer.from_pretrained(args.target_tokenizer_path)
target_tokenizer.add_bos_token = False
target_tokenizer.add_eos_token = False
if target_tokenizer.pad_token is None:
target_tokenizer.pad_token = target_tokenizer.unk_token
target_vocab = target_tokenizer.get_vocab()
target_inverted_vocab = {v: k for k, v in target_vocab.items()}
assert len(target_vocab) > len(
source_vocab
), f"Target vocab size({len(target_vocab)}) must be greater than source vocab size({len(source_vocab)})"
gpu_device = torch.device("cuda:0")
cpu_device = torch.device("cpu")
source_model = LlamaForCausalLM.from_pretrained(args.source_model_and_tokenizer_path)
source_model.eval()
source_model = source_model.to(gpu_device)
source_input_embeddings = source_model.get_input_embeddings()
assert isinstance(source_input_embeddings, torch.nn.Embedding)
assert source_input_embeddings.weight.shape[0] == len(source_vocab)
source_input_embeddings.eval()
source_output_embeddings = source_model.get_output_embeddings()
assert isinstance(source_output_embeddings, torch.nn.Linear)
assert source_output_embeddings.bias is None
assert source_output_embeddings.weight.shape[0] == len(source_vocab)
source_output_embeddings.eval()
input_embeddings = source_input_embeddings.weight.cpu().detach().numpy()
output_embeddings = source_output_embeddings.weight.cpu().detach().numpy()
for i in range(len(source_vocab), len(target_vocab)):
if i % 500 == 0:
logger.info(f"processing {i}/{len(target_vocab)} target tokens")
target_token = target_inverted_vocab[i]
target_to_source_token_ids = torch.LongTensor(source_tokenizer([target_token])["input_ids"][0])
target_to_source_token_ids = target_to_source_token_ids.to(gpu_device)
target_to_source_input_embedding = (
source_input_embeddings.weight[target_to_source_token_ids]
.mean(dim=0)
.unsqueeze(dim=0)
.cpu()
.detach()
.numpy()
)
target_to_source_output_embedding = (
source_output_embeddings.weight[target_to_source_token_ids]
.mean(dim=0)
.unsqueeze(dim=0)
.cpu()
.detach()
.numpy()
)
input_embeddings = np.concatenate((input_embeddings, target_to_source_input_embedding), axis=0)
output_embeddings = np.concatenate((output_embeddings, target_to_source_output_embedding), axis=0)
source_model = source_model.to(cpu_device)
assert isinstance(source_model, LlamaForCausalLM)
    # Expand the embedding matrices to the target vocab size, then copy in the averaged rows.
source_model.resize_token_embeddings(new_num_tokens=len(target_vocab))
source_model.model.embed_tokens.weight.data = torch.Tensor(input_embeddings)
source_model.lm_head.weight.data = torch.Tensor(output_embeddings)
source_model = source_model.half()
source_model.save_pretrained(save_directory=args.target_model_path)
if __name__ == "__main__":
main()
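# A minimal numeric sketch (not part of the original script) of the mean-initialization
# rule used above: each new token's embedding row is the average of the source-model
# embedding rows of the sub-tokens it splits into. All numbers are made up.
#
#   import numpy as np
#   source_embeddings = np.arange(12, dtype=np.float32).reshape(4, 3)  # 4 source tokens, dim 3
#   sub_token_ids = [1, 3]                      # hypothetical split of one new token
#   new_row = source_embeddings[sub_token_ids].mean(axis=0)
#   # new_row -> array([6., 7., 8.], dtype=float32)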
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/tokenizer/init_tokenizer.py | applications/Colossal-LLaMA/colossal_llama/tokenizer/init_tokenizer.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Initialize new tokenizer for continual pre-training
"""
import argparse
import json
import os
from typing import List, Union
from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model
from transformers.models.llama.tokenization_llama import LlamaTokenizer
from colossalai.logging import get_dist_logger
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
logger = get_dist_logger()
def expand_vocab_tokenizer(
source_tokenizer_dir: Union[str, os.PathLike], target_tokenizer_dir: Union[str, os.PathLike], new_tokens: List[str]
) -> None:
"""Expand tokenizer for continue pre-training."""
if os.path.exists(target_tokenizer_dir):
raise RuntimeError(f"Find existed directory {target_tokenizer_dir}")
source_tokenizer = LlamaTokenizer.from_pretrained(source_tokenizer_dir)
logger.info(source_tokenizer)
source_sp_processor = source_tokenizer.sp_model
source_spm = sp_pb2_model.ModelProto()
source_spm.ParseFromString(source_sp_processor.serialized_model_proto())
logger.info(f"Source tokenizer size: {len(source_sp_processor)}")
# Add new tokens to source tokenizer.
source_spm_tokens = set([p.piece for p in source_spm.pieces])
for piece in new_tokens:
assert isinstance(piece, str), f"Invalid token({piece}) type {type(piece)}"
if piece in source_spm_tokens:
            # Skip existing token.
continue
new_p = sp_pb2_model.ModelProto().SentencePiece()
new_p.piece = piece
new_p.score = 0
source_spm.pieces.append(new_p)
logger.info(f"Expand vocab from {len(source_spm_tokens)} to {len(source_spm.pieces)}")
# Save
os.makedirs(target_tokenizer_dir)
target_tokenizer_model_path = os.path.join(target_tokenizer_dir, "tokenizer.model")
with open(file=target_tokenizer_model_path, mode="wb") as fp:
fp.write(source_spm.SerializeToString())
target_tokenizer = LlamaTokenizer(vocab_file=target_tokenizer_model_path)
target_tokenizer.save_pretrained(save_directory=target_tokenizer_dir)
logger.info(f"Successfully save expand tokenizer to {target_tokenizer_dir}")
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--source_tokenizer_dir", type=str, required=True, default=None, help="Source tokenizer directory"
)
parser.add_argument(
"--target_tokenizer_dir", type=str, required=True, default=None, help="Target tokenizer directory"
)
parser.add_argument(
"--expand_tokens_file",
type=str,
required=True,
default=None,
help="Path of the file containing tokens to be extended",
)
args = parser.parse_args()
expand_tokens = []
with open(file=args.expand_tokens_file, mode="r", encoding="utf-8") as fp_reader:
for line in fp_reader:
item = json.loads(line)
# e.g., {"piece": "你好"}
token = item["piece"]
if token in expand_tokens:
continue
expand_tokens.append(token)
expand_tokens.sort(key=lambda t: len(t), reverse=False)
expand_vocab_tokenizer(
source_tokenizer_dir=args.source_tokenizer_dir,
target_tokenizer_dir=args.target_tokenizer_dir,
new_tokens=expand_tokens,
)
if __name__ == "__main__":
main()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/utils/froze.py | applications/Colossal-LLaMA/colossal_llama/utils/froze.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from transformers.models.llama import LlamaForCausalLM
def freeze_non_embeds_parameters(model: LlamaForCausalLM) -> None:
"""Freeze all parameters except embeddings."""
for name, params in model.named_parameters():
if "embed_tokens" not in name and "lm_head" not in name:
params.requires_grad = False
else:
params.requires_grad = True
def unfreeze_parameters(model: LlamaForCausalLM) -> None:
    """Make all parameters trainable."""
    for name, params in model.named_parameters():
        params.requires_grad = True
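# A minimal usage sketch (the checkpoint path is an assumption for illustration):
#
#   from transformers import LlamaForCausalLM
#   model = LlamaForCausalLM.from_pretrained("/path/to/llama")
#   freeze_non_embeds_parameters(model)
#   trainable = [name for name, p in model.named_parameters() if p.requires_grad]
#   # Only the input embeddings ("embed_tokens") and the "lm_head" remain trainable.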
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/utils/stream_chat_patch.py | applications/Colossal-LLaMA/colossal_llama/utils/stream_chat_patch.py | from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch import nn
from transformers import PreTrainedTokenizer
from transformers.generation.utils import GenerationConfig, LogitsProcessorList, StoppingCriteriaList
from transformers.utils import logging
logger = logging.get_logger(__name__)
def get_prompt_template(
input_query: str,
history: List[Dict] = None,
roles: list = ["", "Human", "Assistant"],
) -> str:
"""
Generates a prompt template for chat models based on input and history.
Args:
input_query (str): User's current input query.
history (List[Dict], optional): List of past conversations, each a dict with 'role' and 'message'.
roles (list): Specifies the roles in the conversation, defaults to ["", "Human", "Assistant"].
Returns:
str: A formatted prompt including the input query and history.
"""
prompt = ""
if history is None:
new_history = []
else:
new_history = deepcopy(history)
new_history.append({"role": roles[1], "message": input_query.strip()})
new_history.append({"role": roles[2], "message": None})
for _, item in enumerate(new_history):
role = item.get("role")
message = item.get("message")
if role == roles[0]:
prompt += f"<s>{message}\n\n"
else:
if message:
prompt += f"{role}: <s>{message}</s>"
else:
prompt += f"{role}: <s>"
return prompt
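# A worked example (a sketch; the strings are made up) of the prompt produced above:
#
#   get_prompt_template("What is 2+2?", history=[{"role": "", "message": "You are helpful."}])
#   # -> "<s>You are helpful.\n\nHuman: <s>What is 2+2?</s>Assistant: <s>"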
@torch.inference_mode()
def streaming_chat(
model: Any,
tokenizer: PreTrainedTokenizer,
input_query: str,
history: List[Dict] = None,
roles: list = ["", "Human", "Assistant"],
past_key_values: Tuple[Tuple[torch.FloatTensor, Any], Any] = None,
temperature: float = 0.8,
top_p: float = 0.95,
top_k: int = 50,
do_sample: bool = True,
length_penalty: float = 1.2,
max_new_tokens: int = 512,
logits_processor: LogitsProcessorList = None,
return_past_key_values: bool = False,
**kwargs,
):
"""
Streaming chat responses generation with a given model and tokenizer.
Args:
model (Any): The language model to generate responses.
tokenizer (PreTrainedTokenizer): Tokenizer compatible with the model, used for encoding inputs and decoding responses.
input_query (str): The current user input to respond to.
history (List[Dict], optional): A list of past conversations, where each conversation is a dictionary with keys 'role' and 'message'.
roles (list): Roles involved in the conversation, defaults to ["", "Human", "Assistant"].
past_key_values (Tuple[Tuple[torch.FloatTensor, Any], Any], optional): Past key values for incremental decoding.
temperature (float): The temperature value for token sampling, defaults to 0.8.
top_p (float): Nucleus sampling probability threshold, defaults to 0.95.
top_k (int): Top-K filtering threshold, defaults to 50.
do_sample (bool): Whether to sample responses, defaults to True.
length_penalty (float): Penalty for response length, defaults to 1.2.
max_new_tokens (int): Maximum number of new tokens to generate, defaults to 512.
logits_processor (LogitsProcessorList, optional): Custom logits processors, defaults to None.
return_past_key_values (bool): Whether to return past key values for further incremental decoding, defaults to False.
**kwargs: Additional keyword arguments for generation.
Yields:
Tuple[str, List[Dict], Optional[Tuple[Tuple[torch.FloatTensor, Any], Any]]]: A tuple containing the generated response, updated history, and
optionally the updated past key values if `return_past_key_values` is True.
Ensures padding is on the left side for the tokenizer.
"""
assert tokenizer.padding_side == "left", "Current generation only supports left padding."
if history is None:
history = []
if logits_processor is None:
logits_processor = LogitsProcessorList()
generation_kwargs = {
"temperature": temperature,
"top_p": top_p,
"top_k": top_k,
"do_sample": do_sample,
"max_new_tokens": max_new_tokens,
"length_penalty": length_penalty,
"use_cache": True,
**kwargs,
}
prompt_str = get_prompt_template(input_query, history=history, roles=roles)
eos_token_id = [tokenizer.eos_token_id]
inputs = tokenizer(prompt_str, return_tensors="pt").to(model.device)
history.append({"role": roles[1], "message": input_query.strip()})
history.append({"role": roles[2], "message": None})
for outputs in stream_generate(
model,
**inputs,
past_key_values=past_key_values,
eos_token_id=eos_token_id,
return_past_key_values=return_past_key_values,
**generation_kwargs,
):
if return_past_key_values:
outputs, past_key_values = outputs
outputs = outputs.tolist()[0][len(inputs["input_ids"][0]) : -1]
response = tokenizer.decode(outputs)
history[-1]["message"] = response.strip()
if return_past_key_values:
yield response, history, past_key_values
else:
yield response, history
@torch.inference_mode()
def stream_generate(
model: Any,
input_ids: torch.Tensor,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
return_past_key_values: bool = False,
**kwargs,
):
"""
Generates sequences of token ids using the specified model and generation parameters.
Adapted from https://huggingface.co/THUDM/chatglm3-6b/blob/main/modeling_chatglm.py
Args:
model (Any): The model used for generating sequences of token ids.
input_ids (torch.Tensor): The sequence used as a prompt for the generation or as model inputs to the encoder.
generation_config (Optional[GenerationConfig]): The generation configuration to be used as base parametrization for the generation call.
logits_processor (Optional[LogitsProcessorList]): Custom logits processors that complement the default logits processors built from arguments
and generation config.
stopping_criteria (Optional[StoppingCriteriaList]): Custom stopping criteria that complement the default stopping criteria built from arguments
and a generation config.
prefix_allowed_tokens_fn (Optional[Callable[[int, torch.Tensor], List[int]]]): Function to constrain token generation.
return_past_key_values (bool): Whether to return past key values for further incremental decoding, defaults to False.
**kwargs: Additional parameters for model generation.
Yields:
torch.Tensor: The generated token IDs, updated after each generation step.
Optional[Tuple[Tuple[torch.FloatTensor, Any], Any]]: The past key values, returned if `return_past_key_values` is True, defaults to False.
"""
input_ids_len = input_ids.size(1)
if generation_config is None:
generation_config = model.generation_config
generation_config = deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs)
eos_token_id = generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
if generation_config.max_new_tokens is not None:
generation_config.max_length = generation_config.max_new_tokens + input_ids_len
if input_ids_len >= generation_config.max_length:
input_ids_string = "decoder_input_ids" if model.config.is_encoder_decoder else "input_ids"
logger.warning(
f"Input length of {input_ids_string} is {input_ids_len}, but `max_length` is set to"
f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
" increasing `max_new_tokens`."
)
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
# prepare distribution pre_processing samplers
logits_processor = model._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_len,
encoder_input_ids=input_ids,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
logits_processor=logits_processor,
)
# prepare stopping criteria
stopping_criteria = model._get_stopping_criteria(
generation_config=generation_config, stopping_criteria=stopping_criteria
)
logits_warper = model._get_logits_warper(generation_config)
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
scores = None
while True:
model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)
# forward pass to get next token
outputs = model(
**model_inputs,
return_dict=True,
output_attentions=False,
output_hidden_states=False,
)
# NOTE: this is correct only in left padding mode
# pre-process distribution
next_token_logits = outputs.logits[:, -1, :]
next_token_scores = logits_processor(input_ids, next_token_logits)
next_token_scores = logits_warper(input_ids, next_token_scores)
# sample
probs = nn.functional.softmax(next_token_scores, dim=-1)
if generation_config.do_sample:
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
next_tokens = torch.argmax(probs, dim=-1)
# update generated ids, model inputs, and length for next step
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
model_kwargs = model._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=model.config.is_encoder_decoder
)
unfinished_sequences = unfinished_sequences.mul(
next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
)
if return_past_key_values:
yield input_ids, outputs.past_key_values
else:
yield input_ids
# stop when each sentence is finished, or if exceed the maximum length
if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
break
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/utils/utils.py | applications/Colossal-LLaMA/colossal_llama/utils/utils.py | """
Utils for Colossal-LLaMA
"""
import torch
import torch.distributed as dist
from colossalai.booster import Plugin
def all_reduce_mean(tensor: torch.Tensor, plugin: Plugin = None) -> torch.Tensor:
if plugin is not None:
dist.all_reduce(tensor=tensor, op=dist.ReduceOp.SUM, group=plugin.dp_group)
tensor.div_(plugin.dp_size)
else:
dist.all_reduce(tensor=tensor, op=dist.ReduceOp.SUM)
tensor.div_(dist.get_world_size())
return tensor
def get_model_numel(model: torch.nn.Module) -> int:
return sum(p.numel() for p in model.parameters())
def format_numel_str(numel: int) -> str:
B = 1024**3
M = 1024**2
K = 1024
if numel >= B:
return f"{numel / B:.2f} B"
elif numel >= M:
return f"{numel / M:.2f} M"
elif numel >= K:
return f"{numel / K:.2f} K"
else:
return f"{numel}"
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/utils/ckpt_io.py | applications/Colossal-LLaMA/colossal_llama/utils/ckpt_io.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Helper functions for IO
"""
import json
import os
from typing import Any, Dict, Tuple, Union
import torch
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from colossalai.booster import Booster
from colossalai.cluster import DistCoordinator
def load_json(file_path: Union[str, os.PathLike]) -> Dict[str, Any]:
"""
Load file in JSON format
"""
with open(file=file_path, mode="r", encoding="utf-8") as fp:
return json.load(fp)
def save_json(data: Dict[str, Any], file_path: Union[str, os.PathLike]) -> None:
"""
Save as JSON format
"""
with open(file=file_path, mode="w", encoding="utf-8") as fp:
json.dump(data, fp=fp, ensure_ascii=False, indent=4)
def save_checkpoint(
save_dir: Union[str, os.PathLike],
booster: Booster,
model: torch.nn.Module,
optimizer: Optimizer,
lr_scheduler: _LRScheduler,
epoch: int,
step: int,
batch_size: int,
coordinator: DistCoordinator,
use_lora: bool = False,
) -> None:
"""
    Save model checkpoint, optimizer, LR scheduler and intermediate running states.
"""
save_dir = os.path.join(save_dir, f"epoch-{epoch}_step-{step}")
os.makedirs(os.path.join(save_dir, "modeling"), exist_ok=True)
if use_lora:
booster.save_lora_as_pretrained(model, os.path.join(save_dir, "modeling"))
else:
booster.save_model(model, os.path.join(save_dir, "modeling"), shard=True)
booster.save_optimizer(optimizer, os.path.join(save_dir, "optimizer"), shard=True)
booster.save_lr_scheduler(lr_scheduler, os.path.join(save_dir, "lr_scheduler"))
running_states = {
"epoch": epoch,
"step": step,
"sample_start_index": step * batch_size,
}
if coordinator.is_master():
save_json(running_states, os.path.join(save_dir, "running_states.json"))
def load_checkpoint(
load_dir: Union[str, os.PathLike],
booster: Booster,
model: torch.nn.Module,
optimizer: Optimizer,
lr_scheduler: _LRScheduler,
) -> Tuple[int, int, int]:
"""
    Load model checkpoint, optimizer, LR scheduler and intermediate running states.
"""
# Update booster params states.
booster.load_model(model=model, checkpoint=os.path.join(load_dir, "modeling"))
booster.load_optimizer(optimizer=optimizer, checkpoint=os.path.join(load_dir, "optimizer"))
booster.load_lr_scheduler(lr_scheduler=lr_scheduler, checkpoint=os.path.join(load_dir, "lr_scheduler"))
running_states = load_json(file_path=os.path.join(load_dir, "running_states.json"))
return (
running_states["epoch"],
running_states["step"],
running_states["sample_start_index"],
)
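# A minimal resume sketch (a hedged example; `booster`, `model`, `optimizer` and
# `lr_scheduler` are assumed to have been built and boosted beforehand, and the
# checkpoint path is an assumption):
#
#   start_epoch, start_step, sample_start_index = load_checkpoint(
#       "checkpoints/epoch-0_step-1000", booster, model, optimizer, lr_scheduler
#   )
#   # `sample_start_index` can then be passed to a stateful sampler's
#   # `set_start_index(...)` so already-consumed samples are skipped.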
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/utils/__init__.py | applications/Colossal-LLaMA/colossal_llama/utils/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/utils/neftune_patch.py | applications/Colossal-LLaMA/colossal_llama/utils/neftune_patch.py | # Copyright 2023 The Hugging Face team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def unwrap(model):
if hasattr(model, "module"):
return model.unwrap()
else:
return model
def neftune_post_forward_hook(module, input, output):
"""
Implements the NEFTune forward pass for the model using forward hooks. Note this works only for torch.nn.Embedding
layers. This method is slightly adapted from the original source code that can be found here:
https://github.com/neelsjain/NEFTune Simply add it to your model as follows:
```python
model = ...
model.embed_tokens.neftune_noise_alpha = 0.1
model.embed_tokens.register_forward_hook(neftune_post_forward_hook)
```
Args:
module (`torch.nn.Module`):
The embedding module where the hook is attached. Note that you need to set `module.neftune_noise_alpha` to
the desired noise alpha value.
input (`torch.Tensor`):
The input tensor to the model.
output (`torch.Tensor`):
The output tensor of the model (i.e. the embeddings).
"""
if module.training:
dims = torch.tensor(output.size(1) * output.size(2))
mag_norm = module.neftune_noise_alpha / torch.sqrt(dims)
output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm)
return output
def activate_neftune(model, neftune_noise_alpha=0.1):
r"""
Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper:
https://arxiv.org/abs/2310.05914
"""
embeddings = unwrap(model).get_input_embeddings()
embeddings.neftune_noise_alpha = neftune_noise_alpha
hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook)
neftune_hook_handle = hook_handle
return model, neftune_hook_handle
def deactivate_neftune(model, neftune_hook_handle):
"""
    Deactivates the NEFTune method. Make sure to call `activate_neftune` first.
"""
embeddings = unwrap(model).get_input_embeddings()
neftune_hook_handle.remove()
del embeddings.neftune_noise_alpha
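# A minimal usage sketch of the activate/deactivate pair (the training loop is elided):
#
#   model, handle = activate_neftune(model, neftune_noise_alpha=0.1)
#   ...  # run noisy-embedding fine-tuning steps
#   deactivate_neftune(model, handle)  # remove the hook before evaluation or saving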
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/dataset/dummy_dataset.py | applications/Colossal-LLaMA/colossal_llama/dataset/dummy_dataset.py | import torch
from torch.utils.data import Dataset
from colossalai.accelerator import get_accelerator
class RandomDataset(Dataset):
def __init__(self, num_samples: int = 1000, max_length: int = 2048, vocab_size: int = 32000):
self.num_samples = num_samples
self.max_length = max_length
self.input_ids = torch.randint(
0, vocab_size, (num_samples, max_length), device=get_accelerator().get_current_device()
)
self.attention_mask = torch.ones_like(self.input_ids)
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
return {
"input_ids": self.input_ids[idx],
"attention_mask": self.attention_mask[idx],
"labels": self.input_ids[idx],
}
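# A minimal benchmarking sketch (sizes below are arbitrary illustration values; a GPU
# or other accelerator is assumed, since the tensors are created on the current device):
#
#   from torch.utils.data import DataLoader
#   dataset = RandomDataset(num_samples=64, max_length=512, vocab_size=32000)
#   loader = DataLoader(dataset, batch_size=4)
#   batch = next(iter(loader))
#   # batch["input_ids"].shape == (4, 512); "labels" mirror "input_ids" for LM loss.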
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/dataset/loader.py | applications/Colossal-LLaMA/colossal_llama/dataset/loader.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from dataclasses import dataclass
from typing import Dict, Iterator, List, Optional, Sequence, Union
import torch
import torch.nn.functional as F
from datasets import Dataset as HFDataset
from datasets import dataset_dict, load_from_disk
from torch.utils.data import ConcatDataset, Dataset, DistributedSampler
from transformers.tokenization_utils import PreTrainedTokenizer
DatasetType = Union[Dataset, ConcatDataset, dataset_dict.Dataset]
PathType = Union[str, os.PathLike]
def load_tokenized_dataset(
dataset_paths: Union[PathType, List[PathType]], mode: str = "train"
) -> Optional[DatasetType]:
"""
Load pre-tokenized dataset.
Each instance of dataset is a dictionary with
`{'input_ids': List[int], 'labels': List[int], sequence: str}` format.
"""
mode_map = {"train": "train", "dev": "validation", "test": "test"}
assert mode in tuple(mode_map), f"Unsupported mode {mode}, it must be in {tuple(mode_map)}"
if isinstance(dataset_paths, (str, os.PathLike)):
dataset_paths = [dataset_paths]
datasets = [] # `List[datasets.dataset_dict.Dataset]`
for ds_path in dataset_paths:
ds_path = os.path.abspath(ds_path)
assert os.path.exists(ds_path), f"Not existed file path {ds_path}"
ds_dict = load_from_disk(dataset_path=ds_path, keep_in_memory=False)
if isinstance(ds_dict, HFDataset):
datasets.append(ds_dict)
else:
if mode_map[mode] in ds_dict:
datasets.append(ds_dict[mode_map[mode]])
if len(datasets) == 0:
return None
if len(datasets) == 1:
return datasets.pop()
return ConcatDataset(datasets=datasets)
@dataclass
class DataCollatorForSupervisedDataset(object):
"""
Collate instances for supervised dataset.
Each instance is a tokenized dictionary with fields
`input_ids`(List[int]), `labels`(List[int]) and `sequence`(str).
"""
tokenizer: PreTrainedTokenizer
max_length: int = 4096
ignore_index: int = -100
padding: str = "max_length"
def __call__(self, instances: Sequence[Dict[str, List[int]]]) -> Dict[str, torch.Tensor]:
"""
Args:
instances (`Sequence[Dict[str, List[int]]]`):
Mini-batch samples, each sample is stored in an individual dictionary.
Returns:
(`Dict[str, torch.Tensor]`): Contains the following `torch.Tensor`:
`input_ids`: `torch.Tensor` of shape (bsz, max_len);
`attention_mask`: `torch.BoolTensor` of shape (bsz, max_len);
`labels`: `torch.Tensor` of shape (bsz, max_len), which contains `IGNORE_INDEX`.
"""
assert isinstance(self.tokenizer.pad_token_id, int) and self.tokenizer.pad_token_id >= 0, (
f"`{self.tokenizer.__class__.__name__}.pad_token_id` must be a valid non-negative integer index value, "
f"but now `{self.tokenizer.pad_token_id}`"
)
# `List[torch.Tensor]`
batch_input_ids = [
(
torch.LongTensor(instance["input_ids"][: self.max_length])
if len(instance["input_ids"]) > self.max_length
else torch.LongTensor(instance["input_ids"])
)
for instance in instances
]
batch_labels = [
(
torch.LongTensor(instance["labels"][: self.max_length])
if len(instance["labels"]) > self.max_length
else torch.LongTensor(instance["labels"])
)
for instance in instances
]
if self.tokenizer.padding_side == "right":
input_ids = torch.nn.utils.rnn.pad_sequence(
sequences=batch_input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id,
) # (bsz, max_len)
labels = torch.nn.utils.rnn.pad_sequence(
sequences=batch_labels,
batch_first=True,
padding_value=self.ignore_index,
) # (bsz, max_len)
if self.padding == "max_length":
# pad to max
to_pad = self.max_length - input_ids.size(1)
input_ids = F.pad(input_ids, (0, to_pad), value=self.tokenizer.pad_token_id)
labels = F.pad(labels, (0, to_pad), value=self.ignore_index)
elif self.tokenizer.padding_side == "left":
reversed_input_ids = [seq.flip(dims=(0,)) for seq in batch_input_ids]
reversed_input_ids = torch.nn.utils.rnn.pad_sequence(
sequences=reversed_input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id,
) # (bsz, max_len)
input_ids = torch.flip(reversed_input_ids, dims=(1,)) # (bsz, max_len)
reversed_labels = [seq.flip(dims=(0,)) for seq in batch_labels]
reversed_labels = torch.nn.utils.rnn.pad_sequence(
sequences=reversed_labels,
batch_first=True,
padding_value=self.ignore_index,
) # (bsz, max_len)
labels = torch.flip(reversed_labels, dims=(1,)) # (bsz, max_len)
else:
raise RuntimeError(
f"`{self.tokenizer.__class__.__name__}.padding_side` can only be `left` or `right`, "
f"but now `{self.tokenizer.padding_side}`"
)
attention_mask = input_ids.ne(self.tokenizer.pad_token_id) # `torch.BoolTensor`, (bsz, max_len)
return dict(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
class StatefulDistributedSampler(DistributedSampler):
"""
Stateful distributed sampler for multi-stage training.
"""
def __init__(
self,
dataset: DatasetType,
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
shuffle: bool = True,
seed: int = 0,
drop_last: bool = False,
) -> None:
super().__init__(
dataset=dataset,
num_replicas=num_replicas,
rank=rank,
shuffle=shuffle,
seed=seed,
drop_last=drop_last,
)
self.start_index = 0
def __iter__(self) -> Iterator:
iterator = super().__iter__()
indices = list(iterator)
indices = indices[self.start_index :]
return iter(indices)
def __len__(self) -> int:
return self.num_samples - self.start_index
def set_start_index(self, start_index: int) -> None:
self.start_index = start_index
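# A minimal wiring sketch (a hedged example; the dataset path, `tokenizer`, batch size,
# and an already-initialized distributed process group are assumptions):
#
#   dataset = load_tokenized_dataset(["/path/to/arrow/part-00000"], mode="train")
#   collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer, max_length=4096)
#   sampler = StatefulDistributedSampler(dataset, shuffle=True)
#   dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, sampler=sampler, collate_fn=collator)
#   sampler.set_start_index(128)  # resume mid-epoch by skipping already-consumed samples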
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/dataset/conversation.py | applications/Colossal-LLaMA/colossal_llama/dataset/conversation.py | # Copyright 2023 lm-sys@FastChat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
from enum import Enum, auto
from typing import List
class SeparatorStyle(Enum):
ADD_BOS_EOS_TOKEN = auto()
@dataclasses.dataclass
class Conversation:
system: str
roles: List[str]
messages: List[List[str]]
offset: int
sep_style: SeparatorStyle
seps: List[str]
def clear(self):
self.messages = []
def get_prompt(self, length: int = None):
if length is None:
length = len(self.messages)
if self.sep_style == SeparatorStyle.ADD_BOS_EOS_TOKEN:
ret = self.system
for role, message in self.messages[0:length]:
if message:
ret += role + ": " + self.seps[0] + message + self.seps[1]
else:
ret += role + ": " + self.seps[0]
return ret
else:
raise ValueError(f"Invalid style: {self.sep_style}")
def save_prompt(self):
if self.sep_style == SeparatorStyle.ADD_BOS_EOS_TOKEN:
ret = self.system
for role, message in self.messages:
if message:
ret += role + ": " + self.seps[0] + message + self.seps[1] + "\n"
else:
ret += role + ": " + self.seps[0]
return ret
else:
raise ValueError(f"Invalid style: {self.sep_style}")
def append_message(self, role, message):
self.messages.append([role, message])
def copy(self):
return Conversation(
system=self.system,
roles=self.roles,
messages=[[x, y] for x, y in self.messages],
offset=self.offset,
sep_style=self.sep_style,
seps=self.seps,
)
def dict(self):
return {
"system": self.system,
"roles": self.roles,
"messages": self.messages,
"offset": self.offset,
"seps": self.seps,
}
LLaMA2_Conv = Conversation(
system="A chat between a curious human and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
roles=("Human", "Assistant"),
messages=[],
offset=0,
sep_style=SeparatorStyle.ADD_BOS_EOS_TOKEN,
seps=["<s>", "</s>"],
)
LLaMA3_Conv = Conversation(
system="A chat between a curious human and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
roles=("Human", "Assistant"),
messages=[],
offset=0,
sep_style=SeparatorStyle.ADD_BOS_EOS_TOKEN,
seps=["<|begin_of_text|>", "<|eot_id|>"],
)
default_conversation = LLaMA3_Conv
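# A worked example (a sketch with made-up messages) of the ADD_BOS_EOS_TOKEN prompt format:
#
#   conv = LLaMA2_Conv.copy()
#   conv.append_message("Human", "Hi")
#   conv.append_message("Assistant", None)
#   conv.get_prompt()
#   # -> system text + "Human: <s>Hi</s>" + "Assistant: <s>"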
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/dataset/__init__.py | applications/Colossal-LLaMA/colossal_llama/dataset/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/colossal_llama/dataset/spliced_and_tokenized_dataset.py | applications/Colossal-LLaMA/colossal_llama/dataset/spliced_and_tokenized_dataset.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Splicing multiple pre-tokenized sequence data points
"""
import bisect
import random
import warnings
from copy import deepcopy
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from datasets import dataset_dict
from torch.utils.data import ConcatDataset, Dataset, IterableDataset
from transformers import AutoTokenizer
from transformers.models.llama.tokenization_llama import LlamaTokenizer
from transformers.tokenization_utils import PreTrainedTokenizer
from colossalai.logging import get_dist_logger
from .conversation import Conversation, default_conversation
logger = get_dist_logger()
IGNORE_INDEX = -100
DSType = Union[Dataset, ConcatDataset, dataset_dict.Dataset]
def supervised_tokenize_pretrain(
data_point: Dict[str, str], tokenizer: LlamaTokenizer, ignore_index: int = None, max_length: int = 4096
) -> Dict[str, Union[int, str, List[int]]]:
"""
    A tokenization function to tokenize an original pretraining data point as follows:
{"source": "", "target": "Beijing, the capital of the People's Republic of China, ...", "category": "geography"}
"""
assert tokenizer.add_bos_token is False and tokenizer.add_eos_token is False, (
"Initially set `tokenizer.add_bos_token` and `tokenizer.add_eos_token` to False, "
"add <bos> and <eos> manually later"
)
if ignore_index is None:
ignore_index = IGNORE_INDEX
source_text = data_point["source"] # `str`
target_text = data_point["target"] # `str`
is_null_source = len(source_text) == 0
source_text = tokenizer.bos_token + source_text
target_text += tokenizer.eos_token
sequence_text = source_text + target_text
tokenized = tokenizer([source_text, sequence_text])["input_ids"]
sequence_input_ids = tokenized[1]
sequence_labels = deepcopy(sequence_input_ids)
source_length = len(tokenized[0])
if not is_null_source:
sequence_labels[:source_length] = [ignore_index for _ in range(source_length)]
# sequence truncation.
if len(sequence_input_ids) > max_length:
sequence_input_ids = sequence_input_ids[:max_length]
sequence_labels = sequence_labels[:max_length]
return dict(
input_ids=sequence_input_ids,
labels=sequence_labels,
seq_length=len(sequence_input_ids),
seq_category=data_point["category"],
)
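# A worked sketch of the pretraining tokenization above (the `tokenizer` object and
# the text are assumptions; no real token ids are shown):
#
#   data_point = {"source": "", "target": "Beijing is the capital of China.", "category": "geography"}
#   out = supervised_tokenize_pretrain(data_point, tokenizer, max_length=4096)
#   # out["input_ids"]  -> <bos> + target tokens + <eos>
#   # out["labels"]     -> identical to input_ids here, because the source is empty
#   # out["seq_length"] -> len(out["input_ids"]); out["seq_category"] -> "geography"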
def supervised_tokenize_sft(
data_point: Dict[str, str],
tokenizer: AutoTokenizer,
conversation_template: Conversation = default_conversation,
ignore_index: int = None,
max_length: int = 4096,
) -> Dict[str, Union[int, str, List[int]]]:
"""
    A tokenization function to tokenize an original supervised data point as follows:
{"messages": [{"from": "human", "content": "xxx"}, {"from": "assistant", "content": "xxx"}]}
"""
assert tokenizer.add_bos_token is False and tokenizer.add_eos_token is False, (
"Initially set `tokenizer.add_bos_token` and `tokenizer.add_eos_token` to False, "
"add <bos> and <eos> manually later"
)
assert (
tokenizer.bos_token == conversation_template.seps[0] and tokenizer.eos_token == conversation_template.seps[1]
), f"`bos_token`{tokenizer.bos_token} and `eos_token`{tokenizer.eos_token} should be the same with `conversation_template.seps`{conversation_template.seps}."
if ignore_index is None:
ignore_index = IGNORE_INDEX
messages = data_point["messages"]
template = deepcopy(conversation_template)
template.messages = []
for mess in messages:
from_str = mess["from"]
if from_str.lower() == "human":
from_str = template.roles[0]
elif from_str.lower() == "assistant":
from_str = template.roles[1]
else:
raise ValueError(f"Unsupported role {from_str.lower()}")
template.append_message(from_str, mess["content"])
if len(template.messages) % 2 != 0:
template.messages = template.messages[0:-1]
    # `target_turn_index` is the number of leading turns whose tokenized prompt still fits within `max_length - 1` tokens.
turns = [i for i in range(1, len(messages) // 2 + 1)]
target_turn_index = bisect.bisect_right(
turns,
max_length - 1,
key=lambda x: len(tokenizer([template.get_prompt(2 * x)], add_special_tokens=False)["input_ids"][0]),
)
# The tokenized length for first turn already exceeds `max_length - 1`.
if target_turn_index - 1 < 0:
return dict(
input_ids=None,
labels=None,
inputs_decode=None,
labels_decode=None,
seq_length=None,
seq_category=None,
)
target_turn = turns[target_turn_index - 1]
prompt = template.get_prompt(2 * target_turn)
tokenized = tokenizer([prompt], add_special_tokens=False)["input_ids"][0]
template.messages = template.messages[0 : 2 * target_turn]
starts = []
ends = []
gpt_bos = False if template.messages[0][0] == template.roles[0] else True
gpt_eos = False if template.messages[0][0] == template.roles[0] else True
for i, token_id in enumerate(tokenized):
if token_id == tokenizer.bos_token_id:
if gpt_bos:
starts.append(i)
gpt_bos = not gpt_bos
elif token_id == tokenizer.eos_token_id:
if gpt_eos:
ends.append(i)
gpt_eos = not gpt_eos
if len(starts) != target_turn or len(ends) != target_turn:
        logger.info(
            "Mismatched number of response spans. Please check whether the tokenizer adds extra `bos_token`/`eos_token`, or whether the original messages already contain `bos_token` or `eos_token`."
        )
return dict(
input_ids=None,
labels=None,
inputs_decode=None,
labels_decode=None,
seq_length=None,
seq_category=None,
)
tokenized = [tokenizer.bos_token_id] + tokenized
labels = [ignore_index] * len(tokenized)
for start, end in zip(starts, ends):
labels[start + 1 : end + 2] = tokenized[start + 1 : end + 2]
labels_decode = deepcopy(labels)
for i, z in enumerate(labels_decode):
if z == ignore_index:
labels_decode[i] = tokenizer.unk_token_id
    # `inputs_decode` and `labels_decode` can be used to check whether the tokenization is correct.
return dict(
input_ids=tokenized,
labels=labels,
inputs_decode=tokenizer.decode(tokenized),
labels_decode=tokenizer.decode(labels_decode),
seq_length=len(tokenized),
seq_category=data_point["category"] if "category" in data_point else "None",
)
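# A minimal input sketch for the SFT tokenization above (contents are made up; the
# `tokenizer` object is assumed to match `default_conversation`'s separators):
#
#   data_point = {
#       "messages": [
#           {"from": "human", "content": "What is 2+2?"},
#           {"from": "assistant", "content": "4."},
#       ],
#       "category": "math",
#   }
#   out = supervised_tokenize_sft(data_point, tokenizer, conversation_template=default_conversation)
#   # Only the assistant spans (between the matching <bos>/<eos> separators) keep real
#   # label ids; every other position in out["labels"] is IGNORE_INDEX (-100).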
class ClosedToConstantLengthSplicedDataset(IterableDataset):
"""
Define an iterable dataset that returns a (close to) constant length data point spliced from multiple
original independent (pre-tokenized) data points.
"""
def __init__(
self,
dataset: DSType,
tokenizer: PreTrainedTokenizer,
max_length: int = 4096,
num_packed_sequences: int = 8,
fetch_sequence_func: Callable[[Any], Tuple[List[int], List[int]]] = None,
input_ids_field: str = "input_ids",
labels_field: str = "labels",
infinite: bool = False,
shuffle: bool = True,
error_strict: bool = False,
) -> None:
self.tokenizer = tokenizer
self.dataset = dataset
self.max_length = max_length
self.infinite = infinite
self.max_buffer_size = max_length * num_packed_sequences # e.g., 4096 * 16
self.shuffle = shuffle
# Callable[[Dict[str, Any]], Tuple[List[int], List[int]]],
# A function that fetch sequence input_ids and labels from the original data point
if fetch_sequence_func is None:
self.fetch_sequence_func = lambda data_point: (data_point[input_ids_field], data_point[labels_field])
else:
self.fetch_sequence_func = fetch_sequence_func
self.input_ids_field = input_ids_field
self.labels_field = labels_field
self.error_strict = error_strict
self.current_size = 0 # `int`, current packed data size.
def __len__(self) -> int:
return len(self.dataset)
def __iter__(self) -> Iterable[Dict[str, List[int]]]:
iterator = iter(self.dataset)
more_data_points = True
while more_data_points is True:
buffer, buffer_len = [], 0
while True:
# ending condition.
if buffer_len >= self.max_buffer_size:
break
try:
# `Tuple[List[int], List[int]]`
seq_input_ids, seq_labels = self.fetch_sequence_func(next(iterator))
buffer.append({self.input_ids_field: seq_input_ids, self.labels_field: seq_labels})
buffer_len += len(buffer[-1][self.input_ids_field])
except StopIteration:
if self.infinite is True:
iterator = iter(self.dataset)
warnings.warn("The dataset reached end and the iterator is reset to the start.")
else:
more_data_points = False
break
examples = [] # `List[Dict[str, List[int]]]`, save buffered spliced data points.
spliced_input_ids, spliced_labels = [], [] # `List[int]`, `List[int]`
for i, data_point in enumerate(buffer):
# TODO(2023-09-18) check errors for each unspliced tokenized data point
seq_input_ids = data_point[self.input_ids_field]
seq_labels = data_point[self.labels_field]
# Handle special case:
# If the length of an original data point (i.e., input_ids length of a data point before splicing)
# exceeds `max_length`, truncate it.
if len(seq_input_ids) > self.max_length:
truncated_seq_input_ids = seq_input_ids[: self.max_length]
truncated_label_ids = seq_labels[: self.max_length]
if set(truncated_label_ids) == {IGNORE_INDEX}:
if self.error_strict is True:
raise ValueError(
f"Find an out-of-bounds length({len(seq_input_ids)}) data point "
f"with all label values as {IGNORE_INDEX}."
)
else:
warnings.warn(f"Filter an error truncated data point (labels all {IGNORE_INDEX})")
continue # Skip the current error data point.
spliced_data_point = {
self.input_ids_field: truncated_seq_input_ids,
self.labels_field: truncated_label_ids,
}
examples.append(spliced_data_point)
warnings.warn("Find a data point to be truncated.")
continue
                # Before splicing, check whether appending this sequence would exceed `max_length`.
if len(spliced_input_ids) + len(seq_input_ids) > self.max_length:
spliced_data_point = {
self.input_ids_field: spliced_input_ids,
self.labels_field: spliced_labels,
} # `Dict[str, List[int]]`
# Update.
spliced_input_ids, spliced_labels = [], []
spliced_input_ids.extend(seq_input_ids)
spliced_labels.extend(seq_labels)
examples.append(spliced_data_point)
else:
spliced_input_ids.extend(seq_input_ids)
spliced_labels.extend(seq_labels)
            # Handle the residual spliced data point at the end of the dataset.
if self.infinite is False and more_data_points is False and len(spliced_input_ids) > 0:
examples.append({self.input_ids_field: spliced_input_ids, self.labels_field: spliced_labels})
if self.shuffle:
random.shuffle(examples)
for spliced_data_point in examples:
# TODO(2023-09-18): check errors for each spliced tokenized data point.
self.current_size += 1
yield spliced_data_point
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/inference/stream_chat_example.py | applications/Colossal-LLaMA/inference/stream_chat_example.py | import argparse
from colossal_llama.utils.stream_chat_patch import streaming_chat
from transformers import AutoModelForCausalLM, AutoTokenizer
SYSTEM = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."
def main(args):
model = AutoModelForCausalLM.from_pretrained(args.model_path).cuda().eval()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_path)
past_key_values, history = None, []
roles = ["", "Human", "Assistant"]
history = []
history.append({"role": roles[0], "message": SYSTEM})
while True:
input_query = input(f"\n{roles[1]}: ")
if input_query.strip() == "exit":
break
if input_query.strip() == "clear":
past_key_values, history = None, []
continue
print(f"\n{roles[2]}: ", end="")
gen_len = 0
for response, history, past_key_values in streaming_chat(
model,
tokenizer,
input_query,
history=history,
roles=roles,
temperature=args.temperature,
top_p=args.top_p,
top_k=args.top_k,
do_sample=args.do_sample,
length_penalty=args.length_penalty,
max_new_tokens=args.max_new_tokens,
past_key_values=past_key_values,
return_past_key_values=True,
):
output = response[gen_len:]
print(output, end="", flush=True)
gen_len = len(response)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default=None, help="path to chat version model")
parser.add_argument("--tokenizer_path", type=str, default=None, help="path to chat version tokenizer")
parser.add_argument("--temperature", type=float, default=0.8, help="set temperature")
parser.add_argument("--top_p", type=float, default=0.95, help="set top p value")
parser.add_argument("--top_k", type=int, default=50, help="set top k value")
parser.add_argument("--do_sample", type=bool, default=True, help="whether turn on do_sample or not")
parser.add_argument("--length_penalty", type=float, default=1.2, help="set length penalty")
parser.add_argument("--max_new_tokens", type=int, default=512, help="set max new tokens")
args = parser.parse_args()
main(args)
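    # Example invocation (both paths are placeholders for your own checkpoints):
    #   python stream_chat_example.py --model_path <path_to_chat_model> --tokenizer_path <path_to_tokenizer>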
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/Colossal-LLaMA/inference/inference_example.py | applications/Colossal-LLaMA/inference/inference_example.py | import argparse
import torch
from colossal_llama.dataset.conversation import default_conversation
from transformers import AutoModelForCausalLM, AutoTokenizer
from colossalai.logging import get_dist_logger
logger = get_dist_logger()
def load_model(model_path, device="cuda", **kwargs):
logger.info("Please check whether the tokenizer and model weights are properly stored in the same folder.")
model = AutoModelForCausalLM.from_pretrained(model_path, **kwargs)
model.to(device)
try:
tokenizer = AutoTokenizer.from_pretrained(model_path, padding_side="left")
except OSError:
raise ImportError("Tokenizer not found. Please check if the tokenizer exists or the model path is correct.")
return model, tokenizer
@torch.inference_mode()
def generate(args):
model, tokenizer = load_model(model_path=args.model_path, device=args.device)
if args.prompt_style == "sft":
conversation = default_conversation.copy()
conversation.append_message("Human", args.input_txt)
conversation.append_message("Assistant", None)
input_txt = conversation.get_prompt()
else:
BASE_INFERENCE_SUFFIX = "\n\n->\n\n"
input_txt = f"{args.input_txt}{BASE_INFERENCE_SUFFIX}"
inputs = tokenizer(input_txt, return_tensors="pt").to(args.device)
num_input_tokens = inputs["input_ids"].shape[-1]
output = model.generate(
**inputs,
max_new_tokens=args.max_new_tokens,
do_sample=args.do_sample,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
num_return_sequences=1,
)
response = tokenizer.decode(output.cpu()[0, num_input_tokens:], skip_special_tokens=True)
logger.info(f"\nHuman: {args.input_txt} \n\nAssistant: \n{response}")
return response
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Colossal-LLaMA-2 inference process.")
parser.add_argument(
"--model_path",
type=str,
default="hpcai-tech/Colossal-LLaMA-2-7b-base",
help="HF repo name or local path of the model",
)
parser.add_argument("--device", type=str, default="cuda:0", help="Set the device")
parser.add_argument(
"--max_new_tokens",
type=int,
default=512,
help=" Set maximum numbers of tokens to generate, ignoring the number of tokens in the prompt",
)
parser.add_argument("--do_sample", type=bool, default=True, help="Set whether or not to use sampling")
parser.add_argument("--temperature", type=float, default=0.3, help="Set temperature value")
parser.add_argument("--top_k", type=int, default=50, help="Set top_k value for top-k-filtering")
parser.add_argument("--top_p", type=float, default=0.95, help="Set top_p value for generation")
parser.add_argument("--input_txt", type=str, default="明月松间照,", help="The prompt input to the model")
parser.add_argument("--prompt_style", choices=["sft", "pretrained"], default="sft", help="The style of the prompt")
args = parser.parse_args()
generate(args)
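    # Example invocation (model path and prompt are placeholders; adjust to your setup):
    #   python inference_example.py --model_path hpcai-tech/Colossal-LLaMA-2-7b-base --input_txt "明月松间照," --prompt_style sft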
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/setup.py | applications/ColossalChat/setup.py | from setuptools import find_packages, setup
def fetch_requirements(path):
with open(path, "r") as fd:
return [r.strip() for r in fd.readlines()]
def fetch_readme():
with open("README.md", encoding="utf-8") as f:
return f.read()
def fetch_version():
with open("version.txt", "r") as f:
return f.read().strip()
setup(
name="coati",
version=fetch_version(),
packages=find_packages(
exclude=(
"tests",
"benchmarks",
"*.egg-info",
)
),
description="Colossal-AI Talking Intelligence",
long_description=fetch_readme(),
long_description_content_type="text/markdown",
license="Apache Software License 2.0",
url="https://github.com/hpcaitech/Coati",
install_requires=fetch_requirements("requirements.txt"),
python_requires=">=3.7",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Environment :: GPU :: NVIDIA CUDA",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: System :: Distributed Computing",
],
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/start_code_verifier.py | applications/ColossalChat/start_code_verifier.py | from typing import List, Optional
from coati.distributed.reward.code_reward.utils import check_correctness # Assuming utils.py is in the same directory
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
app = FastAPI()
class CheckCorrectnessRequest(BaseModel):
in_outs: Optional[dict]
generation: str
timeout: int = 10
debug: bool = True
eval_mode: bool = False
class CheckCorrectnessResponse(BaseModel):
result: List[int]
metadata: List[dict]
@app.post("/check_correctness", response_model=CheckCorrectnessResponse)
def check_correctness_api(request: CheckCorrectnessRequest):
try:
result, metadata = check_correctness(
in_outs=request.in_outs,
generation=request.generation,
timeout=request.timeout,
debug=request.debug,
eval_mode=request.eval_mode,
)
return CheckCorrectnessResponse(result=result, metadata=metadata)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
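# A minimal sketch for serving this app locally; it assumes the `uvicorn` package is installed,
# and the host/port values below are placeholders to adjust for your deployment.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)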
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/rl_example.py | applications/ColossalChat/rl_example.py | import argparse
import json
import os
import ray
import torch
from coati.distributed.launch import launch_distributed
DEFAUT_SYSTEM_PROMPT = {
"think_answer_tags": "You are a helpful assistant. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within <think> </think> and<answer> </answer> tags, respectively, i.e., <think> reasoning process here </think><answer> answer here </answer>. Now the user asks you to solve a math problem that involves reasoning. After thinking, when you finally reach a conclusion, clearly output the final answer without explanation within the <answer> </answer> tags, i.e., <answer> 123 </answer>.\n\n",
"boxed": "Please reason step by step, and put your final answer within \\boxed{}.",
"code": "You are a helpful assistant.",
}
# bypass the proxy for local addresses
os.environ["no_proxy"] = "127.0.0.1,localhost"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", type=str, default="Qwen/Qwen2.5-7B")
parser.add_argument(
"-cp",
"--checkpoint-path",
type=str,
default=None,
help="Path to the checkpoint to load the model from. If not provided, the model will be loaded from the model path.",
)
parser.add_argument("-d", "--dataset", type=str, default="data.jsonl")
parser.add_argument(
"-ed",
"--eval-dataset",
type=str,
default=None,
help="Evaluation dataset for each task, please use json format to specify the dataset for each task. \
For example: {'task1':'data_eval_task1.jsonl', 'task2':'data_eval_task2.jsonl'}, the jsonl file should be in the same format as the training dataset. \
The key is the task name, and the value is the path to the jsonl file",
)
parser.add_argument("-p", "--project", type=str, default="GRPO", help="Project name.")
parser.add_argument("-e", "--num-episodes", type=int, default=1, help="Number of episodes to train.")
# Distributed training parameters
parser.add_argument("-t", "--num-trainers", type=int, default=2)
parser.add_argument("-i", "--num-inferencer", type=int, default=2)
parser.add_argument("-g", "--num-generations", type=int, default=8, help="Number of generations per prompt.")
parser.add_argument(
"-ibs",
"--inference-batch-size",
type=int,
default=64,
help="Number of prompts to generate per inference step. It should be divisible by tbs, and the weights on the inference backend will be synced every ibs/tbs training steps of the policy model.",
)
parser.add_argument(
"-imbs",
"--inference-microbatch-size",
type=int,
default=8,
help="Effective batch size for the inference backend to run generation. Please select based on memory constraint.",
)
parser.add_argument(
"-tbs",
"--train-batch-size",
type=int,
default=32,
help="Number of unique prompts to update policy model per step per dp group. Gradient is accumulated across tbs * dp_size unique prompts, equivalently tbs * g * dp_size samples",
)
parser.add_argument(
"-tMbs",
"--train-minibatch-size",
type=int,
default=8,
help="Number of unique prompts in each training batch per dp group. The inference backend must generate tMbs * g * dp_size samples before forwarding. Satisfy tMbs * g >= tmbs",
)
parser.add_argument(
"-tmbs",
"--train-microbatch-size",
type=int,
default=2,
help="Effective batch size per dp group for forwarding and backwarding. Please select based on the availiable memory.",
)
parser.add_argument(
"-tp",
"--tensor-parallel-size",
type=int,
default=1,
help="Tensor parallel size for the trainer (consumer). Please check the generation arguments documentation for your backend.",
)
parser.add_argument(
"-pp",
"--pipeline-parallel-size",
type=int,
default=1,
help="Pipeline parallel size for the trainer (consumer). Please check the generation arguments documentation for your backend.",
)
parser.add_argument(
"-zero",
"--zero-stage",
type=int,
default=0,
help="Zero stage for the trainer (consumer). Please check the generation arguments documentation for your backend.",
)
parser.add_argument(
"--ray_dir", type=str, default=None, help="Custom temperary directory for storing ray cluster data, Optional"
)
parser.add_argument(
"--master_address", type=str, default=None, help="Master address for multi-node distributed training, Optional"
)
parser.add_argument(
"--master_port", type=int, default=29506, help="Master port for multi-node distributed training, Optional"
)
# Sampling parameters
parser.add_argument("-b", "--backend", type=str, default="transformers", choices=["transformers", "vllm"])
parser.add_argument("-temp", "--temperature", type=float, default=1.0, help="Temperature for sampling.")
parser.add_argument(
"-topk",
"--top-k",
type=int,
default=None,
help="Top k for sampling. Please check the generation arguments documentation for your backend.",
)
parser.add_argument(
"-topp",
"--top-p",
type=float,
default=1.0,
help="Top p for sampling. Please check the generation arguments documentation for your backend.",
)
parser.add_argument("-s", "--system-prompt", type=str, default=None, help="System prompt for data construction.")
parser.add_argument("-mnt", "--max-new-tokens", type=int, default=1024 * 4 - 512, help="Max length for generation.")
parser.add_argument("-mpt", "--max-prompt-tokens", type=int, default=512, help="Max length for prompt.")
parser.add_argument(
"-ptp",
"--producer-tensor-parallel-size",
type=int,
default=1,
help="Tensor parallel size for the producer. Please check the generation arguments documentation for your backend.",
)
# GRPO parameters
parser.add_argument("-a", "--algo", type=str, default="GRPO", choices=["DAPO", "GRPO", "REINFORCE_PPB", "RLOO"])
parser.add_argument("-lr", "--learning-rate", type=float, default=1e-6, help="Learning rate for GRPO.")
parser.add_argument("-kl", "--kl-coeff", type=float, default=0.01, help="KL penalty coefficient for GRPO.")
parser.add_argument(
"-rt",
"--reward-type",
type=str,
default="think_answer_tags",
choices=["think_answer_tags", "boxed", "code"],
help="Reward type for GRPO.",
)
parser.add_argument(
"-cv",
"--code-verifier-api-url",
type=str,
default=None,
help="API URL for code verifier. If not provided, the code verifier will be disabled.",
)
parser.add_argument(
"-ei",
"--eval-interval",
type=int,
default=-1,
help="Interval for evaluation. Evaluate every ei training steps.",
)
parser.add_argument(
"-nb",
"--n-behind",
type=int,
default=0,
help="Number of producer batches to rollout to fill the data buffer before trainer starts to decrease bubble time",
)
# Logging/Checkpointing parameters
parser.add_argument("-si", "--save-interval", type=int, default=100, help="Interval for saving checkpoints.")
parser.add_argument("-sd", "--save-dir", type=str, default="./model", help="Directory for saving checkpoints.")
parser.add_argument(
"-esd", "--eval-save-dir", type=str, default="./eval", help="Directory for saving evaluation results."
)
parser.add_argument(
"-rsd", "--rollout-save-dir", type=str, default="./rollouts", help="Directory for saving rollout loggings."
)
parser.add_argument(
"--enable_profiling", action="store_true", default=False, help="Enable profiling for the training process."
)
args = parser.parse_args()
if args.train_minibatch_size is None:
# Default settings: Using train batch size as mini batch size
args.train_minibatch_size = args.train_batch_size
if args.inference_batch_size is None:
        # Default settings: use train batch size as inference batch size; sync the inference model every train step.
args.inference_batch_size = args.train_batch_size
assert (
args.train_minibatch_size * args.num_generations >= args.train_microbatch_size
and args.train_microbatch_size > 0
), "Train micro batch size must be greater than 0 less than train mini batch size * num generations"
assert (
args.train_minibatch_size <= args.train_batch_size and args.train_batch_size % args.train_minibatch_size == 0
), "Train mini batch size must be less than or equals to train batch size and train batch size must be divisible by train mini batch size"
if args.master_address is None:
# Default settings: Using single machine
ray.init(
address="local",
namespace="ray-example",
runtime_env={
"env_vars": {
# "RAY_DEBUG_POST_MORTEM": "1", # enable post-mortem debugging with ray
"TOKENIZERS_PARALLELISM": "false"
},
},
)
else:
        # For Ray distributed multi-machine training, please set _node_ip_address to the IP address of your master node
ray.init(
_node_ip_address=args.master_address,
namespace="ray-example",
_temp_dir=args.ray_dir,
runtime_env={
"env_vars": {
# "RAY_DEBUG_POST_MORTEM": "1", # enable post-mortem debugging with ray
"TOKENIZERS_PARALLELISM": "false"
},
},
)
if args.top_k is None:
if args.backend == "transformers":
args.top_k = 50
elif args.backend == "vllm":
args.top_k = -1
os.environ["TOKENIZERS_PARALLELISM"] = "false" # Disable tokenizers parallelism to avoid deadlock
inference_model_config = dict(path=args.model, checkpoint_path=args.checkpoint_path)
train_model_config = dict(
path=args.model, use_flash_attention_2=True, use_cache=False, checkpoint_path=args.checkpoint_path
)
generate_config = dict(top_k=args.top_k, top_p=args.top_p, temperature=args.temperature)
if args.backend == "transformers":
inference_model_config.update(
dict(
use_flash_attention_2=True,
torch_dtype=torch.bfloat16,
)
)
generate_config.update(
dict(
max_length=args.max_new_tokens + args.max_prompt_tokens,
do_sample=True,
max_new_tokens=None,
early_stopping=False if args.reward_type == "think_answer_tags" else True,
stop_strings=["</answer>"] if args.reward_type == "think_answer_tags" else None,
)
)
eval_generation_config = {"temperature": 0.6} # used to update generation config for evaluation
elif args.backend == "vllm":
inference_model_config.update(
dict(
gpu_memory_utilization=0.7,
enforce_eager=True,
enable_chunked_prefill=True,
max_model_len=args.max_new_tokens + args.max_prompt_tokens,
tensor_parallel_size=args.producer_tensor_parallel_size,
)
)
if args.enable_profiling:
# If profiling is enabled, we force model to generate to max_new_tokens
generate_config.update(
dict(
max_tokens=args.max_new_tokens, # max new tokens
ignore_eos=True,
include_stop_str_in_output=True,
stop=None,
)
)
else:
generate_config.update(
dict(
max_tokens=args.max_new_tokens, # max new tokens
ignore_eos=True if args.reward_type == "think_answer_tags" else False,
include_stop_str_in_output=True,
stop=["</answer>"] if args.reward_type == "think_answer_tags" else None,
)
)
eval_generation_config = {"temperature": 0.6} # used to update generation config for evaluation
else:
raise ValueError(f"Unsupported backend: {args.backend}")
if args.algo == "GRPO":
# Default Settings
grpo_config = {
"algo": "GRPO",
"lr": args.learning_rate,
"train_microbatch_size": args.train_microbatch_size,
"beta": args.kl_coeff, # KL penalty coefficient
"loss_variation": "sample_level",
"reward_fn_type": args.reward_type,
"max_length": args.max_new_tokens + args.max_prompt_tokens,
"max_new_tokens": args.max_new_tokens,
"response_format_tags": (
{
"think_start": {"text": "<think>", "num_occur": 1},
"think_end": {"text": "</think>", "num_occur": 1},
"answer_start": {"text": "<answer>", "num_occur": 1},
"answer_end": {"text": "</answer>", "num_occur": 1},
}
if args.reward_type == "think_answer_tags"
else None
),
}
elif args.algo == "DAPO":
# DAPO variant settings
grpo_config = {
"algo": "DAPO",
"filter_range": [0.01, 0.99], # only filter out all zero batch and all one batch
"lr": args.learning_rate,
"train_microbatch_size": args.train_microbatch_size,
"dynamic_batching": True,
"clip_eps_low": 0.2,
"clip_eps_high": 0.28,
"skip_threshold": 20.0,
"beta": 0, # no KL penalty for DAPO
"loss_variation": "token_level",
"soft_over_length_punishment": True,
"max_length": args.max_new_tokens + args.max_prompt_tokens,
"max_new_tokens": args.max_new_tokens,
"cache_length": min(1024, int(args.max_new_tokens / 4)),
"filter_truncated_response": True,
"reward_fn_type": args.reward_type,
"response_format_tags": (
{
"think_start": {"text": "<think>", "num_occur": 1},
"think_end": {"text": "</think>", "num_occur": 1},
"answer_start": {"text": "<answer>", "num_occur": 1},
"answer_end": {"text": "</answer>", "num_occur": 1},
}
if args.reward_type == "think_answer_tags"
else None
),
}
elif args.algo == "REINFORCE_PPB":
# Default Settings
grpo_config = {
"algo": "REINFORCE_PPB",
"lr": args.learning_rate,
"train_microbatch_size": args.train_microbatch_size,
"beta": args.kl_coeff, # KL penalty coefficient
"loss_variation": "sample_level",
"reward_fn_type": args.reward_type,
"max_length": args.max_new_tokens + args.max_prompt_tokens,
"max_new_tokens": args.max_new_tokens,
"response_format_tags": (
{
"think_start": {"text": "<think>", "num_occur": 1},
"think_end": {"text": "</think>", "num_occur": 1},
"answer_start": {"text": "<answer>", "num_occur": 1},
"answer_end": {"text": "</answer>", "num_occur": 1},
}
if args.reward_type == "think_answer_tags"
else None
),
}
elif args.algo == "RLOO":
# Default Settings
grpo_config = {
"algo": "RLOO",
"lr": args.learning_rate,
"train_microbatch_size": args.train_microbatch_size,
"beta": args.kl_coeff, # KL penalty coefficient
"loss_variation": "sample_level",
"reward_fn_type": args.reward_type,
"max_length": args.max_new_tokens + args.max_prompt_tokens,
"max_new_tokens": args.max_new_tokens,
"response_format_tags": (
{
"think_start": {"text": "<think>", "num_occur": 1},
"think_end": {"text": "</think>", "num_occur": 1},
"answer_start": {"text": "<answer>", "num_occur": 1},
"answer_end": {"text": "</answer>", "num_occur": 1},
}
if args.reward_type == "think_answer_tags"
else None
),
}
else:
raise ValueError(f"Unsupported algorithm: {args.algo}")
if args.reward_type == "code":
assert args.code_verifier_api_url is not None, "Please provide a code verifier API URL for code reward type."
grpo_config.update({"code_verifier_api_url": args.code_verifier_api_url})
if args.system_prompt is None:
# Default system prompt
args.system_prompt = DEFAUT_SYSTEM_PROMPT[args.reward_type]
launch_distributed(
num_producers=args.num_inferencer,
num_proc_per_producer=inference_model_config.get("tensor_parallel_size", args.producer_tensor_parallel_size),
num_consumer_procs=args.num_trainers,
num_episodes=args.num_episodes,
inference_batch_size=args.inference_batch_size,
inference_microbatch_size=args.inference_microbatch_size,
train_batch_size=args.train_batch_size,
train_minibatch_size=args.train_minibatch_size,
train_dataset_config={
"path": args.dataset,
"max_length": args.max_prompt_tokens,
"system_prompt": args.system_prompt,
},
inference_model_config=inference_model_config,
generate_config=generate_config,
num_generations=args.num_generations,
train_model_config=train_model_config,
grpo_config=grpo_config,
plugin_config={
"tp_size": args.tensor_parallel_size,
"pp_size": args.pipeline_parallel_size,
"microbatch_size": max(
1, args.train_microbatch_size // args.pipeline_parallel_size
), # microbatch size should be set to train_microbatch_size // pp_size
"zero_stage": args.zero_stage,
"max_norm": 1.0,
}, # for pp, tp
inference_backend=args.backend,
master_addr="localhost",
master_port=args.master_port,
core_algo=args.algo,
project_name=args.project,
save_interval=args.save_interval,
save_dir=os.path.join(args.save_dir, args.project.replace(" ", "_")),
eval_dataset_config=(
{
k: {"path": v, "max_length": args.max_prompt_tokens, "system_prompt": args.system_prompt}
for k, v in json.loads(args.eval_dataset).items()
}
if args.eval_dataset
else None
),
eval_interval=args.eval_interval,
eval_save_dir=os.path.join(args.eval_save_dir, args.project.replace(" ", "_")),
eval_generation_config=eval_generation_config,
log_rollout_interval=20,
rollout_save_dir=args.rollout_save_dir,
enable_profiling=args.enable_profiling,
n_behind=args.n_behind,
)
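    # Example single-node launch (flag values are placeholders; tune them to your hardware):
    #   python rl_example.py -m Qwen/Qwen2.5-7B -d data.jsonl -t 2 -i 2 -b vllm -a GRPO -rt boxed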
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/visualization.py | applications/ColossalChat/visualization.py | # Re-import required libraries due to kernel reset
import argparse
from collections import defaultdict
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# Argument parser for command line arguments
parser = argparse.ArgumentParser(description="Process profiling logs and generate a timeline plot.")
parser.add_argument("--visualization", type=str, default="actor_timelines.png", help="Path to the visualization file.")
args = parser.parse_args()
# Raw log lines
log_lines = []
import glob
files = glob.glob("*.prof")
for file in files:
with open(file, "r") as f:
log_lines += f.readlines()
# Parse logs and collect function intervals grouped by actor
actors = defaultdict(lambda: defaultdict(list))
current_entries = {}
# First, collect all timestamps to find the minimum
all_timestamps = []
parsed_lines = []
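# Each non-header log line is expected to contain, whitespace-separated: a timestamp, an actor name,
# an unused field, an action ("Enter"/"Exit"), and a function name, matching the indexing below.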
for line in log_lines:
if line.startswith("[Log]"):
continue
parts = line.split()
timestamp = float(parts[0])
actor = parts[1]
action = parts[3]
func_name = parts[4]
parsed_lines.append((timestamp, actor, action, func_name))
all_timestamps.append(timestamp)
if not all_timestamps:
raise ValueError("No valid log entries found.")
min_timestamp = min(all_timestamps)
for timestamp, actor, action, func_name in parsed_lines:
rel_timestamp = timestamp - min_timestamp
key = (actor, func_name)
if action == "Enter":
current_entries[key] = rel_timestamp
elif action == "Exit":
start_time = current_entries.pop(key, None)
if start_time is not None:
actors[actor][func_name].append((start_time, rel_timestamp))
# Plotting setup
fig, ax = plt.subplots(figsize=(12, 6))
colors = cm.get_cmap("tab10", len(actors))
actor_offsets = {}
base_offset = 0
function_spacing = 0.9
yticks = []
yticklabels = []
for idx, (actor, func_dict) in enumerate(actors.items()):
actor_offsets[actor] = base_offset
color = colors(idx)
for j, (func, intervals) in enumerate(func_dict.items()):
print(actor, func, intervals)
y_val = base_offset + j * function_spacing
yticks.append(y_val)
yticklabels.append(f"{actor}:{func}")
for start, end in intervals:
if end - start < 1:
                end = start + 1  # Ensure all lines are at least 1 unit long
ax.plot(
[start, end],
[y_val, y_val],
color=color,
linewidth=2,
label=actor if j == 0 else "",
)
base_offset += len(func_dict) * function_spacing + 1
# Formatting
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels)
ax.set_xlabel("Time")
ax.set_title("Timeline per Actor")
# Remove duplicate labels in legend
handles, labels = ax.get_legend_handles_labels()
unique = dict(zip(labels, handles))
ax.legend(unique.values(), unique.keys())
plt.tight_layout()
plt.grid(True)
plt.savefig(args.visualization, dpi=600) # Increase dpi for higher resolution
print(f"Plot saved as {args.visualization}")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/rl_example_zero_bubble.py | applications/ColossalChat/rl_example_zero_bubble.py | import argparse
import json
import os
import ray
import torch
from coati.distributed.launch_zero_bubble import launch_distributed
DEFAUT_SYSTEM_PROMPT = {
"think_answer_tags": "You are a helpful assistant. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within <think> </think> and<answer> </answer> tags, respectively, i.e., <think> reasoning process here </think><answer> answer here </answer>. Now the user asks you to solve a math problem that involves reasoning. After thinking, when you finally reach a conclusion, clearly output the final answer without explanation within the <answer> </answer> tags, i.e., <answer> 123 </answer>.\n\n",
"boxed": "Please reason step by step, and put your final answer within \\boxed{}.",
"code": "You are a helpful assistant.",
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", type=str, default="Qwen/Qwen2.5-7B")
parser.add_argument(
"--tokenizer-path",
type=str,
default=None,
help="Path to the tokenizer. If not provided, will use the model path.",
)
parser.add_argument("-d", "--dataset", type=str, default="data.jsonl")
parser.add_argument(
"-ed",
"--eval-dataset",
type=str,
default=None,
help="Evaluation dataset for each task, please use json format to specify the dataset for each task. \
For example: {'task1':'data_eval_task1.jsonl', 'task2':'data_eval_task2.jsonl'}, the jsonl file should be in the same format as the training dataset. \
The key is the task name, and the value is the path to the jsonl file",
)
parser.add_argument("-p", "--project", type=str, default="GRPO", help="Project name.")
parser.add_argument("-e", "--num-episodes", type=int, default=1, help="Number of episodes to train.")
# Distributed training parameters
parser.add_argument("-t", "--num-trainers", type=int, default=2)
parser.add_argument("-i", "--num-inferencer", type=int, default=2)
parser.add_argument("-g", "--num-generations", type=int, default=8, help="Number of generations per prompt.")
parser.add_argument(
"-ibs",
"--inference-batch-size",
type=int,
default=64,
help="Number of prompts to generate per inference step. It should be divisible by tbs, and the weights on the inference backend will be synced every ibs/tbs training steps of the policy model.",
)
parser.add_argument(
"-imbs",
"--inference-microbatch-size",
type=int,
default=8,
help="Effective batch size for the inference backend to run generation. Please select based on memory constraint.",
)
parser.add_argument(
"-tbs",
"--train-batch-size",
type=int,
default=32,
help="Number of unique prompts to update policy model per step per dp group. Gradient is accumulated across tbs * dp_size unique prompts, equivalently tbs * g * dp_size samples",
)
parser.add_argument(
"-tMbs",
"--train-minibatch-size",
type=int,
default=8,
help="Number of unique prompts in each training batch per dp group. The inference backend must generate tMbs * g * dp_size samples before forwarding. Satisfy tMbs * g >= tmbs",
)
parser.add_argument(
"-tmbs",
"--train-microbatch-size",
type=int,
default=2,
help="Effective batch size per dp group for forwarding and backwarding. Please select based on the availiable memory.",
)
parser.add_argument(
"-tp",
"--tensor-parallel-size",
type=int,
default=1,
help="Tensor parallel size for the trainer (consumer). Please check the generation arguments documentation for your backend.",
)
parser.add_argument(
"-pp",
"--pipeline-parallel-size",
type=int,
default=1,
help="Pipeline parallel size for the trainer (consumer). Please check the generation arguments documentation for your backend.",
)
parser.add_argument(
"-zero",
"--zero-stage",
type=int,
default=0,
help="Zero stage for the trainer (consumer). Please check the generation arguments documentation for your backend.",
)
parser.add_argument(
"--ray_dir", type=str, default=None, help="Custom temperary directory for storing ray cluster data, Optional"
)
parser.add_argument(
"--master_address", type=str, default=None, help="Master address for multi-node distributed training, Optional"
)
parser.add_argument(
"--master_port", type=int, default=29506, help="Master port for multi-node distributed training, Optional"
)
# Sampling parameters
parser.add_argument("-b", "--backend", type=str, default="transformers", choices=["transformers", "vllm"])
parser.add_argument("-temp", "--temperature", type=float, default=1.0, help="Temperature for sampling.")
parser.add_argument(
"-topk",
"--top-k",
type=int,
default=None,
help="Top k for sampling. Please check the generation arguments documentation for your backend.",
)
parser.add_argument(
"-topp",
"--top-p",
type=float,
default=1.0,
help="Top p for sampling. Please check the generation arguments documentation for your backend.",
)
parser.add_argument("-s", "--system-prompt", type=str, default=None, help="System prompt for data construction.")
parser.add_argument("-mnt", "--max-new-tokens", type=int, default=1024 * 4 - 512, help="Max length for generation.")
parser.add_argument("-mpt", "--max-prompt-tokens", type=int, default=512, help="Max length for prompt.")
parser.add_argument(
"-ptp",
"--producer-tensor-parallel-size",
type=int,
default=1,
help="Tensor parallel size for the producer. Please check the generation arguments documentation for your backend.",
)
# GRPO parameters
parser.add_argument("-a", "--algo", type=str, default="GRPO", choices=["DAPO", "GRPO"])
parser.add_argument("-lr", "--learning-rate", type=float, default=1e-6, help="Learning rate for GRPO.")
parser.add_argument("-kl", "--kl-coeff", type=float, default=0.01, help="KL penalty coefficient for GRPO.")
parser.add_argument(
"-rt",
"--reward-type",
type=str,
default="think_answer_tags",
choices=["think_answer_tags", "boxed", "code"],
help="Reward type for GRPO.",
)
parser.add_argument(
"-ei",
"--eval-interval",
type=int,
default=100,
help="Interval for evaluation. Evaluate every ei training steps.",
)
parser.add_argument(
"-cbsl",
"--data_actor_buffer_size_limit",
type=int,
default=-1,
help="The approximate number of samples to keep in the consumer buffer. After this limit is reached, the producer will stop generating new samples and prioritize model sync until the consumer has processed some samples",
)
# Logging/Checkpointing parameters
parser.add_argument("-si", "--save-interval", type=int, default=100, help="Interval for saving checkpoints.")
parser.add_argument("-sd", "--save-dir", type=str, default="./model", help="Directory for saving checkpoints.")
parser.add_argument(
"-esd", "--eval-save-dir", type=str, default="./eval", help="Directory for saving evaluation results."
)
parser.add_argument(
"-rsd", "--rollout-save-dir", type=str, default="./rollouts", help="Directory for saving rollout loggings."
)
parser.add_argument(
"--enable_profiling", action="store_true", default=False, help="Enable profiling for the training process."
)
args = parser.parse_args()
print(args)
if args.train_minibatch_size is None:
# Default settings: Using train batch size as mini batch size
args.train_minibatch_size = args.train_batch_size
if args.inference_batch_size is None:
        # Default settings: use train batch size as inference batch size; sync the inference model every train step.
args.inference_batch_size = args.train_batch_size
assert (
args.train_minibatch_size * args.num_generations >= args.train_microbatch_size
and args.train_microbatch_size > 0
), "Train micro batch size must be greater than 0 less than train mini batch size * num generations"
assert (
args.train_minibatch_size <= args.train_batch_size and args.train_batch_size % args.train_minibatch_size == 0
), "Train mini batch size must be less than or equals to train batch size and train batch size must be divisible by train mini batch size"
if args.master_address is None:
# Default settings: Using single machine
ray.init(
address="local",
namespace="ray-example",
runtime_env={
"env_vars": {
# "RAY_DEBUG_POST_MORTEM": "1" # enable post-mortem debugging with ray
"TOKENIZERS_PARALLELISM": "false"
},
},
)
else:
        # For Ray distributed multi-machine training, please set _node_ip_address to the IP address of your master node
ray.init(
_node_ip_address=args.master_address,
namespace="ray-example",
_temp_dir=args.ray_dir,
runtime_env={
"env_vars": {
# "RAY_DEBUG_POST_MORTEM": "1" # enable post-mortem debugging with ray
"TOKENIZERS_PARALLELISM": "false"
},
},
)
if args.top_k is None:
if args.backend == "transformers":
args.top_k = 50
elif args.backend == "vllm":
args.top_k = -1
os.environ["TOKENIZERS_PARALLELISM"] = "false" # Disable tokenizers parallelism to avoid deadlock
inference_model_config = dict(path=args.model)
train_model_config = dict(path=args.model, use_flash_attention_2=True, use_cache=False)
generate_config = dict(top_k=args.top_k, top_p=args.top_p, temperature=args.temperature)
if args.backend == "transformers":
inference_model_config.update(
dict(
use_flash_attention_2=True,
torch_dtype=torch.bfloat16,
)
)
generate_config.update(
dict(
max_length=args.max_new_tokens + args.max_prompt_tokens,
do_sample=True,
max_new_tokens=None,
early_stopping=False if args.reward_type == "think_answer_tags" else True,
stop_strings=["</answer>"] if args.reward_type == "think_answer_tags" else None,
)
)
eval_generation_config = {"temperature": 0.6} # used to update generation config for evaluation
elif args.backend == "vllm":
inference_model_config.update(
dict(
gpu_memory_utilization=0.7,
enforce_eager=True,
enable_chunked_prefill=True,
max_model_len=args.max_new_tokens + args.max_prompt_tokens,
tensor_parallel_size=args.producer_tensor_parallel_size,
)
)
generate_config.update(
dict(
max_tokens=args.max_new_tokens, # max new tokens
ignore_eos=True if args.reward_type == "think_answer_tags" else False,
include_stop_str_in_output=True,
stop=["</answer>"] if args.reward_type == "think_answer_tags" else None,
)
)
eval_generation_config = {"temperature": 0.6} # used to update generation config for evaluation
else:
raise ValueError(f"Unsupported backend: {args.backend}")
if args.algo == "GRPO":
# Default Settings
grpo_config = {
"lr": args.learning_rate,
"train_microbatch_size": args.train_microbatch_size,
"num_minibatch_during_rollout": 1, # number of mini batches to pop out from buffer and used for training during rollout of the producer after it syncs the model. Hint, set to a proper value close to the number of mini batches for training that takes roughly the same time as the rollout of the producer. A value that is too large or too small will cause bubble time on the trainer or the producer.
"beta": args.kl_coeff, # KL penalty coefficient
"loss_variation": "sample_level",
"reward_fn_type": args.reward_type,
"max_length": args.max_new_tokens + args.max_prompt_tokens,
"max_new_tokens": args.max_new_tokens,
"response_format_tags": (
{
"think_start": {"text": "<think>", "num_occur": 1},
"think_end": {"text": "</think>", "num_occur": 1},
"answer_start": {"text": "<answer>", "num_occur": 1},
"answer_end": {"text": "</answer>", "num_occur": 1},
}
if args.reward_type == "think_answer_tags"
else None
),
}
elif args.algo == "DAPO":
# DAPO variant settings
grpo_config = {
"filter_range": [0.01, 0.7], # only filter out all zero batch and all one batch
"lr": args.learning_rate,
"train_microbatch_size": args.train_microbatch_size,
"dynamic_batching": True,
"clip_eps_low": 0.2,
"clip_eps_high": 0.28,
"skip_threshold": 20.0,
"beta": 0, # no KL penalty for DAPO
"loss_variation": "token_level",
"soft_over_length_punishment": True,
"max_length": args.max_new_tokens + args.max_prompt_tokens,
"max_new_tokens": args.max_new_tokens,
"cache_length": min(1024, int(args.max_new_tokens / 4)),
"filter_truncated_response": True,
"reward_fn_type": args.reward_type,
"response_format_tags": (
{
"think_start": {"text": "<think>", "num_occur": 1},
"think_end": {"text": "</think>", "num_occur": 1},
"answer_start": {"text": "<answer>", "num_occur": 1},
"answer_end": {"text": "</answer>", "num_occur": 1},
}
if args.reward_type == "think_answer_tags"
else None
),
}
else:
raise ValueError(f"Unsupported algorithm: {args.algo}")
if args.system_prompt is None:
# Default system prompt
args.system_prompt = DEFAUT_SYSTEM_PROMPT[args.reward_type]
launch_distributed(
num_producers=args.num_inferencer,
num_proc_per_producer=inference_model_config.get("tensor_parallel_size", args.producer_tensor_parallel_size),
num_consumer_procs=args.num_trainers,
num_episodes=args.num_episodes,
inference_batch_size=args.inference_batch_size,
inference_microbatch_size=args.inference_microbatch_size,
train_batch_size=args.train_batch_size,
train_minibatch_size=args.train_minibatch_size,
train_dataset_config={
"path": args.dataset,
"max_length": args.max_prompt_tokens,
"system_prompt": args.system_prompt,
},
inference_model_config=inference_model_config,
generate_config=generate_config,
num_generations=args.num_generations,
train_model_config=train_model_config,
grpo_config=grpo_config,
plugin_config={
"tp_size": args.tensor_parallel_size,
"pp_size": args.pipeline_parallel_size,
"microbatch_size": max(
1, args.train_microbatch_size // args.pipeline_parallel_size
), # microbatch size should be set to train_microbatch_size // pp_size
"zero_stage": args.zero_stage,
"max_norm": 1.0,
# "num_layers_per_stage": [18, 10], # Example for 28 layers model with pp_size=2, set manually according to your model architecture
}, # for pp, tp
tokenizer_config={"path": args.tokenizer_path} if args.tokenizer_path else {"path": args.model},
inference_backend=args.backend,
master_addr="localhost",
master_port=args.master_port,
core_algo=args.algo,
project_name=args.project,
save_interval=args.save_interval,
save_dir=os.path.join(args.save_dir, args.project.replace(" ", "_")),
eval_dataset_config=(
{
k: {"path": v, "max_length": args.max_prompt_tokens, "system_prompt": args.system_prompt}
for k, v in json.loads(args.eval_dataset).items()
}
if args.eval_dataset
else None
),
eval_interval=args.eval_interval,
eval_save_dir=os.path.join(args.eval_save_dir, args.project.replace(" ", "_")),
eval_generation_config=eval_generation_config,
log_rollout_interval=20,
rollout_save_dir=args.rollout_save_dir,
enable_profiling=args.enable_profiling,
data_actor_buffer_size_limit=args.data_actor_buffer_size_limit,
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/__init__.py | applications/ColossalChat/coati/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/quant/utils.py | applications/ColossalChat/coati/quant/utils.py | from contextlib import contextmanager
import torch
def _noop(*args, **kwargs):
pass
@contextmanager
def low_resource_init():
"""This context manager disables weight initialization and sets the default float dtype to half."""
old_kaiming_uniform_ = torch.nn.init.kaiming_uniform_
old_uniform_ = torch.nn.init.uniform_
old_normal_ = torch.nn.init.normal_
dtype = torch.get_default_dtype()
try:
torch.nn.init.kaiming_uniform_ = _noop
torch.nn.init.uniform_ = _noop
torch.nn.init.normal_ = _noop
torch.set_default_dtype(torch.half)
yield
finally:
torch.nn.init.kaiming_uniform_ = old_kaiming_uniform_
torch.nn.init.uniform_ = old_uniform_
torch.nn.init.normal_ = old_normal_
torch.set_default_dtype(dtype)
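# Illustrative usage (the model class is a placeholder): build a large model skeleton cheaply in half
# precision before loading the real (e.g. quantized) weights:
#   with low_resource_init():
#       model = MyLargeModel(config)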
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/quant/__init__.py | applications/ColossalChat/coati/quant/__init__.py | from .llama_gptq import load_quant as llama_load_quant
from .utils import low_resource_init
__all__ = [
"llama_load_quant",
"low_resource_init",
]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/quant/llama_gptq/quant.py | applications/ColossalChat/coati/quant/llama_gptq/quant.py | # copied from https://github.com/qwopqwop200/GPTQ-for-LLaMa/blob/past/quant.py
import math
import numpy as np
import torch
import torch.nn as nn
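# Fake-quantize `x`: map it onto the integer grid defined by `scale` and `zero`, clamp to [0, maxq],
# then map it back to the original value range.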
def quantize(x, scale, zero, maxq):
q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)
return scale * (q - zero)
class Quantizer(nn.Module):
def __init__(self, shape=1):
super(Quantizer, self).__init__()
self.register_buffer("maxq", torch.tensor(0))
self.register_buffer("scale", torch.zeros(shape))
self.register_buffer("zero", torch.zeros(shape))
def configure(self, bits, perchannel=False, sym=True, mse=False, norm=2.4, grid=100, maxshrink=0.8):
self.maxq = torch.tensor(2**bits - 1)
self.perchannel = perchannel
self.sym = sym
self.mse = mse
self.norm = norm
self.grid = grid
self.maxshrink = maxshrink
def find_params(self, x, weight=False):
dev = x.device
self.maxq = self.maxq.to(dev)
shape = x.shape
if self.perchannel:
if weight:
x = x.flatten(1)
else:
if len(shape) == 4:
x = x.permute([1, 0, 2, 3])
x = x.flatten(1)
if len(shape) == 3:
x = x.reshape((-1, shape[-1])).t()
if len(shape) == 2:
x = x.t()
else:
x = x.flatten().unsqueeze(0)
tmp = torch.zeros(x.shape[0], device=dev)
xmin = torch.minimum(x.min(1)[0], tmp)
xmax = torch.maximum(x.max(1)[0], tmp)
if self.sym:
xmax = torch.maximum(torch.abs(xmin), xmax)
tmp = xmin < 0
if torch.any(tmp):
xmin[tmp] = -xmax[tmp]
tmp = (xmin == 0) & (xmax == 0)
xmin[tmp] = -1
xmax[tmp] = +1
self.scale = (xmax - xmin) / self.maxq
if self.sym:
self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2)
else:
self.zero = torch.round(-xmin / self.scale)
if self.mse:
best = torch.full([x.shape[0]], float("inf"), device=dev)
for i in range(int(self.maxshrink * self.grid)):
p = 1 - i / self.grid
xmin1 = p * xmin
xmax1 = p * xmax
scale1 = (xmax1 - xmin1) / self.maxq
zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero
q = quantize(x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq)
q -= x
q.abs_()
q.pow_(self.norm)
err = torch.sum(q, 1)
tmp = err < best
if torch.any(tmp):
best[tmp] = err[tmp]
self.scale[tmp] = scale1[tmp]
self.zero[tmp] = zero1[tmp]
if not self.perchannel:
if weight:
tmp = shape[0]
else:
tmp = shape[1] if len(shape) != 3 else shape[2]
self.scale = self.scale.repeat(tmp)
self.zero = self.zero.repeat(tmp)
if weight:
shape = [-1] + [1] * (len(shape) - 1)
self.scale = self.scale.reshape(shape)
self.zero = self.zero.reshape(shape)
return
if len(shape) == 4:
self.scale = self.scale.reshape((1, -1, 1, 1))
self.zero = self.zero.reshape((1, -1, 1, 1))
if len(shape) == 3:
self.scale = self.scale.reshape((1, 1, -1))
self.zero = self.zero.reshape((1, 1, -1))
if len(shape) == 2:
self.scale = self.scale.unsqueeze(0)
self.zero = self.zero.unsqueeze(0)
def quantize(self, x):
if self.ready():
return quantize(x, self.scale, self.zero, self.maxq)
return x
def enabled(self):
return self.maxq > 0
def ready(self):
return torch.all(self.scale != 0)
try:
import quant_cuda
except ImportError:
print("CUDA extension not installed.")
# Assumes layer is perfectly divisible into 256 * 256 blocks
class QuantLinear(nn.Module):
def __init__(self, bits, groupsize, infeatures, outfeatures):
super().__init__()
if bits not in [2, 3, 4, 8]:
raise NotImplementedError("Only 2,3,4,8 bits are supported.")
self.infeatures = infeatures
self.outfeatures = outfeatures
self.bits = bits
if groupsize != -1 and groupsize < 32 and groupsize != int(math.pow(2, int(math.log2(groupsize)))):
raise NotImplementedError("groupsize supports powers of 2 greater than 32. (e.g. : 32,64,128,etc)")
groupsize = groupsize if groupsize != -1 else infeatures
self.groupsize = groupsize
self.register_buffer(
"qzeros", torch.zeros((math.ceil(infeatures / groupsize), outfeatures // 256 * (bits * 8)), dtype=torch.int)
)
self.register_buffer("scales", torch.zeros((math.ceil(infeatures / groupsize), outfeatures)))
self.register_buffer("bias", torch.zeros(outfeatures))
self.register_buffer("qweight", torch.zeros((infeatures // 256 * (bits * 8), outfeatures), dtype=torch.int))
self._initialized_quant_state = False
def pack(self, linear, scales, zeros):
scales = scales.t().contiguous()
zeros = zeros.t().contiguous()
scale_zeros = zeros * scales
self.scales = scales.clone()
if linear.bias is not None:
self.bias = linear.bias.clone()
intweight = []
for idx in range(self.infeatures):
g_idx = idx // self.groupsize
intweight.append(
torch.round((linear.weight.data[:, idx] + scale_zeros[g_idx]) / self.scales[g_idx]).to(torch.int)[
:, None
]
)
intweight = torch.cat(intweight, dim=1)
intweight = intweight.t().contiguous()
intweight = intweight.numpy().astype(np.uint32)
qweight = np.zeros((intweight.shape[0] // 256 * (self.bits * 8), intweight.shape[1]), dtype=np.uint32)
i = 0
row = 0
while row < qweight.shape[0]:
if self.bits in [2, 4, 8]:
for j in range(i, i + (32 // self.bits)):
qweight[row] |= intweight[j] << (self.bits * (j - i))
i += 32 // self.bits
row += 1
elif self.bits == 3:
for j in range(i, i + 10):
qweight[row] |= intweight[j] << (3 * (j - i))
i += 10
qweight[row] |= intweight[i] << 30
row += 1
qweight[row] |= (intweight[i] >> 2) & 1
i += 1
for j in range(i, i + 10):
qweight[row] |= intweight[j] << (3 * (j - i) + 1)
i += 10
qweight[row] |= intweight[i] << 31
row += 1
qweight[row] |= (intweight[i] >> 1) & 0x3
i += 1
for j in range(i, i + 10):
qweight[row] |= intweight[j] << (3 * (j - i) + 2)
i += 10
row += 1
else:
raise NotImplementedError("Only 2,3,4,8 bits are supported.")
qweight = qweight.astype(np.int32)
self.qweight = torch.from_numpy(qweight)
zeros -= 1
zeros = zeros.numpy().astype(np.uint32)
qzeros = np.zeros((zeros.shape[0], zeros.shape[1] // 256 * (self.bits * 8)), dtype=np.uint32)
i = 0
col = 0
while col < qzeros.shape[1]:
if self.bits in [2, 4, 8]:
for j in range(i, i + (32 // self.bits)):
qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i))
i += 32 // self.bits
col += 1
elif self.bits == 3:
for j in range(i, i + 10):
qzeros[:, col] |= zeros[:, j] << (3 * (j - i))
i += 10
qzeros[:, col] |= zeros[:, i] << 30
col += 1
qzeros[:, col] |= (zeros[:, i] >> 2) & 1
i += 1
for j in range(i, i + 10):
qzeros[:, col] |= zeros[:, j] << (3 * (j - i) + 1)
i += 10
qzeros[:, col] |= zeros[:, i] << 31
col += 1
qzeros[:, col] |= (zeros[:, i] >> 1) & 0x3
i += 1
for j in range(i, i + 10):
qzeros[:, col] |= zeros[:, j] << (3 * (j - i) + 2)
i += 10
col += 1
else:
raise NotImplementedError("Only 2,3,4,8 bits are supported.")
qzeros = qzeros.astype(np.int32)
self.qzeros = torch.from_numpy(qzeros)
def forward(self, x):
intermediate_dtype = torch.float32
if not self._initialized_quant_state:
# Do we even have a bias? Check for at least one non-zero element.
if self.bias is not None and bool(torch.any(self.bias != 0)):
# Then make sure it's the right type.
self.bias.data = self.bias.data.to(intermediate_dtype)
else:
self.bias = None
outshape = list(x.shape)
outshape[-1] = self.outfeatures
x = x.reshape(-1, x.shape[-1])
if self.bias is None:
y = torch.zeros(x.shape[0], outshape[-1], dtype=intermediate_dtype, device=x.device)
else:
y = self.bias.clone().repeat(x.shape[0], 1)
output_dtype = x.dtype
x = x.to(intermediate_dtype)
if self.bits == 2:
quant_cuda.vecquant2matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)
elif self.bits == 3:
quant_cuda.vecquant3matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)
elif self.bits == 4:
quant_cuda.vecquant4matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)
elif self.bits == 8:
quant_cuda.vecquant8matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)
else:
raise NotImplementedError("Only 2,3,4,8 bits are supported.")
y = y.to(output_dtype)
return y.reshape(outshape)
def make_quant(module, names, bits, groupsize, name=""):
if isinstance(module, QuantLinear):
return
for attr in dir(module):
tmp = getattr(module, attr)
name1 = name + "." + attr if name != "" else attr
if name1 in names:
setattr(module, attr, QuantLinear(bits, groupsize, tmp.in_features, tmp.out_features))
for name1, child in module.named_children():
make_quant(child, names, bits, groupsize, name + "." + name1 if name != "" else name1)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/quant/llama_gptq/loader.py | applications/ColossalChat/coati/quant/llama_gptq/loader.py | import torch
import torch.nn as nn
from .model_utils import find_layers
from .quant import make_quant
def load_quant(model: nn.Module, checkpoint: str, wbits: int, groupsize: int):
model = model.eval()
    layers = find_layers(model)
    # ignore lm head
for name in ["lm_head"]:
if name in layers:
del layers[name]
make_quant(model, layers, wbits, groupsize)
if checkpoint.endswith(".safetensors"):
from safetensors.torch import load_file as safe_load
model.load_state_dict(safe_load(checkpoint))
else:
model.load_state_dict(torch.load(checkpoint))
return model
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/quant/llama_gptq/model_utils.py | applications/ColossalChat/coati/quant/llama_gptq/model_utils.py | # copied from https://github.com/qwopqwop200/GPTQ-for-LLaMa/blob/past/modelutils.py
import torch.nn as nn
def find_layers(module, layers=[nn.Conv2d, nn.Linear], name=""):
if type(module) in layers:
return {name: module}
res = {}
for name1, child in module.named_children():
res.update(find_layers(child, layers=layers, name=name + "." + name1 if name != "" else name1))
return res
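# Example (illustrative): for a transformer model, `find_layers(model)` returns a dict mapping dotted
# module names such as "model.layers.0.self_attn.q_proj" to the corresponding nn.Linear/nn.Conv2d modules.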
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/quant/llama_gptq/__init__.py | applications/ColossalChat/coati/quant/llama_gptq/__init__.py | from .loader import load_quant
__all__ = [
"load_quant",
]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/experience_buffer/naive.py | applications/ColossalChat/coati/experience_buffer/naive.py | import random
from typing import List
import torch
from coati.experience_maker.base import Experience
from colossalai.logging import get_dist_logger
from .base import ExperienceBuffer
from .utils import BufferItem, make_experience_batch, split_experience_batch
logger = get_dist_logger()
class NaiveExperienceBuffer(ExperienceBuffer):
"""Naive experience buffer class. It stores experience.
Args:
sample_batch_size (int): Batch size when sampling.
limit (int, optional): Limit of number of experience samples. A number <= 0 means unlimited. Defaults to 0.
cpu_offload (bool, optional): Whether to offload experience to cpu when sampling. Defaults to True.
"""
def __init__(self, sample_batch_size: int, limit: int = 0, cpu_offload: bool = True) -> None:
super().__init__(sample_batch_size, limit)
self.cpu_offload = cpu_offload
self.target_device = torch.device(f"cuda:{torch.cuda.current_device()}")
# TODO(ver217): add prefetch
self.items: List[BufferItem] = []
self.rng_sequence = []
self.ptr = 0
@torch.no_grad()
def append(self, experience: Experience) -> None:
if self.cpu_offload:
experience.to_device(torch.device("cpu"))
items = split_experience_batch(experience)
self.items.extend(items)
if self.limit > 0:
samples_to_remove = len(self.items) - self.limit
if samples_to_remove > 0:
logger.warning(f"Experience buffer is full. Removing {samples_to_remove} samples.")
self.items = self.items[samples_to_remove:]
self.rng_sequence = [i for i in range(len(self.items))]
random.shuffle(self.rng_sequence)
self.ptr = 0
def clear(self) -> None:
self.items.clear()
@torch.no_grad()
def sample(self) -> Experience:
"""
Randomly samples experiences from the buffer.
Returns:
A batch of sampled experiences.
"""
items = []
for _ in range(self.sample_batch_size):
self.ptr = (self.ptr + 1) % len(self.items)
items.append(self.items[self.rng_sequence[self.ptr]])
experience = make_experience_batch(items)
if self.cpu_offload:
experience.to_device(self.target_device)
return experience
def __len__(self) -> int:
return len(self.items)
def __getitem__(self, idx: int) -> BufferItem:
return self.items[idx]
def collate_fn(self, batch) -> Experience:
experience = make_experience_batch(batch)
return experience
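# Usage sketch (illustrative; `exp` stands for an Experience produced by an experience maker):
#     buffer = NaiveExperienceBuffer(sample_batch_size=8, limit=1024, cpu_offload=True)
#     buffer.append(exp)       # splits the batch into BufferItems and reshuffles the index sequence
#     batch = buffer.sample()  # re-batches sample_batch_size items and moves them to the target device
# Sampling walks a shuffled index sequence rather than drawing independently with replacement.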
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/experience_buffer/utils.py | applications/ColossalChat/coati/experience_buffer/utils.py | from dataclasses import dataclass
from typing import List, Optional
import torch
import torch.nn.functional as F
from coati.experience_maker.base import Experience
@dataclass
class BufferItem:
"""BufferItem is an item of experience data.
Shapes of each tensor:
sequences: (S)
action_log_probs: (A)
values: (1)
reward: (1)
advantages: (1)
attention_mask: (S)
action_mask: (A)
"A" is the number of actions.
"""
sequences: torch.Tensor
action_log_probs: torch.Tensor
values: torch.Tensor
reward: torch.Tensor
kl: torch.Tensor
advantages: torch.Tensor
attention_mask: Optional[torch.LongTensor]
action_mask: Optional[torch.BoolTensor]
def split_experience_batch(experience: Experience) -> List[BufferItem]:
batch_size = experience.sequences.size(0)
batch_kwargs = [{} for _ in range(batch_size)]
keys = ("sequences", "action_log_probs", "values", "reward", "kl", "advantages", "attention_mask", "action_mask")
for key in keys:
value = getattr(experience, key)
if isinstance(value, torch.Tensor):
vals = torch.unbind(value)
else:
# None
vals = [value for _ in range(batch_size)]
assert batch_size == len(vals)
for i, v in enumerate(vals):
batch_kwargs[i][key] = v
items = [BufferItem(**kwargs) for kwargs in batch_kwargs]
return items
def _zero_pad_sequences(sequences: List[torch.Tensor], side: str = "left") -> torch.Tensor:
assert side in ("left", "right")
max_len = max(seq.size(0) for seq in sequences)
padded_sequences = []
for seq in sequences:
pad_len = max_len - seq.size(0)
padding = (pad_len, 0) if side == "left" else (0, pad_len)
padded_sequences.append(F.pad(seq, padding))
return torch.stack(padded_sequences, dim=0)
def make_experience_batch(items: List[BufferItem]) -> Experience:
kwargs = {}
to_pad_keys = set(("action_log_probs", "action_mask"))
keys = ("sequences", "action_log_probs", "values", "reward", "kl", "advantages", "attention_mask", "action_mask")
for key in keys:
vals = [getattr(item, key) for item in items]
if key in to_pad_keys:
batch_data = _zero_pad_sequences(vals)
else:
batch_data = torch.stack(vals, dim=0)
kwargs[key] = batch_data
return Experience(**kwargs)
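# Round-trip sketch (illustrative): split_experience_batch and make_experience_batch are inverses
# up to re-padding of the variable-length action tensors.
#     items = split_experience_batch(experience)  # one BufferItem per sample in the batch
#     rebuilt = make_experience_batch(items)      # stacked (B, ...) tensors again
# Only "action_log_probs" and "action_mask" are zero-padded (on the left by default); every other
# field is stacked as-is, so all items must share those shapes.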
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/experience_buffer/__init__.py | applications/ColossalChat/coati/experience_buffer/__init__.py | from .base import ExperienceBuffer
from .naive import NaiveExperienceBuffer
__all__ = ["ExperienceBuffer", "NaiveExperienceBuffer"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/experience_buffer/base.py | applications/ColossalChat/coati/experience_buffer/base.py | from abc import ABC, abstractmethod
from typing import Any
from coati.experience_maker.base import Experience
class ExperienceBuffer(ABC):
"""Experience buffer base class. It stores experience.
Args:
sample_batch_size (int): Batch size when sampling.
limit (int, optional): Limit of number of experience samples. A number <= 0 means unlimited. Defaults to 0.
"""
def __init__(self, sample_batch_size: int, limit: int = 0) -> None:
super().__init__()
self.sample_batch_size = sample_batch_size
# limit <= 0 means unlimited
self.limit = limit
@abstractmethod
def append(self, experience: Experience) -> None:
pass
@abstractmethod
def clear(self) -> None:
pass
@abstractmethod
def sample(self) -> Experience:
pass
@abstractmethod
def __len__(self) -> int:
pass
@abstractmethod
def __getitem__(self, idx: int) -> Any:
pass
@abstractmethod
def collate_fn(self, batch: Any) -> Experience:
pass
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/experience_maker/naive.py | applications/ColossalChat/coati/experience_maker/naive.py | """
experience maker.
"""
from typing import Any
import torch
import torch.nn.functional as F
from coati.dataset.utils import find_first_occurrence_subsequence
from coati.models import Critic, RewardModel
from coati.models.generation import generate
from coati.models.utils import calc_action_log_probs, compute_reward
from transformers import PreTrainedModel, PreTrainedTokenizer
from colossalai.logging import get_dist_logger
from .base import Experience, ExperienceMaker
logger = get_dist_logger()
import torch.distributed as dist
def is_rank_0() -> bool:
return not dist.is_initialized() or dist.get_rank() == 0
class NaiveExperienceMaker(ExperienceMaker):
"""
Naive experience maker.
"""
def __init__(
self,
actor: PreTrainedModel,
critic: Critic,
reward_model: RewardModel,
initial_model: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
kl_coef: float = 0.01,
gamma: float = 1.0,
lam: float = 0.95,
use_grpo: bool = False,
num_generation: int = 8,
inference_batch_size: int = None,
logits_forward_batch_size: int = 2,
) -> None:
super().__init__(actor, critic, reward_model, initial_model)
self.tokenizer = tokenizer
self.kl_coef = kl_coef
self.gamma = gamma
self.lam = lam
self.use_grpo = use_grpo
self.num_generation = num_generation
self.inference_batch_size = inference_batch_size
self.logits_forward_batch_size = logits_forward_batch_size
if not self.use_grpo:
assert self.critic is not None, "Critic model is required for PPO training."
else:
assert self.critic is None, "Critic model is not required for GRPO training."
assert self.num_generation > 1, "Number of generations should be greater than 1 for GRPO training."
@torch.inference_mode()
def calculate_advantage(self, value: torch.Tensor, reward: torch.Tensor, num_actions: int) -> torch.Tensor:
"""
Calculates the advantage values for each action based on the value and reward tensors.
Args:
value (torch.Tensor): Tensor containing the predicted values from critic.
reward (torch.Tensor): reward of the shape [B, len].
num_actions (int): Number of actions.
Returns:
torch.Tensor: Tensor containing the calculated advantages for each action.
"""
lastgaelam = 0
advantages_reversed = []
for t in reversed(range(num_actions)):
nextvalues = value[:, t + 1] if t < num_actions - 1 else 0.0
delta = reward[:, t] + self.gamma * nextvalues - value[:, t]
lastgaelam = delta + self.gamma * self.lam * lastgaelam
advantages_reversed.append(lastgaelam)
advantages = torch.stack(advantages_reversed[::-1], dim=1)
return advantages
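    # Worked example of the recursion above (illustrative numbers, gamma = 1.0, lam = 0.95,
    # two action steps where step 2 is the last one):
    #     delta_2 = r_2 + 0   - V_2,          A_2 = delta_2
    #     delta_1 = r_1 + V_2 - V_1,          A_1 = delta_1 + 0.95 * A_2
    # i.e. the loop is the standard GAE recursion run backwards over the action positions.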
@torch.no_grad()
def make_experience(
self, input_ids: torch.Tensor, attention_mask: torch.Tensor, gt_answer: Any = None, **generate_kwargs
) -> Experience:
"""
Generates an experience using the given input_ids and attention_mask.
Args:
input_ids (torch.Tensor): The input tensor containing the tokenized input sequence.
attention_mask (torch.Tensor): The attention mask tensor indicating which tokens to attend to.
**generate_kwargs: Additional keyword arguments for the generation process.
Returns:
Experience: The generated experience object.
"""
self.actor.eval()
if self.critic:
self.critic.eval()
self.initial_model.eval()
self.reward_model.eval()
pad_token_id = self.tokenizer.pad_token_id
stop_token_ids = generate_kwargs.get("stop_token_ids", None)
        if stop_token_ids is None:
            pass
        elif isinstance(stop_token_ids, int):
            stop_token_ids = [[stop_token_ids]]
        elif isinstance(stop_token_ids[0], int):
            stop_token_ids = [stop_token_ids]
        elif isinstance(stop_token_ids[0], list):
            pass
        else:
            raise ValueError(
                f"stop_token_ids should be None, a list of lists of integers, a list of integers or an integer. Got {stop_token_ids}"
            )
generate_kwargs["stop_token_ids"] = stop_token_ids
# Hack: manually initialize cache_position to address transformer version conflict
if generate_kwargs.get("cache_position", None) is None and generate_kwargs.get("use_cache", False) is True:
generate_kwargs["cache_position"] = torch.arange(
0, input_ids.shape[1], dtype=torch.long, device=input_ids.device
)
        torch.manual_seed(41)  # for tp, guarantee the same input for the reward model
if self.use_grpo and self.num_generation > 1:
# Generate multiple responses for each prompt
input_ids = input_ids.repeat_interleave(self.num_generation, dim=0)
gt_answer_tmp = []
for t in gt_answer:
gt_answer_tmp.extend([t] * self.num_generation)
gt_answer = gt_answer_tmp
if self.inference_batch_size is None:
self.inference_batch_size = input_ids.size(0)
batch_sequences = []
batch_input_ids_rm = []
batch_attention_mask_rm = []
batch_attention_mask = []
batch_r = []
batch_action_log_probs = []
batch_base_action_log_probs = []
batch_action_mask = []
num_actions = 0
for inference_mini_batch_id in range(0, input_ids.size(0), self.inference_batch_size):
s, e = inference_mini_batch_id, inference_mini_batch_id + self.inference_batch_size
if input_ids[s:e].size(0) == 0:
break
sequences = generate(self.actor, input_ids[s:e], self.tokenizer, **generate_kwargs)
            # Pad to max_length to avoid an OOM error after thousands of steps
            sequences = F.pad(sequences, (0, generate_kwargs["max_length"] - sequences.size(1)), value=pad_token_id)
            # All sequences in this mini-batch now share the same padded length
sequence_length = sequences.size(1)
# Calculate auxiliary tensors
attention_mask = None
if pad_token_id is not None:
attention_mask = sequences.not_equal(pad_token_id).to(dtype=torch.long, device=sequences.device)
input_len = input_ids.size(1)
if stop_token_ids is None:
# End the sequence with eos token
eos_token_id = self.tokenizer.eos_token_id
if eos_token_id is None:
action_mask = torch.ones_like(sequences, dtype=torch.bool)
else:
# Left padding may be applied, only mask action
action_mask = (sequences[:, input_len:] == eos_token_id).cumsum(dim=-1) == 0
action_mask = F.pad(action_mask, (1 + input_len, -1), value=True) # include eos token and input
else:
# stop_token_ids are given, generation ends with stop_token_ids
action_mask = torch.ones_like(sequences, dtype=torch.bool)
for i in range(sequences.size(0)):
stop_token_pos = [
find_first_occurrence_subsequence(
sequences[i][input_len:], torch.tensor(stop_token_id).to(sequences.device)
)
for stop_token_id in stop_token_ids
]
stop_index = min([i for i in stop_token_pos if i != -1], default=-1)
stop_token_id = stop_token_ids[stop_token_pos.index(stop_index)]
if stop_index == -1:
                        # Sequence does not contain stop_token_ids; this should not normally happen
logger.warning(
"Generated sequence does not contain stop_token_ids. Please check your chat template config"
)
print(self.tokenizer.decode(sequences[i], skip_special_tokens=True))
else:
# Keep stop tokens
stop_index = input_len + stop_index
action_mask[i, stop_index + len(stop_token_id) :] = False
            generation_end_index = action_mask.sum(dim=-1) - 1
action_mask[:, :input_len] = False
action_mask = action_mask[:, 1:]
action_mask = action_mask[:, -(sequences.size(1) - input_len) :]
num_actions = action_mask.size(1)
torch.cuda.empty_cache()
with torch.inference_mode():
actor_output = []
base_model_output = []
for i in range(0, sequences.size(0), self.logits_forward_batch_size):
actor_output.append(
self.actor(
input_ids=sequences[i : i + self.logits_forward_batch_size],
attention_mask=attention_mask[i : i + self.logits_forward_batch_size],
use_cache=False,
)["logits"]
)
base_model_output.append(
self.initial_model(
input_ids=sequences[i : i + self.logits_forward_batch_size],
attention_mask=attention_mask[i : i + self.logits_forward_batch_size],
use_cache=False,
)["logits"]
)
actor_output = torch.cat(actor_output, dim=0)
base_model_output = torch.cat(base_model_output, dim=0)
action_log_probs = calc_action_log_probs(actor_output, sequences, num_actions)
base_action_log_probs = calc_action_log_probs(base_model_output, sequences, num_actions)
# Convert to right padding for the reward model and the critic model
input_ids_rm = torch.zeros_like(sequences, device=sequences.device)
response_start = []
response_end = []
attention_mask_rm = torch.zeros_like(sequences, device=sequences.device)
for i in range(sequences.size(0)):
sequence = sequences[i]
bos_index = (sequence != pad_token_id).nonzero().reshape([-1])[0]
eos_index = generation_end_index[i] + 1 # include the stop token
sequence_to_pad = sequence[bos_index:eos_index]
response_start.append(input_len - bos_index)
response_end.append(eos_index - bos_index)
sequence_padded = F.pad(
sequence_to_pad, (0, sequence_length - sequence_to_pad.size(0)), value=self.tokenizer.pad_token_id
)
input_ids_rm[i] = sequence_padded
if sequence_length - sequence_to_pad.size(0) > 0:
attention_mask_rm[i, : sequence_to_pad.size(0) + 1] = 1
else:
attention_mask_rm[i, :] = 1
attention_mask_rm = attention_mask_rm.to(dtype=torch.bool)
r = self.reward_model(
input_ids=input_ids_rm.to(dtype=torch.long, device=sequences.device),
attention_mask=attention_mask_rm.to(device=sequences.device),
response_start=response_start,
response_end=response_end,
gt_answer=gt_answer[s:e],
)
batch_sequences.append(sequences)
batch_input_ids_rm.append(input_ids_rm)
batch_attention_mask_rm.append(attention_mask_rm)
batch_attention_mask.append(attention_mask)
batch_r.append(r)
batch_action_log_probs.append(action_log_probs.cpu())
batch_base_action_log_probs.append(base_action_log_probs.cpu())
batch_action_mask.append(action_mask)
sequences = torch.cat(batch_sequences, dim=0)
input_ids_rm = torch.cat(batch_input_ids_rm, dim=0)
attention_mask_rm = torch.cat(batch_attention_mask_rm, dim=0)
attention_mask = torch.cat(batch_attention_mask, dim=0)
r = torch.cat(batch_r, dim=0)
action_log_probs = torch.cat(batch_action_log_probs, dim=0).to(sequences.device)
base_action_log_probs = torch.cat(batch_base_action_log_probs, dim=0).to(sequences.device)
action_mask = torch.cat(batch_action_mask, dim=0).to(sequences.device)
if not self.use_grpo:
value = self.critic(
input_ids=input_ids_rm.to(dtype=torch.long, device=sequences.device),
attention_mask=attention_mask_rm.to(device=sequences.device),
)
value = value[:, -num_actions:] * action_mask
reward, kl = compute_reward(
r, self.kl_coef, action_log_probs, base_action_log_probs, action_mask=action_mask
)
advantages = self.calculate_advantage(value, reward, num_actions)
advantages = advantages.detach()
value = value.detach()
else:
# GRPO advantage calculation
kl = torch.sum(
-self.kl_coef * (action_log_probs - base_action_log_probs) * action_mask, dim=-1
) / torch.sum(
action_mask, dim=-1
) # address numerical instability issue
r = kl + r
mean_gr = r.view(-1, self.num_generation).mean(dim=1)
std_gr = r.view(-1, self.num_generation).std(dim=1)
mean_gr = mean_gr.repeat_interleave(self.num_generation, dim=0)
std_gr = std_gr.repeat_interleave(self.num_generation, dim=0)
advantages = (r - mean_gr) / (std_gr + 1e-4)
value = r.detach() # dummy value
r = r.detach()
return Experience(
sequences.cpu(),
action_log_probs.cpu(),
value.cpu(),
r.cpu(),
kl.cpu(),
advantages.cpu(),
attention_mask.cpu(),
action_mask.cpu(),
)
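# Note on the GRPO branch above (a sketch of the math, not extra behaviour): each prompt's
# num_generation responses share a group mean and standard deviation of the KL-regularised
# reward, and the per-response advantage is the z-score
#     A_i = (r_i + kl_i - mean_group) / (std_group + 1e-4)
# so that code path needs neither a critic forward pass nor the GAE recursion.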
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/experience_maker/__init__.py | applications/ColossalChat/coati/experience_maker/__init__.py | from .base import Experience, ExperienceMaker
from .naive import NaiveExperienceMaker
__all__ = ["Experience", "ExperienceMaker", "NaiveExperienceMaker"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/experience_maker/base.py | applications/ColossalChat/coati/experience_maker/base.py | from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional
import torch
from coati.models import Critic, RewardModel
from transformers import PreTrainedModel
@dataclass
class Experience:
"""Experience is a batch of data.
    All tensors in the batch share the same sequence length and number of actions.
Left padding for sequences is applied.
Shapes of each tensor:
sequences: (B, S)
action_log_probs: (B, A)
values: (B)
reward: (B)
advantages: (B)
attention_mask: (B, S)
action_mask: (B, A)
"A" is the number of actions.
"""
sequences: torch.Tensor
action_log_probs: torch.Tensor
values: torch.Tensor
reward: torch.Tensor
kl: torch.Tensor
advantages: torch.Tensor
attention_mask: Optional[torch.LongTensor]
action_mask: Optional[torch.BoolTensor]
@torch.no_grad()
def to_device(self, device: torch.device) -> None:
self.sequences = self.sequences.to(device)
self.action_log_probs = self.action_log_probs.to(device)
self.values = self.values.to(device)
self.reward = self.reward.to(device)
self.advantages = self.advantages.to(device)
self.kl = self.kl.to(device)
if self.attention_mask is not None:
self.attention_mask = self.attention_mask.to(device)
if self.action_mask is not None:
self.action_mask = self.action_mask.to(device)
def pin_memory(self):
self.sequences = self.sequences.pin_memory()
self.action_log_probs = self.action_log_probs.pin_memory()
self.values = self.values.pin_memory()
self.reward = self.reward.pin_memory()
self.advantages = self.advantages.pin_memory()
self.kl = self.kl.pin_memory()
if self.attention_mask is not None:
self.attention_mask = self.attention_mask.pin_memory()
if self.action_mask is not None:
self.action_mask = self.action_mask.pin_memory()
return self
class ExperienceMaker(ABC):
"""
Base class for experience makers.
"""
def __init__(
self, actor: PreTrainedModel, critic: Critic, reward_model: RewardModel, initial_model: PreTrainedModel
) -> None:
super().__init__()
self.actor = actor
self.critic = critic
self.reward_model = reward_model
self.initial_model = initial_model
@abstractmethod
def make_experience(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, **generate_kwargs) -> Experience:
"""
Abstract method to generate an experience.
Args:
input_ids (torch.Tensor): The input tensor.
attention_mask (torch.Tensor): The attention mask tensor.
**generate_kwargs: Additional keyword arguments for generating the experience.
Returns:
Experience: The generated experience.
"""
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/models/reward_model.py | applications/ColossalChat/coati/models/reward_model.py | """
reward model
"""
from typing import Optional
import torch
import torch.nn as nn
from coati.models import BaseModel
from transformers import PretrainedConfig
class RewardModel(BaseModel):
"""
Reward model class.
Args:
        pretrained (str): HuggingFace model name or local model path
config: PretrainedConfig object
**kwargs: all other kwargs as in AutoModel.from_pretrained
"""
def __init__(self, pretrained: str = None, config: Optional[PretrainedConfig] = None, **kwargs) -> None:
super().__init__(pretrained=pretrained, config=config, **kwargs)
self.value_head = nn.Linear(self.last_hidden_state_size, 1)
self.value_head.weight.data.normal_(mean=0.0, std=1 / (self.last_hidden_state_size + 1))
def forward(
self, input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, **kwargs
) -> torch.Tensor:
outputs = self.model(input_ids, attention_mask=attention_mask)
last_hidden_states = outputs["last_hidden_state"]
sequence_lengths = torch.max(attention_mask * torch.arange(input_ids.size(1), device=input_ids.device), dim=1)[
0
]
sequence_hidden_states = last_hidden_states[torch.arange(last_hidden_states.size(0)), sequence_lengths].type(
self.value_head.weight.dtype
)
values = self.value_head(sequence_hidden_states).squeeze(-1) # Ensure shape is (B,)
return values
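    # Shape sketch (illustrative): for a right-padded batch with attention_mask
    #     [[1, 1, 1, 0],
    #      [1, 1, 0, 0]]
    # sequence_lengths evaluates to [2, 1], the index of the last attended token per sample,
    # and the value head is applied only to that hidden state, yielding one scalar per sample.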
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def get_output_embeddings(self):
return self.model.get_output_embeddings()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/applications/ColossalChat/coati/models/lora.py | applications/ColossalChat/coati/models/lora.py | """
LORA utils
"""
import dataclasses
import math
import warnings
from typing import List, Optional, Union
import loralib as lora
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from colossalai.logging import get_dist_logger
logger = get_dist_logger()
@dataclasses.dataclass
class LoraManager:
able_to_merge: bool = True
lora_manager = LoraManager()
@dataclasses.dataclass
class LoraConfig:
r: int = 0
lora_alpha: int = 32
linear_lora_dropout: float = 0.1
embedding_lora_dropout: float = 0.0
lora_train_bias: str = "none"
lora_initialization_method: str = "kaiming_uniform"
target_modules: List = None
@classmethod
def from_file(cls, config_file: str):
import json
with open(config_file, "r") as f:
config = json.load(f)
return cls(**config)
class LoraBase(lora.LoRALayer, nn.Module):
def __init__(
self,
r: int = 0,
lora_alpha: int = 32,
lora_dropout: float = 0.1,
lora_initialization_method: str = "kaiming_uniform",
):
nn.Module.__init__(self)
lora.LoRALayer.__init__(self, r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, merge_weights=False)
self.r = r
self.lora_alpha = lora_alpha
self.lora_dropout = nn.Dropout(lora_dropout)
self.merged = False
self.lora_initialization_method = lora_initialization_method
self.weight = None
self.bias = None
self.lora_A = None
self.lora_B = None
def reset_parameters(self):
if hasattr(self, "lora_A"):
if self.lora_initialization_method == "kaiming_uniform" or self.weight.size() != (
self.out_features,
self.in_features,
):
# Initialize A with the default values for nn.Linear and set B to zero.
nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
nn.init.zeros_(self.lora_B)
elif self.lora_initialization_method == "PiSSA":
# PiSSA method in this paper: https://arxiv.org/abs/2404.02948
# Assume the SVD of the original weights is W = USV^T
                # Initialize the frozen weight to the residual W - U[:, :r] S[:r] V^T[:r, :], i.e. the less significant part of W
                # Only A and B are trainable; they are initialized to S[:r]^0.5 V^T[:r, :] and U[:, :r] S[:r]^0.5 respectively
# self.scaling = 1.
# SVD
U, S, Vh = torch.svd_lowrank(
self.weight.to(torch.float32).data, self.r, niter=4
                )  # U: [out_features, r], S: [r], Vh: [in_features, r]
# weight_backup = self.weight.clone()
# Initialize A, B
S = S / self.scaling
self.lora_B.data = (U @ torch.diag(torch.sqrt(S))).to(torch.float32).contiguous()
self.lora_A.data = (torch.diag(torch.sqrt(S)) @ Vh.T).to(torch.float32).contiguous()
# Initialize weight
# To reduce floating point error, we use residual instead of directly using U[:, :self.r] @ S[:self.r] @ Vh[:self.r, :]
self.weight.data = (
((self.weight - self.scaling * self.lora_B @ self.lora_A)).contiguous().to(self.weight.dtype)
)
self.lora_A.requires_grad = True
self.lora_B.requires_grad = True
else:
raise ValueError(f"Unknown LoRA initialization method {self.lora_initialization_method}")
def train(self, mode: bool = True):
"""
        This function runs when model.train() is invoked. It is used to prepare the LoRA layer for training.
"""
self.training = mode
if mode and self.merged:
warnings.warn("Invoke module.train() would unmerge LoRA weights.")
raise NotImplementedError("LoRA unmerge is not tested.")
elif not mode and not self.merged and lora_manager.able_to_merge:
warnings.warn("Invoke module.eval() would merge LoRA weights.")
# Merge the weights and mark it
if self.r > 0:
self.weight.data += self.lora_B @ self.lora_A * self.scaling
delattr(self, "lora_A")
delattr(self, "lora_B")
self.merged = True
return self
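    # Merge sketch (what eval() does above, assuming r > 0 and lora_manager.able_to_merge):
    #     W_merged = W + scaling * (lora_B @ lora_A)
    # After the merge the LoRA factors are deleted, so the module behaves like a plain layer and
    # cannot be switched back to training without re-creating the adapters.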
class LoraLinear(LoraBase):
"""Replace in-place ops to out-of-place ops to fit gemini. Convert a torch.nn.Linear to LoraLinear."""
def __init__(
self,
weight: nn.Parameter,
bias: Union[nn.Parameter, bool],
r: int = 0,
lora_alpha: int = 32,
lora_dropout: float = 0.0,
lora_initialization_method: str = "kaiming_uniform",
):
super().__init__(
r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, lora_initialization_method=lora_initialization_method
)
self.weight = weight
self.bias = bias
if bias is True:
self.bias = nn.Parameter(torch.zeros(weight.shape[0]))
if bias is not None:
self.bias.requires_grad = True
out_features, in_features = weight.shape
self.in_features = in_features
self.out_features = out_features
assert lora_initialization_method in ["kaiming_uniform", "PiSSA"]
self.lora_initialization_method = lora_initialization_method
# Actual trainable parameters
if r > 0:
self.lora_A = nn.Parameter(torch.randn((r, in_features)))
self.lora_B = nn.Parameter(torch.randn((out_features, r)))
self.scaling = self.lora_alpha / self.r
# Freezing the pre-trained weight matrix
self.weight.requires_grad = False
self.reset_parameters()
def forward(self, x: torch.Tensor):
if self.r > 0 and not self.merged:
result = F.linear(x, self.weight, bias=self.bias)
result = result + (self.lora_dropout(x) @ self.lora_A.t() @ self.lora_B.t()) * self.scaling
return result
else:
return F.linear(x, self.weight, bias=self.bias)
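    # Forward-pass sketch for the unmerged case (what the branch above computes):
    #     y = x @ W.T + b + scaling * ((dropout(x) @ A.T) @ B.T)
    # which is a plain nn.Linear plus the rank-r LoRA correction; once merged, only the first
    # term remains because the correction has been folded into W.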
class LoraEmbedding(LoraBase):
"""Replace in-place ops to out-of-place ops to fit gemini. Convert a torch.nn.Linear to LoraLinear."""
def __init__(
self,
weight: nn.Parameter,
r: int = 0,
lora_alpha: int = 32,
lora_dropout: float = 0.1,
num_embeddings: int = None,
embedding_dim: int = None,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
lora_initialization_method: str = "kaiming_uniform",
):
super().__init__(
r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, lora_initialization_method=lora_initialization_method
)
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.weight = weight
in_features, out_features = num_embeddings, embedding_dim
self.in_features = in_features
self.out_features = out_features
assert lora_initialization_method in ["kaiming_uniform", "PiSSA"]
self.lora_initialization_method = lora_initialization_method
# Actual trainable parameters
if r > 0:
self.lora_A = nn.Parameter(torch.randn((r, in_features)))
self.lora_B = nn.Parameter(torch.randn((out_features, r)))
self.scaling = self.lora_alpha / self.r
# Freezing the pre-trained weight matrix
self.weight.requires_grad = False
# reset parameters
nn.init.zeros_(self.lora_A)
nn.init.normal_(self.lora_B)
def _embed(self, x: torch.Tensor, weight) -> torch.Tensor:
return F.embedding(
x,
weight,
padding_idx=self.padding_idx,
max_norm=self.max_norm,
norm_type=self.norm_type,
scale_grad_by_freq=self.scale_grad_by_freq,
sparse=self.sparse,
)
def forward(self, x: torch.Tensor):
base_embedding = self._embed(x, self.weight)
# base_embedding.requires_grad = True # force the embedding layer to be trainable for gradient checkpointing
if self.r > 0 and not self.merged:
lora_A_embedding = self._embed(x, self.lora_A.t())
embedding = base_embedding + (lora_A_embedding @ self.lora_B.t()) * self.scaling
return embedding
else:
return base_embedding
def train(self, mode: bool = True):
"""
        This function runs when model.train() is invoked. It is used to prepare the embedding layer for training.
"""
self.training = mode
if mode and self.merged:
warnings.warn("Invoke module.train() would unmerge LoRA weights.")
raise NotImplementedError("LoRA unmerge is not tested.")
elif not mode and not self.merged and lora_manager.able_to_merge:
warnings.warn("Invoke module.eval() would merge LoRA weights.")
# Merge the weights and mark it
if self.r > 0:
self.weight.data += self.lora_A.t() @ self.lora_B.t() * self.scaling
delattr(self, "lora_A")
delattr(self, "lora_B")
self.merged = True
return self
def _lora_linear_wrapper(linear: nn.Linear, lora_config: LoraConfig) -> LoraLinear:
"""
Wraps a linear layer with LoRA functionality.
Args:
linear (nn.Linear): The linear layer to be wrapped.
        lora_config (LoraConfig): LoRA settings, including the rank `r`, `lora_train_bias`
            ("none", "all", or "lora") and `lora_initialization_method` ("kaiming_uniform" or "PiSSA").
Returns:
LoraLinear: The wrapped linear layer with LoRA functionality.
"""
assert (
lora_config.r <= linear.in_features
), f"LoRA rank ({lora_config.r}) must be less than or equal to in features ({linear.in_features})"
bias = None
if lora_config.lora_train_bias in ["all", "lora"]:
bias = linear.bias
if bias is None:
bias = True
lora_linear = LoraLinear(
linear.weight, bias, r=lora_config.r, lora_initialization_method=lora_config.lora_initialization_method
)
return lora_linear
def _convert_to_lora_recursively(module: nn.Module, parent_name: str, lora_config: LoraConfig) -> None:
"""
Recursively converts the given module and its children to LoRA (Low-Rank Approximation) form.
Args:
module (nn.Module): The module to convert to LoRA form.
        parent_name (str): The qualified name of the parent module, used for logging.
        lora_config (LoraConfig): LoRA settings, including the rank `r`, the target modules and
            the initialization method ("kaiming_uniform" or "PiSSA").
Returns:
None
"""
for name, child in module.named_children():
if isinstance(child, nn.Linear):
if lora_config.target_modules is None or any(
[name in target_module for target_module in lora_config.target_modules]
):
if dist.is_initialized() and dist.get_rank() == 0:
logger.info(f"Converting {parent_name}.{name} to LoRA")
setattr(module, name, _lora_linear_wrapper(child, lora_config))
elif isinstance(child, nn.Embedding):
if lora_config.target_modules is None or any(
[name in target_module for target_module in lora_config.target_modules]
):
if dist.is_initialized() and dist.get_rank() == 0:
logger.info(f"Converting {parent_name}.{name} to LoRA")
setattr(
module,
name,
LoraEmbedding(
child.weight,
r=lora_config.r,
lora_alpha=lora_config.lora_alpha,
lora_dropout=lora_config.embedding_lora_dropout,
num_embeddings=child.num_embeddings,
embedding_dim=child.embedding_dim,
padding_idx=child.padding_idx,
max_norm=child.max_norm,
norm_type=child.norm_type,
scale_grad_by_freq=child.scale_grad_by_freq,
sparse=child.sparse,
lora_initialization_method=lora_config.lora_initialization_method,
),
)
else:
_convert_to_lora_recursively(child, f"{parent_name}.{name}", lora_config)
def convert_to_lora_module(module: nn.Module, lora_config: LoraConfig) -> nn.Module:
"""Convert a torch.nn.Module to a LoRA module.
Args:
module (nn.Module): The module to convert.
        lora_config (LoraConfig): LoRA settings, including the rank `r`, `lora_train_bias`
            ("none", "all", or "lora") and `lora_initialization_method` ("kaiming_uniform" or "PiSSA").
Returns:
nn.Module: The converted module.
"""
if lora_config.r <= 0:
return module
# make all parameter not trainable, if lora_train_bias is "all", set bias to trainable
total_parameter_size = 0
for name, p in module.named_parameters():
p.requires_grad = False
if "bias" in name and lora_config.lora_train_bias == "all":
p.requires_grad = True
total_parameter_size += p.numel()
_convert_to_lora_recursively(module, "", lora_config)
trainable_parameter_size = 0
for name, p in module.named_parameters():
        if p.requires_grad:
trainable_parameter_size += p.numel()
if dist.is_initialized() and dist.get_rank() == 0:
logger.info(
f"Trainable parameter size: {trainable_parameter_size/1024/1024:.2f}M\nOriginal trainable parameter size: {total_parameter_size/1024/1024:.2f}M\nPercentage: {trainable_parameter_size/total_parameter_size*100:.2f}%"
)
return module
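# Usage sketch (the target module names below are hypothetical, not shipped defaults):
#     lora_config = LoraConfig(r=8, lora_alpha=32, target_modules=["q_proj", "v_proj"])
#     model = convert_to_lora_module(model, lora_config)
# Afterwards only the LoRA factors (and, depending on lora_train_bias, the biases) require
# gradients; the logged percentage shows how much of the model is actually trainable.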
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |