code stringlengths 66 870k | docstring stringlengths 19 26.7k | func_name stringlengths 1 138 | language stringclasses 1
value | repo stringlengths 7 68 | path stringlengths 5 324 | url stringlengths 46 389 | license stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
"""Finds the python object with the given name and calls it as a function."""
assert func_name is not None
func_obj = get_obj_by_name(func_name)
assert callable(func_obj)
return func_obj(*args, **kwargs) | Finds the python object with the given name and calls it as a function. | call_func_by_name | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/util.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/util.py | Apache-2.0 |
def get_top_level_function_name(obj: Any) -> str:
"""Return the fully-qualified name of a top-level function."""
assert is_top_level_function(obj)
module = obj.__module__
if module == '__main__':
module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
return module + "."... | Return the fully-qualified name of a top-level function. | get_top_level_function_name | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/util.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/util.py | Apache-2.0 |
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
"""List all files recursively in a given directory while ignoring given file and directory names.
Returns list of tuples containing both absolute and relative paths."""
... | List all files recursively in a given directory while ignoring given file and directory names.
Returns list of tuples containing both absolute and relative paths. | list_dir_recursively_with_ignore | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/util.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/util.py | Apache-2.0 |
def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
"""Takes in a list of tuples of (src, dst) paths and copies files.
Will create all necessary directories."""
for file in files:
target_dir_name = os.path.dirname(file[1])
# will create all intermediate-level directories
... | Takes in a list of tuples of (src, dst) paths and copies files.
Will create all necessary directories. | copy_files_and_create_dirs | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/util.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/util.py | Apache-2.0 |
def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
"""Determine whether the given object is a valid URL string."""
if not isinstance(obj, str) or not "://" in obj:
return False
if allow_file_urls and obj.startswith('file://'):
return True
try:
res = requests.compat.urlp... | Determine whether the given object is a valid URL string. | is_url | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/util.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/util.py | Apache-2.0 |
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
"""Download the given URL and return a binary-mode file object to access the data."""
assert num_attempts >= 1
assert not (return_filename and (not cache))
... | Download the given URL and return a binary-mode file object to access the data. | open_url | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/util.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/util.py | Apache-2.0 |
def is50k(opts):
'''
Compute Inception Score(IS) of frames, sample 50000 frames from fake dataset at most, split features to 10 subset to compute ISs and return the mean and std.
'''
mean, std = inception_score.compute_is(opts, num_gen=50000, num_splits=10)
return dict(is50k_mean=mean, is50k_std... |
Compute Inception Score(IS) of frames, sample 50000 frames from fake dataset at most, split features to 10 subset to compute ISs and return the mean and std.
| is50k | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/video_metrics/metric_main.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/video_metrics/metric_main.py | Apache-2.0 |
def pr50k_3n(opts):
'''
Compute Precision/Recall (PR) of frames, sample 50000 frames from fake dataset at most, with the 4th (3+1) nearest features to estimate the distributions
'''
precision, recall = precision_recall.compute_pr(opts, max_real=50000, num_gen=50000, nhood_size=3, row_batch_size=1000... |
Compute Precision/Recall (PR) of frames, sample 50000 frames from fake dataset at most, with the 4th (3+1) nearest features to estimate the distributions
| pr50k_3n | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/video_metrics/metric_main.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/video_metrics/metric_main.py | Apache-2.0 |
def isv2048_ucf(opts):
'''
Compute Inception Score of Videos (ISV), sample 2048 times in dataset, 16 adjacent frames each time, split features to 10 subset to compute ISs and return the mean and std.
'''
mean, std = video_inception_score.compute_isv(opts, num_gen=2048, num_splits=10, backbone='c3d_u... |
Compute Inception Score of Videos (ISV), sample 2048 times in dataset, 16 adjacent frames each time, split features to 10 subset to compute ISs and return the mean and std.
| isv2048_ucf | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/video_metrics/metric_main.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/video_metrics/metric_main.py | Apache-2.0 |
def prv2048_3n_16f(opts):
'''
Compute Precision/Recall of Videos (PRV), sample 2048 times in dataset, 16 adjacent frames each time, with the 4th (3+1) nearest features to estimate the distributions
'''
precision, recall = video_precision_recall.compute_pr(opts, max_real=2048, num_gen=2048, nhood_siz... |
Compute Precision/Recall of Videos (PRV), sample 2048 times in dataset, 16 adjacent frames each time, with the 4th (3+1) nearest features to estimate the distributions
| prv2048_3n_16f | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/video_metrics/metric_main.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/video_metrics/metric_main.py | Apache-2.0 |
def rewrite_opts_for_gen_dataset(opts):
"""
Updates dataset arguments in the opts to enable the second dataset stats computation
"""
new_opts = copy.deepcopy(opts)
new_opts.dataset_kwargs = new_opts.gen_dataset_kwargs
new_opts.cache = False
return new_opts |
Updates dataset arguments in the opts to enable the second dataset stats computation
| rewrite_opts_for_gen_dataset | python | modelscope/data-juicer | tools/mm_eval/inception_metrics/video_metrics/metric_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/mm_eval/inception_metrics/video_metrics/metric_utils.py | Apache-2.0 |
def main(data_path,
text_keys='text',
tokenizer_method='EleutherAI/pythia-6.9b-deduped',
num_proc=1):
"""
Count the number of tokens for given dataset and tokenizer.
:param data_path: path to the input dataset. Only support 'jsonl' now.
:param text_keys: field keys that will ... |
Count the number of tokens for given dataset and tokenizer.
:param data_path: path to the input dataset. Only support 'jsonl' now.
:param text_keys: field keys that will be considered into token counts.
:param tokenizer_method: name of the Hugging Face tokenizer.
:param num_proc: number of process... | main | python | modelscope/data-juicer | tools/postprocess/count_token.py | https://github.com/modelscope/data-juicer/blob/master/tools/postprocess/count_token.py | Apache-2.0 |
def run_mixture():
"""
Mix multiple datasets into one dataset.
Randomly select samples from every dataset and mix these
samples, then export to a new mixed dataset
`data_path` with optional weight(1.0 as default),
e.g.
1) a single data path
2) multiple datasets in the format... |
Mix multiple datasets into one dataset.
Randomly select samples from every dataset and mix these
samples, then export to a new mixed dataset
`data_path` with optional weight(1.0 as default),
e.g.
1) a single data path
2) multiple datasets in the format: <w1> dataset1-path
... | run_mixture | python | modelscope/data-juicer | tools/postprocess/data_mixture.py | https://github.com/modelscope/data-juicer/blob/master/tools/postprocess/data_mixture.py | Apache-2.0 |
def meta_deserialize(file_name, target_file, serialized_key):
"""
Deserialize the specified field into dict.
:param file_name: path to source jsonl files.
:param target_file: path to store the converted jsonl files.
:param serialized_key: the key corresponding to the field that will be
deseriali... |
Deserialize the specified field into dict.
:param file_name: path to source jsonl files.
:param target_file: path to store the converted jsonl files.
:param serialized_key: the key corresponding to the field that will be
deserialized.
| meta_deserialize | python | modelscope/data-juicer | tools/postprocess/deserialize_meta.py | https://github.com/modelscope/data-juicer/blob/master/tools/postprocess/deserialize_meta.py | Apache-2.0 |
def main(src_dir, target_dir, serialized_key='source_info', num_proc=1):
"""
Deserialize the specified field in the jsonl file.
:param src_dir: path that's stores jsonl files.
:param target_dir: path to save the converted jsonl files.
:param serialized_key: the key corresponding to the field that wi... |
Deserialize the specified field in the jsonl file.
:param src_dir: path that's stores jsonl files.
:param target_dir: path to save the converted jsonl files.
:param serialized_key: the key corresponding to the field that will be
deserialized. Default it's 'source_info'.
:param num_proc: number ... | main | python | modelscope/data-juicer | tools/postprocess/deserialize_meta.py | https://github.com/modelscope/data-juicer/blob/master/tools/postprocess/deserialize_meta.py | Apache-2.0 |
def keep_by_lang(sample, lang):
"""
Keep samples with the specified language.
:param sample: a sample in dataset
:param lang: the specified language
:return: True to keep, False to discard
"""
if sample[Fields.stats][StatsKeys.lang] == lang:
return True
return False |
Keep samples with the specified language.
:param sample: a sample in dataset
:param lang: the specified language
:return: True to keep, False to discard
| keep_by_lang | python | modelscope/data-juicer | tools/preprocess/dataset_split_by_language.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/dataset_split_by_language.py | Apache-2.0 |
def main(src_dir, target_dir, text_key=None, suffixes=[], num_proc=1):
"""
Load dataset from the source directory, then apply language identification
using the operation filter called `LanguageIDScoreFilter`,
finally, split the dataset by language and save it.
:param src_dir: path to store the datas... |
Load dataset from the source directory, then apply language identification
using the operation filter called `LanguageIDScoreFilter`,
finally, split the dataset by language and save it.
:param src_dir: path to store the dataset.
:param target_dir: path to store subset files(`jsonl` format)
:par... | main | python | modelscope/data-juicer | tools/preprocess/dataset_split_by_language.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/dataset_split_by_language.py | Apache-2.0 |
def merge_and_add_meta(filename, target_dir):
"""
Merge `instruction`/`input`/`output` to `text` for process,
and add meta info.
:param filename: input dataset file
:param target_dir: path to save updated dataset
"""
ds = load_dataset('json', data_files=[filename], split='train')
if 'i... |
Merge `instruction`/`input`/`output` to `text` for process,
and add meta info.
:param filename: input dataset file
:param target_dir: path to save updated dataset
| merge_and_add_meta | python | modelscope/data-juicer | tools/preprocess/raw_alpaca_cot_merge_add_meta.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_alpaca_cot_merge_add_meta.py | Apache-2.0 |
def _merge_and_add_meta(sample, path, meta):
"""
Merge `instruction`/`input`/`output` to `text` for process,
and add meta info.
:param sample: a dict sample in dataset
:param path: sample in which file
:param meta: meta added to sample
:return: updated sample
... |
Merge `instruction`/`input`/`output` to `text` for process,
and add meta info.
:param sample: a dict sample in dataset
:param path: sample in which file
:param meta: meta added to sample
:return: updated sample
| _merge_and_add_meta | python | modelscope/data-juicer | tools/preprocess/raw_alpaca_cot_merge_add_meta.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_alpaca_cot_merge_add_meta.py | Apache-2.0 |
def main(src_dir, target_dir, num_proc=4):
"""
Load dataset from the source directory, then apply language identification
using the operation filter called `LanguageIDScoreFilter`,
finally, split the dataset by language and save it.
:param src_dir: path that's store dataset directory
:param targ... |
Load dataset from the source directory, then apply language identification
using the operation filter called `LanguageIDScoreFilter`,
finally, split the dataset by language and save it.
:param src_dir: path that's store dataset directory
:param target_dir: path to store subset files(`jsonl` format)... | main | python | modelscope/data-juicer | tools/preprocess/raw_alpaca_cot_merge_add_meta.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_alpaca_cot_merge_add_meta.py | Apache-2.0 |
def tex_proj_loader(file_or_dir_path: pathlib.Path):
"""
Load the tex files from a tar file or a gzip file.
:param file_or_dir_path: path to tar file or the gzip file
:return: a list of content in tex files
"""
files_and_content = []
try:
# if it is a directory, open it as a tarfile... |
Load the tex files from a tar file or a gzip file.
:param file_or_dir_path: path to tar file or the gzip file
:return: a list of content in tex files
| tex_proj_loader | python | modelscope/data-juicer | tools/preprocess/raw_arxiv_to_jsonl.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_arxiv_to_jsonl.py | Apache-2.0 |
def convert_tar_to_jsonl(tar_fp, jsonl_fp, tmp_dir):
"""
Extract the contents of tex files from tar file, convert and
save to jsonl file
:param tar_fp: path to tar file
:param jsonl_fp: path to save jsonl file
:param tmp_dir: a temporary directory to save extracted files
"""
failed = 0
... |
Extract the contents of tex files from tar file, convert and
save to jsonl file
:param tar_fp: path to tar file
:param jsonl_fp: path to save jsonl file
:param tmp_dir: a temporary directory to save extracted files
| convert_tar_to_jsonl | python | modelscope/data-juicer | tools/preprocess/raw_arxiv_to_jsonl.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_arxiv_to_jsonl.py | Apache-2.0 |
def main(arxiv_src_dir, target_dir, work_dir='./tmp/', num_proc=1):
"""
:param arxiv_src_dir: if you download raw arXiv data as Redpajama did,
you will get a directory src which includes thousands of tar
files whose filenames are like "arXiv_src_yymm_xxx.tar". You
just need to s... |
:param arxiv_src_dir: if you download raw arXiv data as Redpajama did,
you will get a directory src which includes thousands of tar
files whose filenames are like "arXiv_src_yymm_xxx.tar". You
just need to set this argument to the path of this dir.
:param target_dir: result dir... | main | python | modelscope/data-juicer | tools/preprocess/raw_arxiv_to_jsonl.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_arxiv_to_jsonl.py | Apache-2.0 |
def get_sites_count(path, topk=28):
"""
Take top-K sites(`.xml`) by its size of content
:param path: path to stack_exchage data
:param topk: number of top-k sites
:return
1) a dict stores pair of site and its size of content
2) a list of topk sites
"""
logger.info('Got count... |
Take top-K sites(`.xml`) by its size of content
:param path: path to stack_exchage data
:param topk: number of top-k sites
:return
1) a dict stores pair of site and its size of content
2) a list of topk sites
| get_sites_count | python | modelscope/data-juicer | tools/preprocess/raw_stackexchange_to_jsonl.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_stackexchange_to_jsonl.py | Apache-2.0 |
def get_parents(site, counts):
"""
Find all answers's parent id, and groups by parent id
:param site: site(xml) name
:param counts: a dict stores pair of site and its size of content
:return: a dict stores pair of parent question id and list of answer id
"""
parents = {}
with open(site, ... |
Find all answers's parent id, and groups by parent id
:param site: site(xml) name
:param counts: a dict stores pair of site and its size of content
:return: a dict stores pair of parent question id and list of answer id
| get_parents | python | modelscope/data-juicer | tools/preprocess/raw_stackexchange_to_jsonl.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_stackexchange_to_jsonl.py | Apache-2.0 |
def get_qapairs(site, counts, parents):
"""
Find and group all matched pairs of question and answer in site file
:param site: site(.xml) name
:param counts: a dict stores pair of site and its size of content
:param parents: a dict stores pair of parent question id and
list of ans... |
Find and group all matched pairs of question and answer in site file
:param site: site(.xml) name
:param counts: a dict stores pair of site and its size of content
:param parents: a dict stores pair of parent question id and
list of answer id
:return: a list of qa pairs
| get_qapairs | python | modelscope/data-juicer | tools/preprocess/raw_stackexchange_to_jsonl.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_stackexchange_to_jsonl.py | Apache-2.0 |
def process_qa_pair(pair, site_name, site_count):
"""
Sort answers by their score for question in qa pair sample,
add meta info to sample
:param pair: input qa pair sample
:param site_name: site name of qa pair
:param site_count: content size of site
:return: a dict of qa pair, including ["t... |
Sort answers by their score for question in qa pair sample,
add meta info to sample
:param pair: input qa pair sample
:param site_name: site name of qa pair
:param site_count: content size of site
:return: a dict of qa pair, including ["text", "meta"]
| process_qa_pair | python | modelscope/data-juicer | tools/preprocess/raw_stackexchange_to_jsonl.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_stackexchange_to_jsonl.py | Apache-2.0 |
def process_site(site, counts, src_dir, target_dir, num_proc=24):
"""
Convert one raw Stack Exchange site data to jsonl file.
1) find all answers's parent id and groups by parent id
2) find matched pair of question and answers
3) sort answers by their score for each question
:param ... |
Convert one raw Stack Exchange site data to jsonl file.
1) find all answers's parent id and groups by parent id
2) find matched pair of question and answers
3) sort answers by their score for each question
:param site: site name endwith `".xml"`
:param counts: dict stores pair of s... | process_site | python | modelscope/data-juicer | tools/preprocess/raw_stackexchange_to_jsonl.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_stackexchange_to_jsonl.py | Apache-2.0 |
def main(src_dir, target_dir, topk=28, num_proc=1):
"""
Convert the raw Stack Exchange data downloaded from from Archive
(ref: https://archive.org/download/stackexchange) to several
jsonl files.
:param src_dir: if you download raw Stack Exchange data as Redpajama did,
you will get a d... |
Convert the raw Stack Exchange data downloaded from from Archive
(ref: https://archive.org/download/stackexchange) to several
jsonl files.
:param src_dir: if you download raw Stack Exchange data as Redpajama did,
you will get a directory src which includes hundreds of 7z files
... | main | python | modelscope/data-juicer | tools/preprocess/raw_stackexchange_to_jsonl.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/raw_stackexchange_to_jsonl.py | Apache-2.0 |
def reformat_nan_value(fp, jsonl_fp, keep_default_na, kwargs):
"""
Reformat a csv/tsv file with kwargs.
:param fp: a csv/tsv file
:param jsonl_fp: path to save jsonl file
:param keep_default_na: if False, no string will be parsed as NaN,
otherwise only the default NaN values are used for... |
Reformat a csv/tsv file with kwargs.
:param fp: a csv/tsv file
:param jsonl_fp: path to save jsonl file
:param keep_default_na: if False, no string will be parsed as NaN,
otherwise only the default NaN values are used for parsing.
:param kwargs: for tsv file, kwargs["sep'} is ` `
:... | reformat_nan_value | python | modelscope/data-juicer | tools/preprocess/reformat_csv_nan_value.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/reformat_csv_nan_value.py | Apache-2.0 |
def main(src_dir,
target_dir,
suffixes=['.csv'],
is_tsv=False,
keep_default_na=False,
num_proc=1,
**kwargs):
"""
Reformat csv or tsv files that may contain Nan values using HuggingFace
to load with extra args, e.g. set `keep_default_na` to False
:par... |
Reformat csv or tsv files that may contain Nan values using HuggingFace
to load with extra args, e.g. set `keep_default_na` to False
:param src_dir: path that's stores filenames are like "*.csv" or "*.tsv".
:param target_dir: path to store the converted jsonl files.
:param suffixes: files with suff... | main | python | modelscope/data-juicer | tools/preprocess/reformat_csv_nan_value.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/reformat_csv_nan_value.py | Apache-2.0 |
def check_dict_non_nan(obj):
"""
Check if all fields in the dict object are non-Nan
:param: a dict object
:return: True if all fields in the dict object are non-Nan,
else False
"""
no_nan = True
for key, value in obj.items():
if isinstance(value, dict):
no_nan... |
Check if all fields in the dict object are non-Nan
:param: a dict object
:return: True if all fields in the dict object are non-Nan,
else False
| check_dict_non_nan | python | modelscope/data-juicer | tools/preprocess/reformat_jsonl_nan_value.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/reformat_jsonl_nan_value.py | Apache-2.0 |
def get_non_nan_features(src_dir):
"""
Get the first object feature which does not contain Nan value.
:param src_dir: path which stores jsonl files.
:return: reference feature of dataset.
"""
for fp in fp_iter(src_dir):
with jsonlines.open(fp, 'r') as reader:
for obj in reade... |
Get the first object feature which does not contain Nan value.
:param src_dir: path which stores jsonl files.
:return: reference feature of dataset.
| get_non_nan_features | python | modelscope/data-juicer | tools/preprocess/reformat_jsonl_nan_value.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/reformat_jsonl_nan_value.py | Apache-2.0 |
def reformat_jsonl(fp, jsonl_fp, features):
"""
Reformat a jsonl file with reference features
:param fp: input jsonl file
:param jsonl_fp: formatted jsonl file
:param features: reference feature to use for dataset.
"""
with jsonlines.open(fp, 'r') as reader:
objs = [obj for obj in re... |
Reformat a jsonl file with reference features
:param fp: input jsonl file
:param jsonl_fp: formatted jsonl file
:param features: reference feature to use for dataset.
| reformat_jsonl | python | modelscope/data-juicer | tools/preprocess/reformat_jsonl_nan_value.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/reformat_jsonl_nan_value.py | Apache-2.0 |
def main(src_dir, target_dir, num_proc=1):
"""
Reformat the jsonl files which may contain Nan values. Traverse jsonl
files to find the first object that does not contain Nan as a
reference feature type, then set it for loading all jsonl files.
:param src_dir: path that's stores jsonl files.
:par... |
Reformat the jsonl files which may contain Nan values. Traverse jsonl
files to find the first object that does not contain Nan as a
reference feature type, then set it for loading all jsonl files.
:param src_dir: path that's stores jsonl files.
:param target_dir: path to store the converted jsonl f... | main | python | modelscope/data-juicer | tools/preprocess/reformat_jsonl_nan_value.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/reformat_jsonl_nan_value.py | Apache-2.0 |
def meta_serialize(file_name, target_file, text_key, serialized_key):
"""
Serialize all fields except the specified fields into strings.
:param file_name: path to source jsonl files.
:param target_file: path to store the converted jsonl files.
:text_key: the key corresponding to the field that will ... |
Serialize all fields except the specified fields into strings.
:param file_name: path to source jsonl files.
:param target_file: path to store the converted jsonl files.
:text_key: the key corresponding to the field that will not be serialized.
:param serialized_key: the key corresponding to the fi... | meta_serialize | python | modelscope/data-juicer | tools/preprocess/serialize_meta.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/serialize_meta.py | Apache-2.0 |
def main(src_dir,
target_dir,
text_key='text',
serialized_key='source_info',
num_proc=1):
"""
Serialize all the fields in the jsonl file except the fields specified
by users to ensure that the jsonl file with inconsistent text format
for each line can also be load nor... |
Serialize all the fields in the jsonl file except the fields specified
by users to ensure that the jsonl file with inconsistent text format
for each line can also be load normally by the dataset.
:param src_dir: path that's stores jsonl files.
:param target_dir: path to save the converted jsonl fil... | main | python | modelscope/data-juicer | tools/preprocess/serialize_meta.py | https://github.com/modelscope/data-juicer/blob/master/tools/preprocess/serialize_meta.py | Apache-2.0 |
def main(positive_datasets=None,
negative_datasets=None,
model='my_quality_model',
tokenizer=None,
text_key='text'):
"""
Evaluate a trained quality classifier using specific positive/negative
datasets
:param positive_datasets: the paths to the positive datasets. It co... |
Evaluate a trained quality classifier using specific positive/negative
datasets
:param positive_datasets: the paths to the positive datasets. It could be a
string for a single dataset, e.g. 'pos.parquet', or a list of strings
for multiple datasets, e.g. '["pos1.parquet", "pos2.parquet"]'
... | main | python | modelscope/data-juicer | tools/quality_classifier/eval.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/eval.py | Apache-2.0 |
def predict_score(dataset_path,
result_path,
model='gpt3',
tokenizer=None,
keep_method='gpt3',
text_key='text',
overall_stats=False):
"""
Use specific quality classifier to predict document scores on your... |
Use specific quality classifier to predict document scores on your dataset
:param dataset_path: the path to the dataset you want to predict for
:param result_path: the path to store the predicted result dataset
:param model: quality classifier name to apply. It's "gpt3" in default. You
can use ... | predict_score | python | modelscope/data-juicer | tools/quality_classifier/predict.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/predict.py | Apache-2.0 |
def init_spark(spark_executor_memory=None,
spark_driver_memory=None,
spark_executor_memoryOverhead=None):
"""
Initialize a spark session. You can set parameters such as memory, number
of partitions, timeout and so on here
:return: A spark session instance.
"""
if no... |
Initialize a spark session. You can set parameters such as memory, number
of partitions, timeout and so on here
:return: A spark session instance.
| init_spark | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def prepare_model(model_name, model_path=DATA_JUICER_MODELS_CACHE):
"""
Prepare the specific model from model cache path or the remote oss
:param model_name: name of the quality classifier model
:param model_path: the path to store the model to be loaded
:return: a loaded PipelineModel
"""
u... |
Prepare the specific model from model cache path or the remote oss
:param model_name: name of the quality classifier model
:param model_path: the path to store the model to be loaded
:return: a loaded PipelineModel
| prepare_model | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def load_dataset(spark, ds_path, text_key='text', only_text=False):
"""
Load a single dataset using PySpark. Only support 'json', 'jsonl', or
'parquet' files for now
:param spark: spark session
:param ds_path: dataset path
:param text_key: the name of the column that stores the contents of texts... |
Load a single dataset using PySpark. Only support 'json', 'jsonl', or
'parquet' files for now
:param spark: spark session
:param ds_path: dataset path
:param text_key: the name of the column that stores the contents of texts
:param only_text: whether to load texts only and drop other columns.
... | load_dataset | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def load_datasets(spark,
ds_paths,
text_key='text',
label=None,
only_text=True):
"""
Load a list of datasets. Only support 'json', 'jsonl', or 'parquet' files
for now
:param spark: spark session
:param ds_paths: a list of datase... |
Load a list of datasets. Only support 'json', 'jsonl', or 'parquet' files
for now
:param spark: spark session
:param ds_paths: a list of datasets to be loaded.
:param text_key: the name of the column that stores the contents of texts
:param label: the label set to these datasets. Used in traini... | load_datasets | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def shuffle(df):
"""
Shuffle a data frame
:param df: input data frame
:return: shuffled data frame
"""
temp_df = df.withColumn('rand', rand(seed=42))
df_rnd = temp_df.orderBy(temp_df.rand)
return df_rnd.drop(df_rnd.rand) |
Shuffle a data frame
:param df: input data frame
:return: shuffled data frame
| shuffle | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def export_result(ds, res_path):
"""
Export a dataset to specified path. Only support 'json', 'jsonl', or
'parquet' export formats for now
:param ds: the dataset to be exported
:param res_path: the path to store the exported dataset
:return:
"""
logger.info(f'Exporting predicted result t... |
Export a dataset to specified path. Only support 'json', 'jsonl', or
'parquet' export formats for now
:param ds: the dataset to be exported
:param res_path: the path to store the exported dataset
:return:
| export_result | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def get_keep_method_udf(keep_method):
"""
Given the name of keep method, return a PySpark user-defined function of
this kind of keep method. Only support 'gpt3' or 'label' for now
:param keep_method: name of keep method
:return: a PySpark udf of specified keep method
"""
if keep_method == 'l... |
Given the name of keep method, return a PySpark user-defined function of
this kind of keep method. Only support 'gpt3' or 'label' for now
:param keep_method: name of keep method
:return: a PySpark udf of specified keep method
| get_keep_method_udf | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def tokenize_dataset(ds, tokenizer):
"""
Tokenize the texts in input dataset using specified tokenizer
:param ds: dataset to be tokenized
:param tokenizer: tokenizer used to tokenize texts
:return: a dataset with an extra column "words" that stores the tokenized
texts
"""
tkn = prepa... |
Tokenize the texts in input dataset using specified tokenizer
:param ds: dataset to be tokenized
:param tokenizer: tokenizer used to tokenize texts
:return: a dataset with an extra column "words" that stores the tokenized
texts
| tokenize_dataset | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def train(output_model_path, ds, tokenizer=None):
"""
Train a quality classifier with training dataset and export the trained
model to a specified path
:param output_model_path: the path to store the trained model
:param ds: training dataset
:param tokenizer: specified sentencepiece tokenizer. I... |
Train a quality classifier with training dataset and export the trained
model to a specified path
:param output_model_path: the path to store the trained model
:param ds: training dataset
:param tokenizer: specified sentencepiece tokenizer. It's None in default,
which means using the standa... | train | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def eval(model_path, ds, tokenizer=None):
"""
Evaluate a quality classifier model on specified dataset
:param model_path: the path to the model to be evaluated
:param ds: evaluation dataset
:param tokenizer: specified sentencepiece tokenizer. It's None in default,
which means using the stand... |
Evaluate a quality classifier model on specified dataset
:param model_path: the path to the model to be evaluated
:param ds: evaluation dataset
:param tokenizer: specified sentencepiece tokenizer. It's None in default,
which means using the standard Tokenizer in PySpark
:return:
| eval | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def predict(model, ds, tokenizer=None, keep_method='label'):
"""
Predict document scores for a dataset using a trained quality classifier
model
:param model: the model used to predict
:param ds: the dataset to be predicted
:param tokenizer: specified sentencepiece tokenizer. It's None in default... |
Predict document scores for a dataset using a trained quality classifier
model
:param model: the model used to predict
:param ds: the dataset to be predicted
:param tokenizer: specified sentencepiece tokenizer. It's None in default,
which means using the standard Tokenizer in PySpark
:p... | predict | python | modelscope/data-juicer | tools/quality_classifier/qc_utils.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/qc_utils.py | Apache-2.0 |
def main(positive_datasets,
negative_datasets,
output_model_path='my_quality_model',
num_training_samples=0,
train_test_split_ratio=0.8,
tokenizer=None,
evaluation=True,
text_key='text'):
"""
Train a quality classifier using your own pos/neg dataset... |
Train a quality classifier using your own pos/neg datasets
:param positive_datasets: the paths to the positive datasets. It could be a
string for a single dataset, e.g. 'pos.parquet', or a list of strings
for several datasets, e.g. '["pos1.parquet", "pos2.parquet"]'
:param negative_datasets... | main | python | modelscope/data-juicer | tools/quality_classifier/train.py | https://github.com/modelscope/data-juicer/blob/master/tools/quality_classifier/train.py | Apache-2.0 |
def change_iamgepath_markdown(file_path):
"""
change 
to 
"""
search_text = "images/"
replace_text = "../images/" + file_path.split('/')[-2] + "/"
print(search_text,replace_text, file_path)
with open(file_path, 'r', enc... |
change 
to 
| change_iamgepath_markdown | python | chenzomi12/AIInfra | build_books/create_dir.py | https://github.com/chenzomi12/AIInfra/blob/master/build_books/create_dir.py | Apache-2.0 |
def create_predictor_spec(
framework,
runtime_version,
resource_requests,
resource_limits,
storage_uri,
canary_traffic_percent,
service_account,
min_replicas,
max_replicas,
containers,
request_timeout,
):
"""
Create and return V1beta1PredictorSpec to be used in a V1be... |
Create and return V1beta1PredictorSpec to be used in a V1beta1InferenceServiceSpec
object.
| create_predictor_spec | python | kserve/kserve | docs/kfp/src/kservedeployer.py | https://github.com/kserve/kserve/blob/master/docs/kfp/src/kservedeployer.py | Apache-2.0 |
def create_custom_container_spec(custom_model_spec):
"""
Given a JSON container spec, return a V1Container object
representing the container. This is used for passing in
custom server images. The expected format for the input is:
{ "image": "test/containerimage",
"port":5000,
"name": "c... |
Given a JSON container spec, return a V1Container object
representing the container. This is used for passing in
custom server images. The expected format for the input is:
{ "image": "test/containerimage",
"port":5000,
"name": "custom-container" }
| create_custom_container_spec | python | kserve/kserve | docs/kfp/src/kservedeployer.py | https://github.com/kserve/kserve/blob/master/docs/kfp/src/kservedeployer.py | Apache-2.0 |
def submit_api_request(
kserve_client, action, name, isvc, namespace=None, watch=False, timeout_seconds=300
):
"""
Creates or updates a Kubernetes custom object. This code is borrowed from the
KServeClient.create/patch methods as using those directly doesn't allow for
sending in dicts as the Inferen... |
Creates or updates a Kubernetes custom object. This code is borrowed from the
KServeClient.create/patch methods as using those directly doesn't allow for
sending in dicts as the InferenceService object which is needed for supporting passing
in raw InferenceService serialized YAML.
| submit_api_request | python | kserve/kserve | docs/kfp/src/kservedeployer.py | https://github.com/kserve/kserve/blob/master/docs/kfp/src/kservedeployer.py | Apache-2.0 |
def perform_action(
action,
model_name,
model_uri,
canary_traffic_percent,
namespace,
framework,
runtime_version,
resource_requests,
resource_limits,
custom_model_spec,
service_account,
inferenceservice_yaml,
request_timeout,
autoscaling_target=0,
enable_istio... |
Perform the specified action. If the action is not 'delete' and `inferenceService_yaml`
was provided, the dict representation of the YAML will be sent directly to the
Kubernetes API. Otherwise, a V1beta1InferenceService object will be built using the
provided input and then sent for creation/update.
... | perform_action | python | kserve/kserve | docs/kfp/src/kservedeployer.py | https://github.com/kserve/kserve/blob/master/docs/kfp/src/kservedeployer.py | Apache-2.0 |
def main():
"""
This parses arguments passed in from the CLI and performs the corresponding action.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--action", type=str, help="Action to execute on KServe", default="create"
)
parser.add_argument(
"--model-name", ty... |
This parses arguments passed in from the CLI and performs the corresponding action.
| main | python | kserve/kserve | docs/kfp/src/kservedeployer.py | https://github.com/kserve/kserve/blob/master/docs/kfp/src/kservedeployer.py | Apache-2.0 |
def get_istio_auth_session(url: str, username: str, password: str) -> dict:
"""
Determine if the specified URL is secured by Dex and try to obtain a session cookie.
WARNING: only Dex `staticPasswords` and `LDAP` authentication are currently supported
(we default to using `staticPasswords` if bo... |
Determine if the specified URL is secured by Dex and try to obtain a session cookie.
WARNING: only Dex `staticPasswords` and `LDAP` authentication are currently supported
(we default to using `staticPasswords` if both are enabled)
:param url: Kubeflow server URL, including protocol
:param... | get_istio_auth_session | python | kserve/kserve | docs/samples/istio-dex/dex_auth.py | https://github.com/kserve/kserve/blob/master/docs/samples/istio-dex/dex_auth.py | Apache-2.0 |
def preprocess(self, requests):
"""Basic text preprocessing, based on the user's chocie of application mode.
Args:
requests (str): The Input data in the form of text is passed on to the preprocess
function.
Returns:
list : The preprocess function returns a lis... | Basic text preprocessing, based on the user's chocie of application mode.
Args:
requests (str): The Input data in the form of text is passed on to the preprocess
function.
Returns:
list : The preprocess function returns a list of Tensor for the size of the word tokens... | preprocess | python | kserve/kserve | docs/samples/v1beta1/torchserve/v2/bert/sequence_classification/Transformer_kserve_handler.py | https://github.com/kserve/kserve/blob/master/docs/samples/v1beta1/torchserve/v2/bert/sequence_classification/Transformer_kserve_handler.py | Apache-2.0 |
def get_insights(self, input_batch, text, target):
"""This function initialize and calls the layer integrated gradient to get word importance
of the input text if captum explanation has been selected through setup_config
Args:
input_batch (int): Batches of tokens IDs of text
... | This function initialize and calls the layer integrated gradient to get word importance
of the input text if captum explanation has been selected through setup_config
Args:
input_batch (int): Batches of tokens IDs of text
text (str): The Text specified in the input request
... | get_insights | python | kserve/kserve | docs/samples/v1beta1/torchserve/v2/bert/sequence_classification/Transformer_kserve_handler.py | https://github.com/kserve/kserve/blob/master/docs/samples/v1beta1/torchserve/v2/bert/sequence_classification/Transformer_kserve_handler.py | Apache-2.0 |
def _to_datatype(dtype: np.dtype) -> str:
"""
Converts numpy datatype to KServe datatype
"""
as_str = str(dtype)
datatype = _NumpyToDatatype[as_str]
return datatype |
Converts numpy datatype to KServe datatype
| _to_datatype | python | kserve/kserve | docs/samples/v1beta1/torchserve/v2/tensor_conv/utils.py | https://github.com/kserve/kserve/blob/master/docs/samples/v1beta1/torchserve/v2/tensor_conv/utils.py | Apache-2.0 |
def check_image_with_pil(path):
"""
Check if input file is an image
"""
try:
Image.open(path)
except IOError:
return False
return True |
Check if input file is an image
| check_image_with_pil | python | kserve/kserve | docs/samples/v1beta1/torchserve/v2/tensor_conv/utils.py | https://github.com/kserve/kserve/blob/master/docs/samples/v1beta1/torchserve/v2/tensor_conv/utils.py | Apache-2.0 |
def __init__(
self,
name: str,
predictor_host: str,
protocol: str,
feast_serving_url: str,
entity_id_name: str,
feature_refs: List[str],
):
"""Initialize the model name, predictor host, Feast serving URL,
entity IDs, and feature references
... | Initialize the model name, predictor host, Feast serving URL,
entity IDs, and feature references
Args:
name (str): Name of the model.
predictor_host (str): The host in which the predictor runs.
protocol (str): The protocol in which the predictor runs.
... | __init__ | python | kserve/kserve | docs/samples/v1beta1/transformer/feast/driver_transformer/driver_transformer/driver_transformer.py | https://github.com/kserve/kserve/blob/master/docs/samples/v1beta1/transformer/feast/driver_transformer/driver_transformer/driver_transformer.py | Apache-2.0 |
def buildEntityRow(self, inputs) -> Dict:
"""Build an entity row and return it as a dict.
Args:
inputs (Dict): entity ids to identify unique entities
Returns:
Dict: Returns the entity id attributes as an entity row
"""
entity_rows = {}
entity_id... | Build an entity row and return it as a dict.
Args:
inputs (Dict): entity ids to identify unique entities
Returns:
Dict: Returns the entity id attributes as an entity row
| buildEntityRow | python | kserve/kserve | docs/samples/v1beta1/transformer/feast/driver_transformer/driver_transformer/driver_transformer.py | https://github.com/kserve/kserve/blob/master/docs/samples/v1beta1/transformer/feast/driver_transformer/driver_transformer/driver_transformer.py | Apache-2.0 |
def buildPredictRequest(self, inputs, features) -> Dict:
"""Build the predict request for all entities and return it as a dict.
Args:
inputs (Dict): entity ids from http request
features (Dict): entity features extracted from the feature store
Returns:
Dict:... | Build the predict request for all entities and return it as a dict.
Args:
inputs (Dict): entity ids from http request
features (Dict): entity features extracted from the feature store
Returns:
Dict: Returns the entity ids with features
| buildPredictRequest | python | kserve/kserve | docs/samples/v1beta1/transformer/feast/driver_transformer/driver_transformer/driver_transformer.py | https://github.com/kserve/kserve/blob/master/docs/samples/v1beta1/transformer/feast/driver_transformer/driver_transformer/driver_transformer.py | Apache-2.0 |
def preprocess(
self, inputs: Union[Dict, InferRequest], headers: Dict[str, str] = None
) -> Union[Dict, InferRequest]:
"""Pre-process activity of the driver input data.
Args:
inputs (Dict|CloudEvent|InferRequest): Body of the request, v2 endpoints pass InferRequest.
... | Pre-process activity of the driver input data.
Args:
inputs (Dict|CloudEvent|InferRequest): Body of the request, v2 endpoints pass InferRequest.
headers (Dict): Request headers.
Returns:
Dict|InferRequest: Transformed inputs to ``predict`` handler or return InferReq... | preprocess | python | kserve/kserve | docs/samples/v1beta1/transformer/feast/driver_transformer/driver_transformer/driver_transformer.py | https://github.com/kserve/kserve/blob/master/docs/samples/v1beta1/transformer/feast/driver_transformer/driver_transformer/driver_transformer.py | Apache-2.0 |
def postprocess(
self,
response: Union[Dict, InferResponse, ModelInferResponse],
headers: Dict[str, str] = None,
) -> Union[Dict, ModelInferResponse]:
"""Post process function of the driver ranking output data. Here we
simply pass the raw rankings through. Convert gRPC respon... | Post process function of the driver ranking output data. Here we
simply pass the raw rankings through. Convert gRPC response if needed.
Args:
response (Dict|InferResponse|ModelInferResponse): The response passed from ``predict`` handler.
headers (Dict): Request headers.
... | postprocess | python | kserve/kserve | docs/samples/v1beta1/transformer/feast/driver_transformer/driver_transformer/driver_transformer.py | https://github.com/kserve/kserve/blob/master/docs/samples/v1beta1/transformer/feast/driver_transformer/driver_transformer/driver_transformer.py | Apache-2.0 |
def convert_doc_tokens(paragraph_text):
"""Return the list of tokens from the doc text"""
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
doc_tokens = []
prev_is_whitespace = True
for c in paragraph_... | Return the list of tokens from the doc text | convert_doc_tokens | python | kserve/kserve | python/custom_tokenizer/data_processing.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/data_processing.py | Apache-2.0 |
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a ga... | Check if this is the 'max context' doc span for the token. | _check_is_max_context | python | kserve/kserve | python/custom_tokenizer/data_processing.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/data_processing.py | Apache-2.0 |
def convert_examples_to_features(
doc_tokens, question_text, tokenizer, max_seq_length, doc_stride, max_query_length
):
"""Loads a data file into a list of `InputBatch`s."""
query_tokens = tokenizer.tokenize(question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:... | Loads a data file into a list of `InputBatch`s. | convert_examples_to_features | python | kserve/kserve | python/custom_tokenizer/data_processing.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/data_processing.py | Apache-2.0 |
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_an... | Get the n-best logits from a list. | _get_best_indexes | python | kserve/kserve | python/custom_tokenizer/data_processing.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/data_processing.py | Apache-2.0 |
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the sp... | Project the tokenized prediction back to the original text. | get_final_text | python | kserve/kserve | python/custom_tokenizer/data_processing.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/data_processing.py | Apache-2.0 |
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
... | Compute softmax probability over raw logits. | _compute_softmax | python | kserve/kserve | python/custom_tokenizer/data_processing.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/data_processing.py | Apache-2.0 |
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# s... | Checks whether the casing config is consistent with the checkpoint name. | validate_case_matches_checkpoint | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Un... | Converts `text` to Unicode (if it's not already), assuming utf-8 input. | convert_to_unicode | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text... | Returns text encoded in a way suitable for print or `tf.logging`. | printable_text | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output | Converts a sequence of [tokens|ids] using the vocab. | convert_by_vocab | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens | Runs basic whitespace cleaning and splitting on a piece of text. | whitespace_tokenize | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
... | Strips accents from a piece of text. | _run_strip_accents | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
... | Splits punctuation on a piece of text. | _run_split_on_punc | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
... | Adds whitespace around any CJK character. | _tokenize_chinese_chars | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is ... | Checks whether CP is the codepoint of a CJK character. | _is_chinese_char | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
... | Performs invalid character removal and whitespace cleanup on text. | _clean_text | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
... | Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespa... | tokenize | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat... | Checks whether `chars` is a whitespace character. | _is_whitespace | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
... | Checks whether `chars` is a control character. | _is_control | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.... | Checks whether `chars` is a punctuation character. | _is_punctuation | python | kserve/kserve | python/custom_tokenizer/tokenization.py | https://github.com/kserve/kserve/blob/master/python/custom_tokenizer/tokenization.py | Apache-2.0 |
def image_transform(model_name, data):
"""converts the input image of Bytes Array into Tensor
Args:
model_name: The model name
data: The input image bytes.
Returns:
numpy.array: Returns the numpy array after the image preprocessing.
"""
preprocess = transforms.Compose(
... | converts the input image of Bytes Array into Tensor
Args:
model_name: The model name
data: The input image bytes.
Returns:
numpy.array: Returns the numpy array after the image preprocessing.
| image_transform | python | kserve/kserve | python/custom_transformer/model.py | https://github.com/kserve/kserve/blob/master/python/custom_transformer/model.py | Apache-2.0 |
def image_transform(data):
"""converts the input image of Bytes Array into Tensor
Args:
request input instance: The request input instance for image.
Returns:
List: Returns the data key's value and converts that into a list
after converting it into a tensor
"""
preprocess = t... | converts the input image of Bytes Array into Tensor
Args:
request input instance: The request input instance for image.
Returns:
List: Returns the data key's value and converts that into a list
after converting it into a tensor
| image_transform | python | kserve/kserve | python/custom_transformer/model_grpc.py | https://github.com/kserve/kserve/blob/master/python/custom_transformer/model_grpc.py | Apache-2.0 |
def on_finalized_text(self, text: str, stream_end: bool = False):
"""Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
self.text_queue.put_nowait(text)
if stream_end:
self.text_queue.put_nowait(self.stop_signal) | Put the new text in the queue. If the stream is ending, also put a stop signal in the queue. | on_finalized_text | python | kserve/kserve | python/huggingfaceserver/huggingfaceserver/async_generate_stream.py | https://github.com/kserve/kserve/blob/master/python/huggingfaceserver/huggingfaceserver/async_generate_stream.py | Apache-2.0 |
def _process_requests(self):
"""
Process requests from the request queue in a background thread.
This ensures we don't block the event loop while running generation.
"""
while True:
req = self._request_queue.get()
# If request is None we should stop proce... |
Process requests from the request queue in a background thread.
This ensures we don't block the event loop while running generation.
| _process_requests | python | kserve/kserve | python/huggingfaceserver/huggingfaceserver/generative_model.py | https://github.com/kserve/kserve/blob/master/python/huggingfaceserver/huggingfaceserver/generative_model.py | Apache-2.0 |
def _submit_request(
self,
kwargs: Dict[str, Any],
request: CompletionRequest,
context: Dict[str, Any],
) -> asyncio.Queue:
"""
Add a request to the request queue to be processed. Results for this request
will be pushed to the returned async queue.
"""... |
Add a request to the request queue to be processed. Results for this request
will be pushed to the returned async queue.
| _submit_request | python | kserve/kserve | python/huggingfaceserver/huggingfaceserver/generative_model.py | https://github.com/kserve/kserve/blob/master/python/huggingfaceserver/huggingfaceserver/generative_model.py | Apache-2.0 |
def validate_supported_completion_params(self, request: CompletionRequest):
"""
Check that only support params have been provided
"""
if request.frequency_penalty is not None and request.frequency_penalty > 0:
raise OpenAIError("'frequency_penalty' is not supported")
... |
Check that only support params have been provided
| validate_supported_completion_params | python | kserve/kserve | python/huggingfaceserver/huggingfaceserver/generative_model.py | https://github.com/kserve/kserve/blob/master/python/huggingfaceserver/huggingfaceserver/generative_model.py | Apache-2.0 |
def apply_chat_template(
self,
request: ChatCompletionRequest,
) -> (
ChatPrompt
): # TODO: Does not supprot multi-modal, also does not solve mistral tokenizer issue.
"""
Given a list of chat completion messages, convert them to a prompt.
"""
conversation... |
Given a list of chat completion messages, convert them to a prompt.
| apply_chat_template | python | kserve/kserve | python/huggingfaceserver/huggingfaceserver/generative_model.py | https://github.com/kserve/kserve/blob/master/python/huggingfaceserver/huggingfaceserver/generative_model.py | Apache-2.0 |
def _get_and_verify_max_len(
hf_config: PretrainedConfig,
max_model_len: Optional[int],
disable_sliding_window: bool = False,
sliding_window_len: Optional[Union[int, List[Optional[int]]]] = None,
spec_target_max_model_len: Optional[int] = None,
encoder_config: Optional[Any] = None,
) -> int:
... | Get and verify the model's maximum length. | _get_and_verify_max_len | python | kserve/kserve | python/huggingfaceserver/huggingfaceserver/utils.py | https://github.com/kserve/kserve/blob/master/python/huggingfaceserver/huggingfaceserver/utils.py | Apache-2.0 |
def _mean_pooling(token_embeddings, attention_mask):
"""
Take attention mask into account for correct averaging.
This implementation is taken from the sentence-transformers library:
https://github.com/UKPLab/sentence-transformers/blob/f012ab33189d23cef0dd00df7c5642ebb0bac2d4/sentence_transformers/model... |
Take attention mask into account for correct averaging.
This implementation is taken from the sentence-transformers library:
https://github.com/UKPLab/sentence-transformers/blob/f012ab33189d23cef0dd00df7c5642ebb0bac2d4/sentence_transformers/model_card_templates.py#L136-L146
| _mean_pooling | python | kserve/kserve | python/huggingfaceserver/huggingfaceserver/utils.py | https://github.com/kserve/kserve/blob/master/python/huggingfaceserver/huggingfaceserver/utils.py | Apache-2.0 |
async def build_async_engine_client_from_engine_args(
engine_args: AsyncEngineArgs,
disable_frontend_multiprocessing: bool = False,
) -> AsyncIterator[EngineClient]:
"""
Create EngineClient, either:
- V1 AsyncLLM (default)
- V0 AsyncLLMEngine (legacy)
Returns the Client or None if t... |
Create EngineClient, either:
- V1 AsyncLLM (default)
- V0 AsyncLLMEngine (legacy)
Returns the Client or None if the creation failed.
| build_async_engine_client_from_engine_args | python | kserve/kserve | python/huggingfaceserver/huggingfaceserver/vllm/utils.py | https://github.com/kserve/kserve/blob/master/python/huggingfaceserver/huggingfaceserver/vllm/utils.py | Apache-2.0 |
async def test_parallel_streaming(client: openai.AsyncOpenAI, model_name: str):
"""Streaming for parallel sampling.
The tokens from multiple samples, are flattened into a single stream,
with an index to indicate which sample the token belongs to.
"""
prompt = "What is an LLM?"
n = 3
max_tok... | Streaming for parallel sampling.
The tokens from multiple samples, are flattened into a single stream,
with an index to indicate which sample the token belongs to.
| test_parallel_streaming | python | kserve/kserve | python/huggingfaceserver/tests/test_vllm_generative.py | https://github.com/kserve/kserve/blob/master/python/huggingfaceserver/tests/test_vllm_generative.py | Apache-2.0 |
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool | Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
| pool | python | kserve/kserve | python/kserve/kserve/api_client.py | https://github.com/kserve/kserve/blob/master/python/kserve/kserve/api_client.py | Apache-2.0 |
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each... | Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return t... | sanitize_for_serialization | python | kserve/kserve | python/kserve/kserve/api_client.py | https://github.com/kserve/kserve/blob/master/python/kserve/kserve/api_client.py | Apache-2.0 |
Subsets and Splits
Django Code with Docstrings
Filters Python code examples from the Django repository that contain Django-related code, helping identify relevant code snippets for understanding Django framework usage patterns.
SQL Console for Shuu12121/python-treesitter-filtered-datasetsV2
Retrieves specific code examples from the Flask repository but doesn't provide meaningful analysis or patterns beyond basic data retrieval.
HTTPX Repo Code and Docstrings
Retrieves specific code examples from the httpx repository, which is useful for understanding how particular libraries are used but doesn't provide broader analytical insight into the dataset.
Requests Repo Docstrings & Code
Retrieves code examples with their docstrings and file paths from the requests repository, providing basic filtering but limited analytical value beyond finding specific code samples.
Quart Repo Docstrings & Code
Retrieves code examples with their docstrings from the Quart repository, providing basic code samples but offering limited analytical value for understanding broader patterns or relationships in the dataset.