body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
c0c2eae7054cb1816a32c69039498bf23b980ad9bbbbba7a6b2ab1a841891c39
@app.route('/predict', methods=['POST'])
def predict_medicine():
    """Return prediction about medicine in image.

    Request Format:
        Multipart form request with the image to run inference on,
        sent with key "medicine_image" (the docstring previously said
        "image", which did not match the code below).

    Response Format:
        JSON Response
        {
            "authentic" : <bool>,
            "name" : <medicine name text>,
            "description" : <text description about the medicine>
        }
    """
    # Raw bytes of the uploaded file.
    filestr = request.files['medicine_image'].read()
    # numpy.fromstring is deprecated (and removed for binary input in newer
    # NumPy); frombuffer is the supported zero-copy equivalent.
    npimg = numpy.frombuffer(filestr, numpy.uint8)
    img = cv2.imdecode(npimg, cv2.IMREAD_UNCHANGED)
    (authentic, med_name, med_description) = infer_medicine(img)
    return jsonify({'authentic': authentic, 'name': med_name, 'description': med_description})
Return prediction about medicine in image. Request Format: Multipart Form Request with image to run inference sent with key "image" Response Format: JSON Response { "authentic" : <bool>, "name" : <medicine name text> "description" : <text description about the medicine> }
fake/server.py
predict_medicine
PriyanshuRj/BrigAID
1
python
@app.route('/predict', methods=['POST']) def predict_medicine(): '\n Return prediction about medicine in image.\n\n Request Format:\n Multipart Form Request with image to run inference sent with key "image"\n\n Response Format:\n JSON Response\n {\n "authentic" : <bool>,\n "name" : <medicine name text>\n "description" : <text description about the medicine>\n }\n ' filestr = request.files['medicine_image'].read() npimg = numpy.fromstring(filestr, numpy.uint8) img = cv2.imdecode(npimg, cv2.IMREAD_UNCHANGED) (authentic, med_name, med_description) = infer_medicine(img) return jsonify({'authentic': authentic, 'name': med_name, 'description': med_description})
@app.route('/predict', methods=['POST']) def predict_medicine(): '\n Return prediction about medicine in image.\n\n Request Format:\n Multipart Form Request with image to run inference sent with key "image"\n\n Response Format:\n JSON Response\n {\n "authentic" : <bool>,\n "name" : <medicine name text>\n "description" : <text description about the medicine>\n }\n ' filestr = request.files['medicine_image'].read() npimg = numpy.fromstring(filestr, numpy.uint8) img = cv2.imdecode(npimg, cv2.IMREAD_UNCHANGED) (authentic, med_name, med_description) = infer_medicine(img) return jsonify({'authentic': authentic, 'name': med_name, 'description': med_description})<|docstring|>Return prediction about medicine in image. Request Format: Multipart Form Request with image to run inference sent with key "image" Response Format: JSON Response { "authentic" : <bool>, "name" : <medicine name text> "description" : <text description about the medicine> }<|endoftext|>
6c0c5dc478a4fee779a4ada7c7a37915686b8646ff507069ac0ad7466a66be79
def test_part1() -> None:
    """Examples for Part 1."""
    # Distance between individual points.
    assert manhattan_distance((0, 0, 0, 0), (3, 0, 0, 0)) == 3
    assert manhattan_distance((9, 0, 0, 0), (0, 3, 0, 0)) == 12

    test_input = '\n'.join(('0,0,0,0', '3,0,0,0', '0,3,0,0', '0,0,3,0',
                            '0,0,0,3', '0,0,0,6', '9,0,0,0', '12,0,0,0'))
    test1 = ((0, 0, 0, 0), (3, 0, 0, 0), (0, 3, 0, 0), (0, 0, 3, 0),
             (0, 0, 0, 3), (0, 0, 0, 6), (9, 0, 0, 0), (12, 0, 0, 0))
    assert read_points(test_input) == test1

    conn_map = connection_map(test1)
    assert all_reachable(conn_map, (0, 0, 0, 0)) == {
        (0, 0, 0, 0), (3, 0, 0, 0), (0, 3, 0, 0),
        (0, 0, 3, 0), (0, 0, 0, 3), (0, 0, 0, 6),
    }
    assert all_reachable(conn_map, (9, 0, 0, 0)) == {(9, 0, 0, 0), (12, 0, 0, 0)}
    assert constellations(test1) == 2

    test2 = ((-1, 2, 2, 0), (0, 0, 2, -2), (0, 0, 0, -2), (-1, 2, 0, 0),
             (-2, -2, -2, 2), (3, 0, 2, -1), (-1, 3, 2, 2), (-1, 0, -1, 0),
             (0, 2, 1, -2), (3, 0, 0, 0))
    assert constellations(test2) == 4
Examples for Part 1.
2018/day25.py
test_part1
andypymont/adventofcode
0
python
def test_part1() -> None: '\n \n ' assert (manhattan_distance((0, 0, 0, 0), (3, 0, 0, 0)) == 3) assert (manhattan_distance((9, 0, 0, 0), (0, 3, 0, 0)) == 12) test_input = '\n'.join(('0,0,0,0', '3,0,0,0', '0,3,0,0', '0,0,3,0', '0,0,0,3', '0,0,0,6', '9,0,0,0', '12,0,0,0')) test1 = ((0, 0, 0, 0), (3, 0, 0, 0), (0, 3, 0, 0), (0, 0, 3, 0), (0, 0, 0, 3), (0, 0, 0, 6), (9, 0, 0, 0), (12, 0, 0, 0)) assert (read_points(test_input) == test1) conn_map = connection_map(test1) assert (all_reachable(conn_map, (0, 0, 0, 0)) == {(0, 0, 0, 0), (3, 0, 0, 0), (0, 3, 0, 0), (0, 0, 3, 0), (0, 0, 0, 3), (0, 0, 0, 6)}) assert (all_reachable(conn_map, (9, 0, 0, 0)) == {(9, 0, 0, 0), (12, 0, 0, 0)}) assert (constellations(test1) == 2) test2 = (((- 1), 2, 2, 0), (0, 0, 2, (- 2)), (0, 0, 0, (- 2)), ((- 1), 2, 0, 0), ((- 2), (- 2), (- 2), 2), (3, 0, 2, (- 1)), ((- 1), 3, 2, 2), ((- 1), 0, (- 1), 0), (0, 2, 1, (- 2)), (3, 0, 0, 0)) assert (constellations(test2) == 4)
def test_part1() -> None: '\n \n ' assert (manhattan_distance((0, 0, 0, 0), (3, 0, 0, 0)) == 3) assert (manhattan_distance((9, 0, 0, 0), (0, 3, 0, 0)) == 12) test_input = '\n'.join(('0,0,0,0', '3,0,0,0', '0,3,0,0', '0,0,3,0', '0,0,0,3', '0,0,0,6', '9,0,0,0', '12,0,0,0')) test1 = ((0, 0, 0, 0), (3, 0, 0, 0), (0, 3, 0, 0), (0, 0, 3, 0), (0, 0, 0, 3), (0, 0, 0, 6), (9, 0, 0, 0), (12, 0, 0, 0)) assert (read_points(test_input) == test1) conn_map = connection_map(test1) assert (all_reachable(conn_map, (0, 0, 0, 0)) == {(0, 0, 0, 0), (3, 0, 0, 0), (0, 3, 0, 0), (0, 0, 3, 0), (0, 0, 0, 3), (0, 0, 0, 6)}) assert (all_reachable(conn_map, (9, 0, 0, 0)) == {(9, 0, 0, 0), (12, 0, 0, 0)}) assert (constellations(test1) == 2) test2 = (((- 1), 2, 2, 0), (0, 0, 2, (- 2)), (0, 0, 0, (- 2)), ((- 1), 2, 0, 0), ((- 2), (- 2), (- 2), 2), (3, 0, 2, (- 1)), ((- 1), 3, 2, 2), ((- 1), 0, (- 1), 0), (0, 2, 1, (- 2)), (3, 0, 0, 0)) assert (constellations(test2) == 4)<|docstring|>Examples for Part 1.<|endoftext|>
b4c6bdc1286dc6d2067ac0b6392a84427680351d0f1284df89fb0f7bf6ddc827
def main() -> None:
    """Calculate and output the solutions based on the real puzzle input."""
    raw_data = aocd.get_data(year=2018, day=25)
    parsed = read_points(raw_data)
    part1_answer = constellations(parsed)
    print(f'Part 1: {part1_answer}')
Calculate and output the solutions based on the real puzzle input.
2018/day25.py
main
andypymont/adventofcode
0
python
def main() -> None: '\n \n ' data = aocd.get_data(year=2018, day=25) points = read_points(data) print(f'Part 1: {constellations(points)}')
def main() -> None: '\n \n ' data = aocd.get_data(year=2018, day=25) points = read_points(data) print(f'Part 1: {constellations(points)}')<|docstring|>Calculate and output the solutions based on the real puzzle input.<|endoftext|>
f65186f5fbd3373e04237f78714ffa9a94b024cb741c8bfa70e49cedebb1f8bc
def create_tarfile(
    source_dir: str,
    output_filename: str = 'zipped.tar.gz',
    # NOTE: the serialized annotation was `Callable[([...], ...)]`, which is
    # invalid syntax; the correct form is Callable[[arg], ret].
    exclude_function: Optional[Callable[[tarfile.TarInfo], Optional[tarfile.TarInfo]]] = None,
) -> None:
    """Create a compressed representation of source_dir.

    Args:
        source_dir: Path to source dir.
        output_filename: Name of outputted gz.
        exclude_function: Function that determines whether to exclude file.
    """
    if exclude_function is None:
        def exclude_function(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]:
            """Exclude files from tar.

            Args:
                tarinfo: Any

            Returns:
                tarinfo required for exclude.
            """
            filename = tarinfo.name
            # Drop internal state and virtualenv directories from the archive.
            if ('.zenml/' in filename) or ('venv/' in filename):
                return None
            return tarinfo

    with tarfile.open(output_filename, 'w:gz') as tar:
        tar.add(source_dir, arcname='', filter=exclude_function)
Create a compressed representation of source_dir. Args: source_dir: Path to source dir. output_filename: Name of outputted gz. exclude_function: Function that determines whether to exclude file.
src/zenml/io/utils.py
create_tarfile
Ankur3107/zenml
1
python
def create_tarfile(source_dir: str, output_filename: str='zipped.tar.gz', exclude_function: Optional[Callable[([tarfile.TarInfo], Optional[tarfile.TarInfo])]]=None) -> None: 'Create a compressed representation of source_dir.\n\n Args:\n source_dir: Path to source dir.\n output_filename: Name of outputted gz.\n exclude_function: Function that determines whether to exclude file.\n ' if (exclude_function is None): def exclude_function(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]: 'Exclude files from tar.\n\n Args:\n tarinfo: Any\n\n Returns:\n tarinfo required for exclude.\n ' filename = tarinfo.name if (('.zenml/' in filename) or ('venv/' in filename)): return None else: return tarinfo with tarfile.open(output_filename, 'w:gz') as tar: tar.add(source_dir, arcname=, filter=exclude_function)
def create_tarfile(source_dir: str, output_filename: str='zipped.tar.gz', exclude_function: Optional[Callable[([tarfile.TarInfo], Optional[tarfile.TarInfo])]]=None) -> None: 'Create a compressed representation of source_dir.\n\n Args:\n source_dir: Path to source dir.\n output_filename: Name of outputted gz.\n exclude_function: Function that determines whether to exclude file.\n ' if (exclude_function is None): def exclude_function(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]: 'Exclude files from tar.\n\n Args:\n tarinfo: Any\n\n Returns:\n tarinfo required for exclude.\n ' filename = tarinfo.name if (('.zenml/' in filename) or ('venv/' in filename)): return None else: return tarinfo with tarfile.open(output_filename, 'w:gz') as tar: tar.add(source_dir, arcname=, filter=exclude_function)<|docstring|>Create a compressed representation of source_dir. Args: source_dir: Path to source dir. output_filename: Name of outputted gz. exclude_function: Function that determines whether to exclude file.<|endoftext|>
0d3612760a793d292a2a4d33ecee311b6f1b13c01f3e159c030714f9b9247860
def extract_tarfile(source_tar: str, output_dir: str) -> None:
    """Extracts all files in a compressed tar file to output_dir.

    Args:
        source_tar: Path to a tar compressed file.
        output_dir: Directory where to extract.
    """
    # Remote archives are not supported yet.
    if is_remote(source_tar):
        raise NotImplementedError('Use local tars for now.')
    # NOTE(review): extractall without a `filter` trusts the archive's member
    # paths — confirm source_tar is always trusted input.
    with tarfile.open(source_tar, 'r:gz') as archive:
        archive.extractall(output_dir)
Extracts all files in a compressed tar file to output_dir. Args: source_tar: Path to a tar compressed file. output_dir: Directory where to extract.
src/zenml/io/utils.py
extract_tarfile
Ankur3107/zenml
1
python
def extract_tarfile(source_tar: str, output_dir: str) -> None: 'Extracts all files in a compressed tar file to output_dir.\n\n Args:\n source_tar: Path to a tar compressed file.\n output_dir: Directory where to extract.\n ' if is_remote(source_tar): raise NotImplementedError('Use local tars for now.') with tarfile.open(source_tar, 'r:gz') as tar: tar.extractall(output_dir)
def extract_tarfile(source_tar: str, output_dir: str) -> None: 'Extracts all files in a compressed tar file to output_dir.\n\n Args:\n source_tar: Path to a tar compressed file.\n output_dir: Directory where to extract.\n ' if is_remote(source_tar): raise NotImplementedError('Use local tars for now.') with tarfile.open(source_tar, 'r:gz') as tar: tar.extractall(output_dir)<|docstring|>Extracts all files in a compressed tar file to output_dir. Args: source_tar: Path to a tar compressed file. output_dir: Directory where to extract.<|endoftext|>
453a2f67a8a7ae300a29e1b92e1e01ead44139291d4d329ee1c0a6af344420e0
def get_global_config_directory() -> str:
    """Returns the global config directory for ZenML."""
    config_dir = click.get_app_dir(APP_NAME)
    return config_dir
Returns the global config directory for ZenML.
src/zenml/io/utils.py
get_global_config_directory
Ankur3107/zenml
1
python
def get_global_config_directory() -> str: return click.get_app_dir(APP_NAME)
def get_global_config_directory() -> str: return click.get_app_dir(APP_NAME)<|docstring|>Returns the global config directory for ZenML.<|endoftext|>
2479616ce7b8412c6b3739d62b7ad4635f347e13ebeb6dd68925fe48b15845b8
def write_file_contents_as_string(file_path: str, content: str) -> None:
    """Writes contents of file.

    Args:
        file_path: Path to file.
        content: Contents of file.
    """
    # Explicit encoding so output does not depend on the platform's locale
    # default (the original used the implicit default).
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(content)
Writes contents of file. Args: file_path: Path to file. content: Contents of file.
src/zenml/io/utils.py
write_file_contents_as_string
Ankur3107/zenml
1
python
def write_file_contents_as_string(file_path: str, content: str) -> None: 'Writes contents of file.\n\n Args:\n file_path: Path to file.\n content: Contents of file.\n ' with open(file_path, 'w') as f: f.write(content)
def write_file_contents_as_string(file_path: str, content: str) -> None: 'Writes contents of file.\n\n Args:\n file_path: Path to file.\n content: Contents of file.\n ' with open(file_path, 'w') as f: f.write(content)<|docstring|>Writes contents of file. Args: file_path: Path to file. content: Contents of file.<|endoftext|>
9fc8a99baad7b37ea5f12379b185d086f0d460f65b0198785a4c61cb3544532f
def read_file_contents_as_string(file_path: str) -> str:
    """Reads contents of file.

    Args:
        file_path: Path to file.

    Raises:
        FileNotFoundError: If ``file_path`` does not exist.
    """
    if not file_exists(file_path):
        raise FileNotFoundError(f'{file_path} does not exist!')
    # Use a context manager so the handle is closed deterministically —
    # the original `open(file_path).read()` leaked it until GC.
    with open(file_path) as f:
        return f.read()
Reads contents of file. Args: file_path: Path to file.
src/zenml/io/utils.py
read_file_contents_as_string
Ankur3107/zenml
1
python
def read_file_contents_as_string(file_path: str) -> str: 'Reads contents of file.\n\n Args:\n file_path: Path to file.\n ' if (not file_exists(file_path)): raise FileNotFoundError(f'{file_path} does not exist!') return open(file_path).read()
def read_file_contents_as_string(file_path: str) -> str: 'Reads contents of file.\n\n Args:\n file_path: Path to file.\n ' if (not file_exists(file_path)): raise FileNotFoundError(f'{file_path} does not exist!') return open(file_path).read()<|docstring|>Reads contents of file. Args: file_path: Path to file.<|endoftext|>
bf805c050f774054406f1d15ea00c60596ee115669c1baa50a4b3f5b33487393
def is_gcs_path(path: str) -> bool:
    """Returns True if path is on Google Cloud Storage.

    Args:
        path: Any path as a string.

    Returns:
        True if gcs path, else False.
    """
    gcs_scheme = 'gs://'
    return path.startswith(gcs_scheme)
Returns True if path is on Google Cloud Storage. Args: path: Any path as a string. Returns: True if gcs path, else False.
src/zenml/io/utils.py
is_gcs_path
Ankur3107/zenml
1
python
def is_gcs_path(path: str) -> bool: 'Returns True if path is on Google Cloud Storage.\n\n Args:\n path: Any path as a string.\n\n Returns:\n True if gcs path, else False.\n ' return path.startswith('gs://')
def is_gcs_path(path: str) -> bool: 'Returns True if path is on Google Cloud Storage.\n\n Args:\n path: Any path as a string.\n\n Returns:\n True if gcs path, else False.\n ' return path.startswith('gs://')<|docstring|>Returns True if path is on Google Cloud Storage. Args: path: Any path as a string. Returns: True if gcs path, else False.<|endoftext|>
306d018e419eefb4e0712af1557a288c4803ca5e4b2e138e3c4c8048e29cadbc
def exclude_function(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]:
    """Exclude files from tar.

    Args:
        tarinfo: Any

    Returns:
        tarinfo required for exclude.
    """
    excluded_fragments = ('.zenml/', 'venv/')
    member_name = tarinfo.name
    # Drop internal-state and virtualenv paths; keep everything else.
    if any(fragment in member_name for fragment in excluded_fragments):
        return None
    return tarinfo
Exclude files from tar. Args: tarinfo: Any Returns: tarinfo required for exclude.
src/zenml/io/utils.py
exclude_function
Ankur3107/zenml
1
python
def exclude_function(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]: 'Exclude files from tar.\n\n Args:\n tarinfo: Any\n\n Returns:\n tarinfo required for exclude.\n ' filename = tarinfo.name if (('.zenml/' in filename) or ('venv/' in filename)): return None else: return tarinfo
def exclude_function(tarinfo: tarfile.TarInfo) -> Optional[tarfile.TarInfo]: 'Exclude files from tar.\n\n Args:\n tarinfo: Any\n\n Returns:\n tarinfo required for exclude.\n ' filename = tarinfo.name if (('.zenml/' in filename) or ('venv/' in filename)): return None else: return tarinfo<|docstring|>Exclude files from tar. Args: tarinfo: Any Returns: tarinfo required for exclude.<|endoftext|>
f190740da349a929d18e453ad059a358cd78583d02a22e2e43c2376a23ba68ff
def preprocess_sentence(sentence):
    """Normalize a sentence: convert to simplified Chinese, then apply
    project-specific substitutions via ``change_sentence``.

    (An earlier regex-based punctuation-stripping step was disabled and
    is intentionally not applied here.)
    """
    simplified = Converter('zh-hans').convert(sentence)
    return change_sentence(simplified)
sentence = re.sub(r'\*+', '', sentence) sentence = re.sub( u"[’!"#$%&'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\]^_`{|}~]+", "", sentence )
submit/vec_feat_xgb_test/test.py
preprocess_sentence
ubuntu733/SentencePairs
0
python
def preprocess_sentence(sentence): '\n sentence = re.sub(r\'\\*+\', \'\', sentence)\n sentence = re.sub(\n u"[’!"#$%&\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\]^_`{|}~]+", , sentence\n )\n ' sentence = Converter('zh-hans').convert(sentence) sentence = change_sentence(sentence) return sentence
def preprocess_sentence(sentence): '\n sentence = re.sub(r\'\\*+\', \'\', sentence)\n sentence = re.sub(\n u"[’!"#$%&\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\]^_`{|}~]+", , sentence\n )\n ' sentence = Converter('zh-hans').convert(sentence) sentence = change_sentence(sentence) return sentence<|docstring|>sentence = re.sub(r'\*+', '', sentence) sentence = re.sub( u"[’!"#$%&'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\]^_`{|}~]+", "", sentence )<|endoftext|>
b21716a33aeae6bcafe8c1438bfcc21b97c2c070d8484d9851d6c2615c6991d4
def ape(y, p):
    """Absolute Percentage Error (APE).
    Args:
        y (float): target
        p (float): prediction

    Returns:
        e (float): APE
    """
    # Guard against division by a (near-)zero target.
    assert np.abs(y) > EPS
    relative = p / y
    return np.abs(1 - relative)
Absolute Percentage Error (APE). Args: y (float): target p (float): prediction Returns: e (float): APE
causalml/metrics/regression.py
ape
rsoleimani/causalml
2,919
python
def ape(y, p): 'Absolute Percentage Error (APE).\n Args:\n y (float): target\n p (float): prediction\n\n Returns:\n e (float): APE\n ' assert (np.abs(y) > EPS) return np.abs((1 - (p / y)))
def ape(y, p): 'Absolute Percentage Error (APE).\n Args:\n y (float): target\n p (float): prediction\n\n Returns:\n e (float): APE\n ' assert (np.abs(y) > EPS) return np.abs((1 - (p / y)))<|docstring|>Absolute Percentage Error (APE). Args: y (float): target p (float): prediction Returns: e (float): APE<|endoftext|>
f01195ca4f4434ee9310c83fea36239346cfe5c532c799d01276ff6a8350bdd7
def mape(y, p):
    """Mean Absolute Percentage Error (MAPE).
    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        e (numpy.float64): MAPE
    """
    # Only average over samples whose target is safely away from zero.
    mask = np.abs(y) > EPS
    return np.mean(np.abs(1 - p[mask] / y[mask]))
Mean Absolute Percentage Error (MAPE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): MAPE
causalml/metrics/regression.py
mape
rsoleimani/causalml
2,919
python
def mape(y, p): 'Mean Absolute Percentage Error (MAPE).\n Args:\n y (numpy.array): target\n p (numpy.array): prediction\n\n Returns:\n e (numpy.float64): MAPE\n ' filt = (np.abs(y) > EPS) return np.mean(np.abs((1 - (p[filt] / y[filt]))))
def mape(y, p): 'Mean Absolute Percentage Error (MAPE).\n Args:\n y (numpy.array): target\n p (numpy.array): prediction\n\n Returns:\n e (numpy.float64): MAPE\n ' filt = (np.abs(y) > EPS) return np.mean(np.abs((1 - (p[filt] / y[filt]))))<|docstring|>Mean Absolute Percentage Error (MAPE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): MAPE<|endoftext|>
b6cbc3d50460fdbfc83bc24d9a01fd872acd60ba651560c790061b6da532ce75
def smape(y, p):
    """Symmetric Mean Absolute Percentage Error (sMAPE).
    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        e (numpy.float64): sMAPE
    """
    numerator = np.abs(y - p)
    denominator = np.abs(y) + np.abs(p)
    return 2.0 * np.mean(numerator / denominator)
Symmetric Mean Absolute Percentage Error (sMAPE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): sMAPE
causalml/metrics/regression.py
smape
rsoleimani/causalml
2,919
python
def smape(y, p): 'Symmetric Mean Absolute Percentage Error (sMAPE).\n Args:\n y (numpy.array): target\n p (numpy.array): prediction\n\n Returns:\n e (numpy.float64): sMAPE\n ' return (2.0 * np.mean((np.abs((y - p)) / (np.abs(y) + np.abs(p)))))
def smape(y, p): 'Symmetric Mean Absolute Percentage Error (sMAPE).\n Args:\n y (numpy.array): target\n p (numpy.array): prediction\n\n Returns:\n e (numpy.float64): sMAPE\n ' return (2.0 * np.mean((np.abs((y - p)) / (np.abs(y) + np.abs(p)))))<|docstring|>Symmetric Mean Absolute Percentage Error (sMAPE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): sMAPE<|endoftext|>
1049ef896a923930c98c837901363ef96953baaf438c6e162be1f24ad707887a
def rmse(y, p):
    """Root Mean Squared Error (RMSE).
    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        e (numpy.float64): RMSE
    """
    assert y.shape == p.shape
    mean_squared = mse(y, p)
    return np.sqrt(mean_squared)
Root Mean Squared Error (RMSE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): RMSE
causalml/metrics/regression.py
rmse
rsoleimani/causalml
2,919
python
def rmse(y, p): 'Root Mean Squared Error (RMSE).\n Args:\n y (numpy.array): target\n p (numpy.array): prediction\n\n Returns:\n e (numpy.float64): RMSE\n ' assert (y.shape == p.shape) return np.sqrt(mse(y, p))
def rmse(y, p): 'Root Mean Squared Error (RMSE).\n Args:\n y (numpy.array): target\n p (numpy.array): prediction\n\n Returns:\n e (numpy.float64): RMSE\n ' assert (y.shape == p.shape) return np.sqrt(mse(y, p))<|docstring|>Root Mean Squared Error (RMSE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): RMSE<|endoftext|>
c2d20e7358dda072b7f389923758688778f889486a18fff8fae5081b1cdc5a49
def gini(y, p):
    """Normalized Gini Coefficient.

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        e (numpy.float64): normalized Gini coefficient
    """
    assert y.shape == p.shape
    n_samples = y.shape[0]
    # NOTE: the serialized body used `arr[(:, 0)]` / `[(::(- 1), 0)]`, which
    # is not valid Python; restored to standard NumPy slice indexing.
    arr = np.array([y, p]).transpose()
    # Targets ordered by descending target / descending prediction.
    true_order = arr[arr[:, 0].argsort()][::-1, 0]
    pred_order = arr[arr[:, 1].argsort()][::-1, 0]
    # Lorenz curves for the ideal ordering and the predicted ordering.
    l_true = np.cumsum(true_order) / np.sum(true_order)
    l_pred = np.cumsum(pred_order) / np.sum(pred_order)
    l_ones = np.linspace(1 / n_samples, 1, n_samples)
    # Gini areas; ratio normalizes the predicted Gini by the ideal one.
    g_true = np.sum(l_ones - l_true)
    g_pred = np.sum(l_ones - l_pred)
    return g_pred / g_true
Normalized Gini Coefficient. Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): normalized Gini coefficient
causalml/metrics/regression.py
gini
rsoleimani/causalml
2,919
python
def gini(y, p): 'Normalized Gini Coefficient.\n\n Args:\n y (numpy.array): target\n p (numpy.array): prediction\n\n Returns:\n e (numpy.float64): normalized Gini coefficient\n ' assert (y.shape == p.shape) n_samples = y.shape[0] arr = np.array([y, p]).transpose() true_order = arr[arr[(:, 0)].argsort()][(::(- 1), 0)] pred_order = arr[arr[(:, 1)].argsort()][(::(- 1), 0)] l_true = (np.cumsum(true_order) / np.sum(true_order)) l_pred = (np.cumsum(pred_order) / np.sum(pred_order)) l_ones = np.linspace((1 / n_samples), 1, n_samples) g_true = np.sum((l_ones - l_true)) g_pred = np.sum((l_ones - l_pred)) return (g_pred / g_true)
def gini(y, p): 'Normalized Gini Coefficient.\n\n Args:\n y (numpy.array): target\n p (numpy.array): prediction\n\n Returns:\n e (numpy.float64): normalized Gini coefficient\n ' assert (y.shape == p.shape) n_samples = y.shape[0] arr = np.array([y, p]).transpose() true_order = arr[arr[(:, 0)].argsort()][(::(- 1), 0)] pred_order = arr[arr[(:, 1)].argsort()][(::(- 1), 0)] l_true = (np.cumsum(true_order) / np.sum(true_order)) l_pred = (np.cumsum(pred_order) / np.sum(pred_order)) l_ones = np.linspace((1 / n_samples), 1, n_samples) g_true = np.sum((l_ones - l_true)) g_pred = np.sum((l_ones - l_pred)) return (g_pred / g_true)<|docstring|>Normalized Gini Coefficient. Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): normalized Gini coefficient<|endoftext|>
bac4f47a93ff93e943398745ab80b81d7310b8974fc410508be9115ec8f63cd8
def regression_metrics(y, p, w=None, metrics=None):
    """Log metrics for regressors.

    Args:
        y (numpy.array): target
        p (numpy.array): prediction
        w (numpy.array, optional): a treatment vector (1 or True: treatment, 0 or False: control). If given, log
            metrics for the treatment and control group separately
        metrics (dict, optional): a dictionary of the metric names and functions.
            Defaults to {'RMSE': rmse, 'sMAPE': smape, 'Gini': gini}.
    """
    # Build the default per call instead of using a mutable default argument.
    if metrics is None:
        metrics = {'RMSE': rmse, 'sMAPE': smape, 'Gini': gini}
    assert metrics
    assert y.shape[0] == p.shape[0]
    for name, func in metrics.items():
        if w is not None:
            assert y.shape[0] == w.shape[0]
            # Normalize a 0/1 vector to a boolean mask.
            if w.dtype != bool:
                w = (w == 1)
            logger.info('{:>8s} (Control): {:10.4f}'.format(name, func(y[~w], p[~w])))
            logger.info('{:>8s} (Treatment): {:10.4f}'.format(name, func(y[w], p[w])))
        else:
            logger.info('{:>8s}: {:10.4f}'.format(name, func(y, p)))
Log metrics for regressors. Args: y (numpy.array): target p (numpy.array): prediction w (numpy.array, optional): a treatment vector (1 or True: treatment, 0 or False: control). If given, log metrics for the treatment and control group separately metrics (dict, optional): a dictionary of the metric names and functions
causalml/metrics/regression.py
regression_metrics
rsoleimani/causalml
2,919
python
def regression_metrics(y, p, w=None, metrics={'RMSE': rmse, 'sMAPE': smape, 'Gini': gini}): 'Log metrics for regressors.\n\n Args:\n y (numpy.array): target\n p (numpy.array): prediction\n w (numpy.array, optional): a treatment vector (1 or True: treatment, 0 or False: control). If given, log\n metrics for the treatment and control group separately\n metrics (dict, optional): a dictionary of the metric names and functions\n ' assert metrics assert (y.shape[0] == p.shape[0]) for (name, func) in metrics.items(): if (w is not None): assert (y.shape[0] == w.shape[0]) if (w.dtype != bool): w = (w == 1) logger.info('{:>8s} (Control): {:10.4f}'.format(name, func(y[(~ w)], p[(~ w)]))) logger.info('{:>8s} (Treatment): {:10.4f}'.format(name, func(y[w], p[w]))) else: logger.info('{:>8s}: {:10.4f}'.format(name, func(y, p)))
def regression_metrics(y, p, w=None, metrics={'RMSE': rmse, 'sMAPE': smape, 'Gini': gini}): 'Log metrics for regressors.\n\n Args:\n y (numpy.array): target\n p (numpy.array): prediction\n w (numpy.array, optional): a treatment vector (1 or True: treatment, 0 or False: control). If given, log\n metrics for the treatment and control group separately\n metrics (dict, optional): a dictionary of the metric names and functions\n ' assert metrics assert (y.shape[0] == p.shape[0]) for (name, func) in metrics.items(): if (w is not None): assert (y.shape[0] == w.shape[0]) if (w.dtype != bool): w = (w == 1) logger.info('{:>8s} (Control): {:10.4f}'.format(name, func(y[(~ w)], p[(~ w)]))) logger.info('{:>8s} (Treatment): {:10.4f}'.format(name, func(y[w], p[w]))) else: logger.info('{:>8s}: {:10.4f}'.format(name, func(y, p)))<|docstring|>Log metrics for regressors. Args: y (numpy.array): target p (numpy.array): prediction w (numpy.array, optional): a treatment vector (1 or True: treatment, 0 or False: control). If given, log metrics for the treatment and control group separately metrics (dict, optional): a dictionary of the metric names and functions<|endoftext|>
ff5afd1b20f5384889812f6003a95a2b7995a4fde7dbac37769a0f2a999b7eb0
def is_dirty(self) -> bool:
    """Indicates if the local template root is dirty.

    Only applicable for VCS-tracked templates.
    """
    # Non-git subprojects are never considered dirty.
    if self.vcs != 'git':
        return False
    with local.cwd(self.local_abspath):
        status_output = git('status', '--porcelain')
    return bool(status_output.strip())
Indicates if the local template root is dirty. Only applicable for VCS-tracked templates.
copier/subproject.py
is_dirty
jacobstr/copier
438
python
def is_dirty(self) -> bool: 'Indicates if the local template root is dirty.\n\n Only applicable for VCS-tracked templates.\n ' if (self.vcs == 'git'): with local.cwd(self.local_abspath): return bool(git('status', '--porcelain').strip()) return False
def is_dirty(self) -> bool: 'Indicates if the local template root is dirty.\n\n Only applicable for VCS-tracked templates.\n ' if (self.vcs == 'git'): with local.cwd(self.local_abspath): return bool(git('status', '--porcelain').strip()) return False<|docstring|>Indicates if the local template root is dirty. Only applicable for VCS-tracked templates.<|endoftext|>
2da28a46b70851552eb4609656212e93a3984a6e10ca7d6b35bc44fc6add1609
@property
def _raw_answers(self) -> AnyByStrDict:
    """The last answers, loaded raw as yaml."""
    try:
        raw_text = (self.local_abspath / self.answers_relpath).read_text()
        return yaml.safe_load(raw_text)
    except OSError:
        # Missing/unreadable answers file: behave as if there are none yet.
        return {}
The last answers, loaded raw as yaml.
copier/subproject.py
_raw_answers
jacobstr/copier
438
python
@property def _raw_answers(self) -> AnyByStrDict: try: return yaml.safe_load((self.local_abspath / self.answers_relpath).read_text()) except OSError: return {}
@property def _raw_answers(self) -> AnyByStrDict: try: return yaml.safe_load((self.local_abspath / self.answers_relpath).read_text()) except OSError: return {}<|docstring|>The last answers, loaded raw as yaml.<|endoftext|>
e3bcd15241d4d513e6bcf71a1394f8be90af46557c7875ec8bcdad9642f59eea
@cached_property
def last_answers(self) -> AnyByStrDict:
    """Last answers, excluding private ones (except _src_path and _commit)."""
    allowed_private = {'_src_path', '_commit'}
    result = {}
    for key, value in self._raw_answers.items():
        if key in allowed_private or not key.startswith('_'):
            result[key] = value
    return result
Last answers, excluding private ones (except _src_path and _commit).
copier/subproject.py
last_answers
jacobstr/copier
438
python
@cached_property def last_answers(self) -> AnyByStrDict: return {key: value for (key, value) in self._raw_answers.items() if ((key in {'_src_path', '_commit'}) or (not key.startswith('_')))}
@cached_property def last_answers(self) -> AnyByStrDict: return {key: value for (key, value) in self._raw_answers.items() if ((key in {'_src_path', '_commit'}) or (not key.startswith('_')))}<|docstring|>Last answers, excluding private ones (except _src_path and _commit).<|endoftext|>
675c87fb9b50d1c747d847093b3d6903ea2fabdd3bb93242bf6fb6c354111390
@cached_property
def template(self) -> Optional[Template]:
    """Template, as it was used the last time."""
    answers = self.last_answers
    last_url = answers.get('_src_path')
    last_ref = answers.get('_commit')
    # No recorded source path means there is no template to reconstruct.
    if not last_url:
        return None
    return Template(url=last_url, ref=last_ref)
Template, as it was used the last time.
copier/subproject.py
template
jacobstr/copier
438
python
@cached_property def template(self) -> Optional[Template]: last_url = self.last_answers.get('_src_path') last_ref = self.last_answers.get('_commit') if last_url: return Template(url=last_url, ref=last_ref)
@cached_property def template(self) -> Optional[Template]: last_url = self.last_answers.get('_src_path') last_ref = self.last_answers.get('_commit') if last_url: return Template(url=last_url, ref=last_ref)<|docstring|>Template, as it was used the last time.<|endoftext|>
6231a5ff8d7d44abb8621bd5a614039072ace3ae9cff0b7375dfb0c9a01f234a
@cached_property
def vcs(self) -> Optional[VCSTypes]:
    """VCS type of the subproject."""
    # Only git is detected; anything else yields None.
    return 'git' if is_in_git_repo(self.local_abspath) else None
VCS type of the subproject.
copier/subproject.py
vcs
jacobstr/copier
438
python
@cached_property def vcs(self) -> Optional[VCSTypes]: if is_in_git_repo(self.local_abspath): return 'git'
@cached_property def vcs(self) -> Optional[VCSTypes]: if is_in_git_repo(self.local_abspath): return 'git'<|docstring|>VCS type of the subproject.<|endoftext|>
94b3e872a96ff5b9c29a57ac304496ad66e2734a198ee4d8af878609fb830460
def _get_client(handler): '\n Get the clients using newer methods from the CloudBolt main repo if this CB is running\n a version greater than 9.2. These internal methods implicitly take care of much of the other\n features in CloudBolt such as proxy and ssl verification.\n Otherwise, manually instantiate clients without support for those other CloudBolt settings.\n :param handler:\n :return:\n ' import settings from common.methods import is_version_newer set_progress('Connecting To Azure Management Service...') cb_version = settings.VERSION_INFO['VERSION'] if is_version_newer(cb_version, '9.2'): from resourcehandlers.azure_arm.azure_wrapper import configure_arm_client wrapper = handler.get_api_wrapper() web_client = configure_arm_client(wrapper, WebSiteManagementClient) else: credentials = ServicePrincipalCredentials(client_id=handler.client_id, secret=handler.secret, tenant=handler.tenant_id) web_client = WebSiteManagementClient(credentials, handler.serviceaccount) set_progress('Connection to Azure established') return web_client
Get the clients using newer methods from the CloudBolt main repo if this CB is running a version greater than 9.2. These internal methods implicitly take care of much of the other features in CloudBolt such as proxy and ssl verification. Otherwise, manually instantiate clients without support for those other CloudBolt settings. :param handler: :return:
blueprints/azure_web_app/create_azure_website.py
_get_client
gamethis/cloudbolt-forge
0
python
def _get_client(handler): '\n Get the clients using newer methods from the CloudBolt main repo if this CB is running\n a version greater than 9.2. These internal methods implicitly take care of much of the other\n features in CloudBolt such as proxy and ssl verification.\n Otherwise, manually instantiate clients without support for those other CloudBolt settings.\n :param handler:\n :return:\n ' import settings from common.methods import is_version_newer set_progress('Connecting To Azure Management Service...') cb_version = settings.VERSION_INFO['VERSION'] if is_version_newer(cb_version, '9.2'): from resourcehandlers.azure_arm.azure_wrapper import configure_arm_client wrapper = handler.get_api_wrapper() web_client = configure_arm_client(wrapper, WebSiteManagementClient) else: credentials = ServicePrincipalCredentials(client_id=handler.client_id, secret=handler.secret, tenant=handler.tenant_id) web_client = WebSiteManagementClient(credentials, handler.serviceaccount) set_progress('Connection to Azure established') return web_client
def _get_client(handler): '\n Get the clients using newer methods from the CloudBolt main repo if this CB is running\n a version greater than 9.2. These internal methods implicitly take care of much of the other\n features in CloudBolt such as proxy and ssl verification.\n Otherwise, manually instantiate clients without support for those other CloudBolt settings.\n :param handler:\n :return:\n ' import settings from common.methods import is_version_newer set_progress('Connecting To Azure Management Service...') cb_version = settings.VERSION_INFO['VERSION'] if is_version_newer(cb_version, '9.2'): from resourcehandlers.azure_arm.azure_wrapper import configure_arm_client wrapper = handler.get_api_wrapper() web_client = configure_arm_client(wrapper, WebSiteManagementClient) else: credentials = ServicePrincipalCredentials(client_id=handler.client_id, secret=handler.secret, tenant=handler.tenant_id) web_client = WebSiteManagementClient(credentials, handler.serviceaccount) set_progress('Connection to Azure established') return web_client<|docstring|>Get the clients using newer methods from the CloudBolt main repo if this CB is running a version greater than 9.2. These internal methods implicitly take care of much of the other features in CloudBolt such as proxy and ssl verification. Otherwise, manually instantiate clients without support for those other CloudBolt settings. :param handler: :return:<|endoftext|>
53bdbfcb9fdfaa491febc83e7689cb811020d978689c22438a314bc832ff395a
def get_generator(data_path): 'Create lists of validation and test generators' crop_224_valid_dir = os.path.join(data_path, '224', 'crop', 'Validation') crop_224_test_dir = os.path.join(data_path, '224', 'crop', 'Test') uncrop_224_valid_dir = os.path.join(data_path, '224', 'uncrop', 'Validation') uncrop_224_test_dir = os.path.join(data_path, '224', 'uncrop', 'Test') crop_331_valid_dir = os.path.join(data_path, '331', 'crop', 'Validation') crop_331_test_dir = os.path.join(data_path, '331', 'crop', 'Test') uncrop_331_valid_dir = os.path.join(data_path, '331', 'uncrop', 'Validation') uncrop_331_test_dir = os.path.join(data_path, '331', 'uncrop', 'Test') if (not (os.path.exists(crop_224_valid_dir) and os.path.exists(crop_331_valid_dir) and os.path.exists(uncrop_224_valid_dir) and os.path.exists(uncrop_331_valid_dir) and os.path.exists(crop_224_test_dir) and os.path.exists(uncrop_224_test_dir) and os.path.exists(crop_331_test_dir) and os.path.exists(uncrop_331_test_dir))): print('Data path is invalid. Please check that directory tree is set up as described in README file.') exit() valid_gen = get_datagenerators_folders(uncrop_224_valid_dir, crop_224_valid_dir, uncrop_331_valid_dir, crop_331_valid_dir, batch_size=16) test_gen = get_datagenerators_folders(uncrop_224_test_dir, crop_224_test_dir, uncrop_331_test_dir, crop_331_test_dir, batch_size=16) combined_gen = (valid_gen[0:4] + test_gen[0:4]) return combined_gen
Create lists of validation and test generators
ensemble.py
get_generator
jsheng7/DeepCovidXR
0
python
def get_generator(data_path): crop_224_valid_dir = os.path.join(data_path, '224', 'crop', 'Validation') crop_224_test_dir = os.path.join(data_path, '224', 'crop', 'Test') uncrop_224_valid_dir = os.path.join(data_path, '224', 'uncrop', 'Validation') uncrop_224_test_dir = os.path.join(data_path, '224', 'uncrop', 'Test') crop_331_valid_dir = os.path.join(data_path, '331', 'crop', 'Validation') crop_331_test_dir = os.path.join(data_path, '331', 'crop', 'Test') uncrop_331_valid_dir = os.path.join(data_path, '331', 'uncrop', 'Validation') uncrop_331_test_dir = os.path.join(data_path, '331', 'uncrop', 'Test') if (not (os.path.exists(crop_224_valid_dir) and os.path.exists(crop_331_valid_dir) and os.path.exists(uncrop_224_valid_dir) and os.path.exists(uncrop_331_valid_dir) and os.path.exists(crop_224_test_dir) and os.path.exists(uncrop_224_test_dir) and os.path.exists(crop_331_test_dir) and os.path.exists(uncrop_331_test_dir))): print('Data path is invalid. Please check that directory tree is set up as described in README file.') exit() valid_gen = get_datagenerators_folders(uncrop_224_valid_dir, crop_224_valid_dir, uncrop_331_valid_dir, crop_331_valid_dir, batch_size=16) test_gen = get_datagenerators_folders(uncrop_224_test_dir, crop_224_test_dir, uncrop_331_test_dir, crop_331_test_dir, batch_size=16) combined_gen = (valid_gen[0:4] + test_gen[0:4]) return combined_gen
def get_generator(data_path): crop_224_valid_dir = os.path.join(data_path, '224', 'crop', 'Validation') crop_224_test_dir = os.path.join(data_path, '224', 'crop', 'Test') uncrop_224_valid_dir = os.path.join(data_path, '224', 'uncrop', 'Validation') uncrop_224_test_dir = os.path.join(data_path, '224', 'uncrop', 'Test') crop_331_valid_dir = os.path.join(data_path, '331', 'crop', 'Validation') crop_331_test_dir = os.path.join(data_path, '331', 'crop', 'Test') uncrop_331_valid_dir = os.path.join(data_path, '331', 'uncrop', 'Validation') uncrop_331_test_dir = os.path.join(data_path, '331', 'uncrop', 'Test') if (not (os.path.exists(crop_224_valid_dir) and os.path.exists(crop_331_valid_dir) and os.path.exists(uncrop_224_valid_dir) and os.path.exists(uncrop_331_valid_dir) and os.path.exists(crop_224_test_dir) and os.path.exists(uncrop_224_test_dir) and os.path.exists(crop_331_test_dir) and os.path.exists(uncrop_331_test_dir))): print('Data path is invalid. Please check that directory tree is set up as described in README file.') exit() valid_gen = get_datagenerators_folders(uncrop_224_valid_dir, crop_224_valid_dir, uncrop_331_valid_dir, crop_331_valid_dir, batch_size=16) test_gen = get_datagenerators_folders(uncrop_224_test_dir, crop_224_test_dir, uncrop_331_test_dir, crop_331_test_dir, batch_size=16) combined_gen = (valid_gen[0:4] + test_gen[0:4]) return combined_gen<|docstring|>Create lists of validation and test generators<|endoftext|>
742a65ebf3a8cf98e8397d2ec8504171c375d1f2e5be0fe7efadc214dd483f5b
def create_member(model_name, model, generator_list): 'Create a member of model ensemble' name_parts = model_name.split('_') if (('224' in name_parts) and ('uncrop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[0], val_batches=generator_list[4]) elif (('224' in name_parts) and ('crop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[1], val_batches=generator_list[5]) elif (('331' in name_parts) and ('uncrop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[2], val_batches=generator_list[6]) elif (('331' in name_parts) and ('crop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[3], val_batches=generator_list[7]) return member
Create a member of model ensemble
ensemble.py
create_member
jsheng7/DeepCovidXR
0
python
def create_member(model_name, model, generator_list): name_parts = model_name.split('_') if (('224' in name_parts) and ('uncrop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[0], val_batches=generator_list[4]) elif (('224' in name_parts) and ('crop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[1], val_batches=generator_list[5]) elif (('331' in name_parts) and ('uncrop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[2], val_batches=generator_list[6]) elif (('331' in name_parts) and ('crop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[3], val_batches=generator_list[7]) return member
def create_member(model_name, model, generator_list): name_parts = model_name.split('_') if (('224' in name_parts) and ('uncrop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[0], val_batches=generator_list[4]) elif (('224' in name_parts) and ('crop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[1], val_batches=generator_list[5]) elif (('331' in name_parts) and ('uncrop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[2], val_batches=generator_list[6]) elif (('331' in name_parts) and ('crop' in name_parts)): member = KerasMember(name=model_name, keras_model=model, train_batches=generator_list[3], val_batches=generator_list[7]) return member<|docstring|>Create a member of model ensemble<|endoftext|>
fe08268a20ed01bfcb71f34787423ebcb9a26ab3b325cdbf886efdd559706cd1
def get_members(combined_generator_list, weight_path): 'Creates the list of members for ensembling from a list of data generators and corresponding model weights' model_list = get_model_list(weight_path) model_name_list = ['dense_224_uncrop', 'dense_224_crop', 'dense_331_uncrop', 'dense_331_crop', 'res_224_uncrop', 'res_224_crop', 'res_331_uncrop', 'res_331_crop', 'inception_224_uncrop', 'inception_224_crop', 'inception_331_uncrop', 'inception_331_crop', 'inceptionresnet_224_uncrop', 'inceptionresnet_224_crop', 'inceptionresnet_331_uncrop', 'inceptionresnet_331_crop', 'xception_224_uncrop', 'xception_224_crop', 'xception_331_uncrop', 'xception_331_crop', 'efficient_224_uncrop', 'efficient_224_crop', 'efficient_331_uncrop', 'efficient_331_crop'] member_list = [] for (model_name, model) in zip(model_name_list, model_list): member = create_member(model_name, model, combined_generator_list) member_list.append(member) return member_list
Creates the list of members for ensembling from a list of data generators and corresponding model weights
ensemble.py
get_members
jsheng7/DeepCovidXR
0
python
def get_members(combined_generator_list, weight_path): model_list = get_model_list(weight_path) model_name_list = ['dense_224_uncrop', 'dense_224_crop', 'dense_331_uncrop', 'dense_331_crop', 'res_224_uncrop', 'res_224_crop', 'res_331_uncrop', 'res_331_crop', 'inception_224_uncrop', 'inception_224_crop', 'inception_331_uncrop', 'inception_331_crop', 'inceptionresnet_224_uncrop', 'inceptionresnet_224_crop', 'inceptionresnet_331_uncrop', 'inceptionresnet_331_crop', 'xception_224_uncrop', 'xception_224_crop', 'xception_331_uncrop', 'xception_331_crop', 'efficient_224_uncrop', 'efficient_224_crop', 'efficient_331_uncrop', 'efficient_331_crop'] member_list = [] for (model_name, model) in zip(model_name_list, model_list): member = create_member(model_name, model, combined_generator_list) member_list.append(member) return member_list
def get_members(combined_generator_list, weight_path): model_list = get_model_list(weight_path) model_name_list = ['dense_224_uncrop', 'dense_224_crop', 'dense_331_uncrop', 'dense_331_crop', 'res_224_uncrop', 'res_224_crop', 'res_331_uncrop', 'res_331_crop', 'inception_224_uncrop', 'inception_224_crop', 'inception_331_uncrop', 'inception_331_crop', 'inceptionresnet_224_uncrop', 'inceptionresnet_224_crop', 'inceptionresnet_331_uncrop', 'inceptionresnet_331_crop', 'xception_224_uncrop', 'xception_224_crop', 'xception_331_uncrop', 'xception_331_crop', 'efficient_224_uncrop', 'efficient_224_crop', 'efficient_331_uncrop', 'efficient_331_crop'] member_list = [] for (model_name, model) in zip(model_name_list, model_list): member = create_member(model_name, model, combined_generator_list) member_list.append(member) return member_list<|docstring|>Creates the list of members for ensembling from a list of data generators and corresponding model weights<|endoftext|>
c967b83c5b6feaa6a051b40c5d2c858a3f8c52c4ce2c6761cb0583f8ffdca8ae
def ensemble_members(member_list): 'Calculates weights for each model of an ensemble for weighted averaging of predictions using random\n search of a Dirichlet distribution' wAvgEnsemble = DirichletEnsemble() wAvgEnsemble.add_members(member_list) wAvgEnsemble.fit() wAvgEnsemble.describe() combined_weighted_probs = [] combined_probs = [] for (member, weight) in zip(member_list, wAvgEnsemble.bestweights): weighted_probs = np.multiply(member.val_probs, weight) combined_weighted_probs.append(weighted_probs) combined_probs.append(member.val_probs) combined_weighted_probs = np.asarray(combined_weighted_probs) individual_preds = pd.DataFrame(np.squeeze(np.stack(combined_probs, axis=(- 1))), columns=[member.name for member in member_list]) ensemble_pred = np.sum(combined_weighted_probs, axis=0) ensemble_pred_round = np.round(ensemble_pred) return (wAvgEnsemble.bestweights, ensemble_pred, ensemble_pred_round, individual_preds)
Calculates weights for each model of an ensemble for weighted averaging of predictions using random search of a Dirichlet distribution
ensemble.py
ensemble_members
jsheng7/DeepCovidXR
0
python
def ensemble_members(member_list): 'Calculates weights for each model of an ensemble for weighted averaging of predictions using random\n search of a Dirichlet distribution' wAvgEnsemble = DirichletEnsemble() wAvgEnsemble.add_members(member_list) wAvgEnsemble.fit() wAvgEnsemble.describe() combined_weighted_probs = [] combined_probs = [] for (member, weight) in zip(member_list, wAvgEnsemble.bestweights): weighted_probs = np.multiply(member.val_probs, weight) combined_weighted_probs.append(weighted_probs) combined_probs.append(member.val_probs) combined_weighted_probs = np.asarray(combined_weighted_probs) individual_preds = pd.DataFrame(np.squeeze(np.stack(combined_probs, axis=(- 1))), columns=[member.name for member in member_list]) ensemble_pred = np.sum(combined_weighted_probs, axis=0) ensemble_pred_round = np.round(ensemble_pred) return (wAvgEnsemble.bestweights, ensemble_pred, ensemble_pred_round, individual_preds)
def ensemble_members(member_list): 'Calculates weights for each model of an ensemble for weighted averaging of predictions using random\n search of a Dirichlet distribution' wAvgEnsemble = DirichletEnsemble() wAvgEnsemble.add_members(member_list) wAvgEnsemble.fit() wAvgEnsemble.describe() combined_weighted_probs = [] combined_probs = [] for (member, weight) in zip(member_list, wAvgEnsemble.bestweights): weighted_probs = np.multiply(member.val_probs, weight) combined_weighted_probs.append(weighted_probs) combined_probs.append(member.val_probs) combined_weighted_probs = np.asarray(combined_weighted_probs) individual_preds = pd.DataFrame(np.squeeze(np.stack(combined_probs, axis=(- 1))), columns=[member.name for member in member_list]) ensemble_pred = np.sum(combined_weighted_probs, axis=0) ensemble_pred_round = np.round(ensemble_pred) return (wAvgEnsemble.bestweights, ensemble_pred, ensemble_pred_round, individual_preds)<|docstring|>Calculates weights for each model of an ensemble for weighted averaging of predictions using random search of a Dirichlet distribution<|endoftext|>
e8b01d70992376b36156a78a9fcf8aba3ccbc48767d3d306385d82b8ee69ecb1
def _cnfify(exprs): 'Convert a sequence of expressions to their CNF form.' return [(('(' + str(to_cnf(expr))) + ')') for expr in exprs]
Convert a sequence of expressions to their CNF form.
donatello/factor_32.py
_cnfify
welchbj/donatello
2
python
def _cnfify(exprs): return [(('(' + str(to_cnf(expr))) + ')') for expr in exprs]
def _cnfify(exprs): return [(('(' + str(to_cnf(expr))) + ')') for expr in exprs]<|docstring|>Convert a sequence of expressions to their CNF form.<|endoftext|>
bb55881e0d7cfe68adcc1e4cae7cf1c6131f35120b39c4b5b9deac8afd2e25be
@lru_cache(maxsize=None) def _factor_bitwise(target, num_bits, bad_chars, ops, start_value): 'The engine behind everything novel in this project.\n\n Args:\n target (int): TODO\n bad_chars (Tuple[int]): TODO\n num_bits (int): TODO\n ops (List[str]): TODO\n num_factors (int): TODO\n start_value (int): TODO\n\n Returns:\n List[int]: TODO\n\n ' num_factors = len(ops) factor_clauses = [] for i in range(num_bits): bit_vars = iter(('b{}_f{}'.format(i, j) for j in range(num_factors))) clause = str(int(bool((start_value & (1 << i))))) for (op, bit_var) in zip(ops, bit_vars): clause = '({} {} {})'.format(clause, op, bit_var) if (not (target & (1 << i))): clause = ('~' + clause) factor_clauses.append(clause) char_constraint_clauses = [] for (bad_char, j) in product(bad_chars, range(num_factors)): bit_vars = iter(('b{}_f{}'.format(i, j) for i in range(num_bits))) clause = [(var if (bad_char & (1 << i)) else ('~' + var)) for (i, var) in enumerate(bit_vars)] char_constraint_clauses.append((('~(' + ' and '.join(clause)) + ')')) cnf_clauses = chain(_cnfify(factor_clauses), _cnfify(char_constraint_clauses)) expr = ' and '.join(cnf_clauses) b = BooleanExpression(expr) sat_sol = b.sat_one() if (sat_sol is None): return None factors = [] for j in range(num_factors): factor = 0 for i in range(num_bits): bit = getattr(sat_sol, 'b{}_f{}'.format(i, j)) factor |= (bit << i) factors.append(factor) return factors
The engine behind everything novel in this project. Args: target (int): TODO bad_chars (Tuple[int]): TODO num_bits (int): TODO ops (List[str]): TODO num_factors (int): TODO start_value (int): TODO Returns: List[int]: TODO
donatello/factor_32.py
_factor_bitwise
welchbj/donatello
2
python
@lru_cache(maxsize=None) def _factor_bitwise(target, num_bits, bad_chars, ops, start_value): 'The engine behind everything novel in this project.\n\n Args:\n target (int): TODO\n bad_chars (Tuple[int]): TODO\n num_bits (int): TODO\n ops (List[str]): TODO\n num_factors (int): TODO\n start_value (int): TODO\n\n Returns:\n List[int]: TODO\n\n ' num_factors = len(ops) factor_clauses = [] for i in range(num_bits): bit_vars = iter(('b{}_f{}'.format(i, j) for j in range(num_factors))) clause = str(int(bool((start_value & (1 << i))))) for (op, bit_var) in zip(ops, bit_vars): clause = '({} {} {})'.format(clause, op, bit_var) if (not (target & (1 << i))): clause = ('~' + clause) factor_clauses.append(clause) char_constraint_clauses = [] for (bad_char, j) in product(bad_chars, range(num_factors)): bit_vars = iter(('b{}_f{}'.format(i, j) for i in range(num_bits))) clause = [(var if (bad_char & (1 << i)) else ('~' + var)) for (i, var) in enumerate(bit_vars)] char_constraint_clauses.append((('~(' + ' and '.join(clause)) + ')')) cnf_clauses = chain(_cnfify(factor_clauses), _cnfify(char_constraint_clauses)) expr = ' and '.join(cnf_clauses) b = BooleanExpression(expr) sat_sol = b.sat_one() if (sat_sol is None): return None factors = [] for j in range(num_factors): factor = 0 for i in range(num_bits): bit = getattr(sat_sol, 'b{}_f{}'.format(i, j)) factor |= (bit << i) factors.append(factor) return factors
@lru_cache(maxsize=None) def _factor_bitwise(target, num_bits, bad_chars, ops, start_value): 'The engine behind everything novel in this project.\n\n Args:\n target (int): TODO\n bad_chars (Tuple[int]): TODO\n num_bits (int): TODO\n ops (List[str]): TODO\n num_factors (int): TODO\n start_value (int): TODO\n\n Returns:\n List[int]: TODO\n\n ' num_factors = len(ops) factor_clauses = [] for i in range(num_bits): bit_vars = iter(('b{}_f{}'.format(i, j) for j in range(num_factors))) clause = str(int(bool((start_value & (1 << i))))) for (op, bit_var) in zip(ops, bit_vars): clause = '({} {} {})'.format(clause, op, bit_var) if (not (target & (1 << i))): clause = ('~' + clause) factor_clauses.append(clause) char_constraint_clauses = [] for (bad_char, j) in product(bad_chars, range(num_factors)): bit_vars = iter(('b{}_f{}'.format(i, j) for i in range(num_bits))) clause = [(var if (bad_char & (1 << i)) else ('~' + var)) for (i, var) in enumerate(bit_vars)] char_constraint_clauses.append((('~(' + ' and '.join(clause)) + ')')) cnf_clauses = chain(_cnfify(factor_clauses), _cnfify(char_constraint_clauses)) expr = ' and '.join(cnf_clauses) b = BooleanExpression(expr) sat_sol = b.sat_one() if (sat_sol is None): return None factors = [] for j in range(num_factors): factor = 0 for i in range(num_bits): bit = getattr(sat_sol, 'b{}_f{}'.format(i, j)) factor |= (bit << i) factors.append(factor) return factors<|docstring|>The engine behind everything novel in this project. Args: target (int): TODO bad_chars (Tuple[int]): TODO num_bits (int): TODO ops (List[str]): TODO num_factors (int): TODO start_value (int): TODO Returns: List[int]: TODO<|endoftext|>
0525c28dfee62961e5998e7555ba7c7ab44353b8203d4edff2ce2ef2a070841f
@lru_cache(maxsize=None) def factor_by_byte(target, bad_chars, usable_ops=IMPLEMENTED_OPS, num_factors=2, start_value=0): 'TODO.\n\n Args:\n TODO\n\n Returns:\n List[Factor]: TODO\n\n Raises:\n DonatelloConfigurationError: If `num_factors` is less than 2.\n\n ' if (num_factors < 2): raise DonatelloConfigurationError('`num_factors` must be >= 2') for op_perm in product(usable_ops, repeat=num_factors): if ((start_value == 0) and (op_perm[0] == 'and')): continue msb_factors = _factor_bitwise(((target >> 24) & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, ((start_value >> 24) & 255)) if (msb_factors is None): continue second_msb_factors = _factor_bitwise(((target >> 16) & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, ((start_value >> 16) & 255)) if (second_msb_factors is None): continue second_lsb_factors = _factor_bitwise(((target >> 8) & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, ((start_value >> 8) & 255)) if (second_lsb_factors is None): continue lsb_factors = _factor_bitwise((target & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, (start_value & 255)) if (lsb_factors is None): continue num_factors = len(msb_factors) factors = [] for i in range(num_factors): operand = 0 operand |= (msb_factors[i] << 24) operand |= (second_msb_factors[i] << 16) operand |= (second_lsb_factors[i] << 8) operand |= lsb_factors[i] factors.append(Factor(op_perm[i], operand)) return factors return None
TODO. Args: TODO Returns: List[Factor]: TODO Raises: DonatelloConfigurationError: If `num_factors` is less than 2.
donatello/factor_32.py
factor_by_byte
welchbj/donatello
2
python
@lru_cache(maxsize=None) def factor_by_byte(target, bad_chars, usable_ops=IMPLEMENTED_OPS, num_factors=2, start_value=0): 'TODO.\n\n Args:\n TODO\n\n Returns:\n List[Factor]: TODO\n\n Raises:\n DonatelloConfigurationError: If `num_factors` is less than 2.\n\n ' if (num_factors < 2): raise DonatelloConfigurationError('`num_factors` must be >= 2') for op_perm in product(usable_ops, repeat=num_factors): if ((start_value == 0) and (op_perm[0] == 'and')): continue msb_factors = _factor_bitwise(((target >> 24) & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, ((start_value >> 24) & 255)) if (msb_factors is None): continue second_msb_factors = _factor_bitwise(((target >> 16) & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, ((start_value >> 16) & 255)) if (second_msb_factors is None): continue second_lsb_factors = _factor_bitwise(((target >> 8) & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, ((start_value >> 8) & 255)) if (second_lsb_factors is None): continue lsb_factors = _factor_bitwise((target & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, (start_value & 255)) if (lsb_factors is None): continue num_factors = len(msb_factors) factors = [] for i in range(num_factors): operand = 0 operand |= (msb_factors[i] << 24) operand |= (second_msb_factors[i] << 16) operand |= (second_lsb_factors[i] << 8) operand |= lsb_factors[i] factors.append(Factor(op_perm[i], operand)) return factors return None
@lru_cache(maxsize=None) def factor_by_byte(target, bad_chars, usable_ops=IMPLEMENTED_OPS, num_factors=2, start_value=0): 'TODO.\n\n Args:\n TODO\n\n Returns:\n List[Factor]: TODO\n\n Raises:\n DonatelloConfigurationError: If `num_factors` is less than 2.\n\n ' if (num_factors < 2): raise DonatelloConfigurationError('`num_factors` must be >= 2') for op_perm in product(usable_ops, repeat=num_factors): if ((start_value == 0) and (op_perm[0] == 'and')): continue msb_factors = _factor_bitwise(((target >> 24) & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, ((start_value >> 24) & 255)) if (msb_factors is None): continue second_msb_factors = _factor_bitwise(((target >> 16) & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, ((start_value >> 16) & 255)) if (second_msb_factors is None): continue second_lsb_factors = _factor_bitwise(((target >> 8) & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, ((start_value >> 8) & 255)) if (second_lsb_factors is None): continue lsb_factors = _factor_bitwise((target & 255), NUM_BITS_IN_BYTE, bad_chars, op_perm, (start_value & 255)) if (lsb_factors is None): continue num_factors = len(msb_factors) factors = [] for i in range(num_factors): operand = 0 operand |= (msb_factors[i] << 24) operand |= (second_msb_factors[i] << 16) operand |= (second_lsb_factors[i] << 8) operand |= lsb_factors[i] factors.append(Factor(op_perm[i], operand)) return factors return None<|docstring|>TODO. Args: TODO Returns: List[Factor]: TODO Raises: DonatelloConfigurationError: If `num_factors` is less than 2.<|endoftext|>
b809d36775c9e3406d159766228cd2d51d1fca1b59070aaea5483187b925df18
def run_migrations_offline(): "Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.\n\n " url = config.get_main_option('sqlalchemy.url') context.configure(url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={'paramstyle': 'named'}) with context.begin_transaction(): context.run_migrations()
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
cve_bot/migrations/env.py
run_migrations_offline
weastur/blog
3
python
def run_migrations_offline(): "Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.\n\n " url = config.get_main_option('sqlalchemy.url') context.configure(url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={'paramstyle': 'named'}) with context.begin_transaction(): context.run_migrations()
def run_migrations_offline(): "Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.\n\n " url = config.get_main_option('sqlalchemy.url') context.configure(url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={'paramstyle': 'named'}) with context.begin_transaction(): context.run_migrations()<|docstring|>Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.<|endoftext|>
da3ec54124d380e26618bef34e2b036d8621b6932a250c49e3f13ebc04892ad8
def run_migrations_online(): "Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.\n\n " connectable = engine_from_config(config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) with connectable.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations()
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
cve_bot/migrations/env.py
run_migrations_online
weastur/blog
3
python
def run_migrations_online(): "Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.\n\n " connectable = engine_from_config(config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) with connectable.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations()
def run_migrations_online(): "Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.\n\n " connectable = engine_from_config(config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) with connectable.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations()<|docstring|>Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.<|endoftext|>
2623bd5e73af548dd8cc588fd736b419b6489028594acbe9bd3823ca4bb726dc
def convert_to_4(self): '\n Convert a pre-4.0 configuration file to a 4.0 configuration file.\n ' from six.moves.urllib import parse if (not self.config.has_section('backends')): self.config.add_section('backends') site = parse.urlparse(self.get('site', default_value='')) backend_uri = 'zebra://{username}:{password}@{hostname}'.format(username=self.get('username', default_value=''), password=parse.quote(self.get('password', default_value=''), safe=''), hostname=site.hostname) self.config.set('backends', 'default', backend_uri) self.config.remove_option('default', 'username') self.config.remove_option('default', 'password') self.config.remove_option('default', 'site') if (not self.config.has_section('default_aliases')): self.config.add_section('default_aliases') if (not self.config.has_section('default_shared_aliases')): self.config.add_section('default_shared_aliases') if self.config.has_section('wrmap'): for (alias, mapping) in self.config.items('wrmap'): self.config.set('default_aliases', alias, mapping) self.config.remove_section('wrmap') if self.config.has_section('shared_wrmap'): for (alias, mapping) in self.config.items('shared_wrmap'): self.config.set('default_shared_aliases', alias, mapping) self.config.remove_section('shared_wrmap')
Convert a pre-4.0 configuration file to a 4.0 configuration file.
taxi/settings.py
convert_to_4
simonbru/taxi
0
python
def convert_to_4(self): '\n \n ' from six.moves.urllib import parse if (not self.config.has_section('backends')): self.config.add_section('backends') site = parse.urlparse(self.get('site', default_value=)) backend_uri = 'zebra://{username}:{password}@{hostname}'.format(username=self.get('username', default_value=), password=parse.quote(self.get('password', default_value=), safe=), hostname=site.hostname) self.config.set('backends', 'default', backend_uri) self.config.remove_option('default', 'username') self.config.remove_option('default', 'password') self.config.remove_option('default', 'site') if (not self.config.has_section('default_aliases')): self.config.add_section('default_aliases') if (not self.config.has_section('default_shared_aliases')): self.config.add_section('default_shared_aliases') if self.config.has_section('wrmap'): for (alias, mapping) in self.config.items('wrmap'): self.config.set('default_aliases', alias, mapping) self.config.remove_section('wrmap') if self.config.has_section('shared_wrmap'): for (alias, mapping) in self.config.items('shared_wrmap'): self.config.set('default_shared_aliases', alias, mapping) self.config.remove_section('shared_wrmap')
def convert_to_4(self): '\n \n ' from six.moves.urllib import parse if (not self.config.has_section('backends')): self.config.add_section('backends') site = parse.urlparse(self.get('site', default_value=)) backend_uri = 'zebra://{username}:{password}@{hostname}'.format(username=self.get('username', default_value=), password=parse.quote(self.get('password', default_value=), safe=), hostname=site.hostname) self.config.set('backends', 'default', backend_uri) self.config.remove_option('default', 'username') self.config.remove_option('default', 'password') self.config.remove_option('default', 'site') if (not self.config.has_section('default_aliases')): self.config.add_section('default_aliases') if (not self.config.has_section('default_shared_aliases')): self.config.add_section('default_shared_aliases') if self.config.has_section('wrmap'): for (alias, mapping) in self.config.items('wrmap'): self.config.set('default_aliases', alias, mapping) self.config.remove_section('wrmap') if self.config.has_section('shared_wrmap'): for (alias, mapping) in self.config.items('shared_wrmap'): self.config.set('default_shared_aliases', alias, mapping) self.config.remove_section('shared_wrmap')<|docstring|>Convert a pre-4.0 configuration file to a 4.0 configuration file.<|endoftext|>
1191c3b9275e365671ca1f6f18598afa013671b27aa6a96f008ab0ed1a50a8a4
def update_args(args, new_args, action_groups, exclude=('Model arguments',), silent=('',), force=(), list_arguments=('relation_scorers', 'data_variants')): "\n Update Namespace args with entries in new_args excluding action groups in 'exclude'.\n Logs updated entries at level INFO and differing entries that aren't updated at level WARNING\n :param args: Namespace to update\n :param new_args: Namespace with new values\n :param action_groups: _action_groups attribute of original argument parser\n :param exclude: tuple of action group names to exclude from the update\n :param silent: do not warn when we can't update these arguments\n :param force: update these values even if new_args has the default value and allows arguments in excluded action\n groups to be updated.\n :param list_arguments: arguments that are lists (new values will be appended)\n :return: Void\n " for group in action_groups: for action in group._group_actions: dest = action.dest if (dest == 'help'): continue if (dest not in args): logger.warning(f'argument {dest} not found in old args, adding with default value {action.default}') setattr(args, dest, action.default) new_value = getattr(new_args, dest) old_value = getattr(args, dest) if ((new_value == action.default) and (dest not in force) and (getattr(args, dest, None) is not None)): continue if (old_value != new_value): if ((group.title not in exclude) or (dest in force)): if (dest in list_arguments): changed = False for new_scorer in new_value: if (new_scorer not in old_value): old_value.append(new_scorer) changed = True if changed: setattr(args, dest, old_value) logger.info(f'Appending {group.title} argument {dest} to {old_value} with {new_value}') else: setattr(args, dest, new_value) logger.info(f'updating {group.title} argument {dest} from {old_value} to {new_value}') elif (dest not in silent): logger.warning(f"can't update {group.title} argument {dest} from {old_value} to {new_value}! It's built into the model!")
Update Namespace args with entries in new_args excluding action groups in 'exclude'. Logs updated entries at level INFO and differing entries that aren't updated at level WARNING :param args: Namespace to update :param new_args: Namespace with new values :param action_groups: _action_groups attribute of original argument parser :param exclude: tuple of action group names to exclude from the update :param silent: do not warn when we can't update these arguments :param force: update these values even if new_args has the default value and allows arguments in excluded action groups to be updated. :param list_arguments: arguments that are lists (new values will be appended) :return: Void
OpenKI/UtilityFunctions.py
update_args
drevicko/OpenKI
0
python
def update_args(args, new_args, action_groups, exclude=('Model arguments',), silent=(,), force=(), list_arguments=('relation_scorers', 'data_variants')): "\n Update Namespace args with entries in new_args excluding action groups in 'exclude'.\n Logs updated entries at level INFO and differing entries that aren't updated at level WARNING\n :param args: Namespace to update\n :param new_args: Namespace with new values\n :param action_groups: _action_groups attribute of original argument parser\n :param exclude: tuple of action group names to exclude from the update\n :param silent: do not warn when we can't update these arguments\n :param force: update these values even if new_args has the default value and allows arguments in excluded action\n groups to be updated.\n :param list_arguments: arguments that are lists (new values will be appended)\n :return: Void\n " for group in action_groups: for action in group._group_actions: dest = action.dest if (dest == 'help'): continue if (dest not in args): logger.warning(f'argument {dest} not found in old args, adding with default value {action.default}') setattr(args, dest, action.default) new_value = getattr(new_args, dest) old_value = getattr(args, dest) if ((new_value == action.default) and (dest not in force) and (getattr(args, dest, None) is not None)): continue if (old_value != new_value): if ((group.title not in exclude) or (dest in force)): if (dest in list_arguments): changed = False for new_scorer in new_value: if (new_scorer not in old_value): old_value.append(new_scorer) changed = True if changed: setattr(args, dest, old_value) logger.info(f'Appending {group.title} argument {dest} to {old_value} with {new_value}') else: setattr(args, dest, new_value) logger.info(f'updating {group.title} argument {dest} from {old_value} to {new_value}') elif (dest not in silent): logger.warning(f"can't update {group.title} argument {dest} from {old_value} to {new_value}! It's built into the model!")
def update_args(args, new_args, action_groups, exclude=('Model arguments',), silent=(,), force=(), list_arguments=('relation_scorers', 'data_variants')): "\n Update Namespace args with entries in new_args excluding action groups in 'exclude'.\n Logs updated entries at level INFO and differing entries that aren't updated at level WARNING\n :param args: Namespace to update\n :param new_args: Namespace with new values\n :param action_groups: _action_groups attribute of original argument parser\n :param exclude: tuple of action group names to exclude from the update\n :param silent: do not warn when we can't update these arguments\n :param force: update these values even if new_args has the default value and allows arguments in excluded action\n groups to be updated.\n :param list_arguments: arguments that are lists (new values will be appended)\n :return: Void\n " for group in action_groups: for action in group._group_actions: dest = action.dest if (dest == 'help'): continue if (dest not in args): logger.warning(f'argument {dest} not found in old args, adding with default value {action.default}') setattr(args, dest, action.default) new_value = getattr(new_args, dest) old_value = getattr(args, dest) if ((new_value == action.default) and (dest not in force) and (getattr(args, dest, None) is not None)): continue if (old_value != new_value): if ((group.title not in exclude) or (dest in force)): if (dest in list_arguments): changed = False for new_scorer in new_value: if (new_scorer not in old_value): old_value.append(new_scorer) changed = True if changed: setattr(args, dest, old_value) logger.info(f'Appending {group.title} argument {dest} to {old_value} with {new_value}') else: setattr(args, dest, new_value) logger.info(f'updating {group.title} argument {dest} from {old_value} to {new_value}') elif (dest not in silent): logger.warning(f"can't update {group.title} argument {dest} from {old_value} to {new_value}! 
It's built into the model!")<|docstring|>Update Namespace args with entries in new_args excluding action groups in 'exclude'. Logs updated entries at level INFO and differing entries that aren't updated at level WARNING :param args: Namespace to update :param new_args: Namespace with new values :param action_groups: _action_groups attribute of original argument parser :param exclude: tuple of action group names to exclude from the update :param silent: do not warn when we can't update these arguments :param force: update these values even if new_args has the default value and allows arguments in excluded action groups to be updated. :param list_arguments: arguments that are lists (new values will be appended) :return: Void<|endoftext|>
f740b4bec8769c2d481f07bfcd4507314256ccf2de99e77654cd7f20243cfa00
def refuse_cuda(self, is_cuda=True): "\n Dummy function for monkey patching cuda() on an nn.Parameter (eg: an embedding), forcing it to not respond to\n calls to `cuda()`. You still need to suitably process input tensors such that the module receives cpu inputs\n and process it's outputs such that subsequent processing receives gpu.\n To monkeypatch a parameter `self.weight`, use `self.weight.cuda = refuse_cuda.__get__(self.weight)`.\n A `forward()` method like the following may also be appropriate:\n def forward_cpu(self, input: torch.Tensor) -> torch.Tensor:\n output = super().forward(input.cpu())\n if self.is_cuda_:\n output = output.cuda()\n return output\n :param self:\n :param is_cuda: The value cuda() is supposed do be set to (eg: set cuda() on model outputs to this)\n :return: self\n " self.is_cuda_ = is_cuda return self
Dummy function for monkey patching cuda() on an nn.Parameter (eg: an embedding), forcing it to not respond to calls to `cuda()`. You still need to suitably process input tensors such that the module receives cpu inputs and process it's outputs such that subsequent processing receives gpu. To monkeypatch a parameter `self.weight`, use `self.weight.cuda = refuse_cuda.__get__(self.weight)`. A `forward()` method like the following may also be appropriate: def forward_cpu(self, input: torch.Tensor) -> torch.Tensor: output = super().forward(input.cpu()) if self.is_cuda_: output = output.cuda() return output :param self: :param is_cuda: The value cuda() is supposed do be set to (eg: set cuda() on model outputs to this) :return: self
OpenKI/UtilityFunctions.py
refuse_cuda
drevicko/OpenKI
0
python
def refuse_cuda(self, is_cuda=True): "\n Dummy function for monkey patching cuda() on an nn.Parameter (eg: an embedding), forcing it to not respond to\n calls to `cuda()`. You still need to suitably process input tensors such that the module receives cpu inputs\n and process it's outputs such that subsequent processing receives gpu.\n To monkeypatch a parameter `self.weight`, use `self.weight.cuda = refuse_cuda.__get__(self.weight)`.\n A `forward()` method like the following may also be appropriate:\n def forward_cpu(self, input: torch.Tensor) -> torch.Tensor:\n output = super().forward(input.cpu())\n if self.is_cuda_:\n output = output.cuda()\n return output\n :param self:\n :param is_cuda: The value cuda() is supposed do be set to (eg: set cuda() on model outputs to this)\n :return: self\n " self.is_cuda_ = is_cuda return self
def refuse_cuda(self, is_cuda=True): "\n Dummy function for monkey patching cuda() on an nn.Parameter (eg: an embedding), forcing it to not respond to\n calls to `cuda()`. You still need to suitably process input tensors such that the module receives cpu inputs\n and process it's outputs such that subsequent processing receives gpu.\n To monkeypatch a parameter `self.weight`, use `self.weight.cuda = refuse_cuda.__get__(self.weight)`.\n A `forward()` method like the following may also be appropriate:\n def forward_cpu(self, input: torch.Tensor) -> torch.Tensor:\n output = super().forward(input.cpu())\n if self.is_cuda_:\n output = output.cuda()\n return output\n :param self:\n :param is_cuda: The value cuda() is supposed do be set to (eg: set cuda() on model outputs to this)\n :return: self\n " self.is_cuda_ = is_cuda return self<|docstring|>Dummy function for monkey patching cuda() on an nn.Parameter (eg: an embedding), forcing it to not respond to calls to `cuda()`. You still need to suitably process input tensors such that the module receives cpu inputs and process it's outputs such that subsequent processing receives gpu. To monkeypatch a parameter `self.weight`, use `self.weight.cuda = refuse_cuda.__get__(self.weight)`. A `forward()` method like the following may also be appropriate: def forward_cpu(self, input: torch.Tensor) -> torch.Tensor: output = super().forward(input.cpu()) if self.is_cuda_: output = output.cuda() return output :param self: :param is_cuda: The value cuda() is supposed do be set to (eg: set cuda() on model outputs to this) :return: self<|endoftext|>
f0c7e46306aff33ffeb2adaaeff76b1e387eab9da31e9dd50e1ecb82d7b006d8
def parse_args(): '\n Parses command line arguments.\n ' parser = argparse.ArgumentParser('Reading Comprehension on BaiduRC dataset') parser.add_argument('--prepare', action='store_true', help='create the directories, prepare the vocabulary and embeddings') parser.add_argument('--train', action='store_true', help='train the model') parser.add_argument('--evaluate', action='store_true', help='evaluate the model on dev set') parser.add_argument('--predict', action='store_true', help='predict the answers for test set with trained model') parser.add_argument('--gpu', type=str, default='0', help='specify gpu device') train_settings = parser.add_argument_group('train settings') train_settings.add_argument('--optim', default='adam', help='optimizer type') train_settings.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') train_settings.add_argument('--weight_decay', type=float, default=0, help='weight decay') train_settings.add_argument('--dropout_keep_prob', type=float, default=1, help='dropout keep rate') train_settings.add_argument('--batch_size', type=int, default=32, help='train batch size') train_settings.add_argument('--epochs', type=int, default=10, help='train epochs') model_settings = parser.add_argument_group('model settings') model_settings.add_argument('--algo', choices=['BIDAF', 'MLSTM'], default='BIDAF', help='choose the algorithm to use') model_settings.add_argument('--embed_size', type=int, default=300, help='size of the embeddings') model_settings.add_argument('--hidden_size', type=int, default=150, help='size of LSTM hidden units') model_settings.add_argument('--max_p_num', type=int, default=5, help='max passage num in one sample') model_settings.add_argument('--max_p_len', type=int, default=500, help='max length of passage') model_settings.add_argument('--max_q_len', type=int, default=60, help='max length of question') model_settings.add_argument('--max_a_len', type=int, default=200, help='max length of answer') 
path_settings = parser.add_argument_group('path settings') path_settings.add_argument('--train_files', nargs='+', default=['../data/demo/trainset/search.train.json'], help='list of files that contain the preprocessed train data') path_settings.add_argument('--dev_files', nargs='+', default=['../data/demo/devset/search.dev.json'], help='list of files that contain the preprocessed dev data') path_settings.add_argument('--test_files', nargs='+', default=['../data/demo/testset/search.test.json'], help='list of files that contain the preprocessed test data') path_settings.add_argument('--brc_dir', default='../data/baidu', help='the dir with preprocessed baidu reading comprehension data') path_settings.add_argument('--vocab_dir', default='../data/vocab/', help='the dir to save vocabulary') path_settings.add_argument('--model_dir', default='../data/models/', help='the dir to store models') path_settings.add_argument('--result_dir', default='../data/results/', help='the dir to output the results') path_settings.add_argument('--summary_dir', default='../data/summary/', help='the dir to write tensorboard summary') path_settings.add_argument('--log_path', help='path of the log file. If not set, logs are printed to console') return parser.parse_args()
Parses command line arguments.
tensorflow/run.py
parse_args
hhcyforever/19MRC
971
python
def parse_args(): '\n \n ' parser = argparse.ArgumentParser('Reading Comprehension on BaiduRC dataset') parser.add_argument('--prepare', action='store_true', help='create the directories, prepare the vocabulary and embeddings') parser.add_argument('--train', action='store_true', help='train the model') parser.add_argument('--evaluate', action='store_true', help='evaluate the model on dev set') parser.add_argument('--predict', action='store_true', help='predict the answers for test set with trained model') parser.add_argument('--gpu', type=str, default='0', help='specify gpu device') train_settings = parser.add_argument_group('train settings') train_settings.add_argument('--optim', default='adam', help='optimizer type') train_settings.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') train_settings.add_argument('--weight_decay', type=float, default=0, help='weight decay') train_settings.add_argument('--dropout_keep_prob', type=float, default=1, help='dropout keep rate') train_settings.add_argument('--batch_size', type=int, default=32, help='train batch size') train_settings.add_argument('--epochs', type=int, default=10, help='train epochs') model_settings = parser.add_argument_group('model settings') model_settings.add_argument('--algo', choices=['BIDAF', 'MLSTM'], default='BIDAF', help='choose the algorithm to use') model_settings.add_argument('--embed_size', type=int, default=300, help='size of the embeddings') model_settings.add_argument('--hidden_size', type=int, default=150, help='size of LSTM hidden units') model_settings.add_argument('--max_p_num', type=int, default=5, help='max passage num in one sample') model_settings.add_argument('--max_p_len', type=int, default=500, help='max length of passage') model_settings.add_argument('--max_q_len', type=int, default=60, help='max length of question') model_settings.add_argument('--max_a_len', type=int, default=200, help='max length of answer') path_settings = 
parser.add_argument_group('path settings') path_settings.add_argument('--train_files', nargs='+', default=['../data/demo/trainset/search.train.json'], help='list of files that contain the preprocessed train data') path_settings.add_argument('--dev_files', nargs='+', default=['../data/demo/devset/search.dev.json'], help='list of files that contain the preprocessed dev data') path_settings.add_argument('--test_files', nargs='+', default=['../data/demo/testset/search.test.json'], help='list of files that contain the preprocessed test data') path_settings.add_argument('--brc_dir', default='../data/baidu', help='the dir with preprocessed baidu reading comprehension data') path_settings.add_argument('--vocab_dir', default='../data/vocab/', help='the dir to save vocabulary') path_settings.add_argument('--model_dir', default='../data/models/', help='the dir to store models') path_settings.add_argument('--result_dir', default='../data/results/', help='the dir to output the results') path_settings.add_argument('--summary_dir', default='../data/summary/', help='the dir to write tensorboard summary') path_settings.add_argument('--log_path', help='path of the log file. If not set, logs are printed to console') return parser.parse_args()
def parse_args(): '\n \n ' parser = argparse.ArgumentParser('Reading Comprehension on BaiduRC dataset') parser.add_argument('--prepare', action='store_true', help='create the directories, prepare the vocabulary and embeddings') parser.add_argument('--train', action='store_true', help='train the model') parser.add_argument('--evaluate', action='store_true', help='evaluate the model on dev set') parser.add_argument('--predict', action='store_true', help='predict the answers for test set with trained model') parser.add_argument('--gpu', type=str, default='0', help='specify gpu device') train_settings = parser.add_argument_group('train settings') train_settings.add_argument('--optim', default='adam', help='optimizer type') train_settings.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') train_settings.add_argument('--weight_decay', type=float, default=0, help='weight decay') train_settings.add_argument('--dropout_keep_prob', type=float, default=1, help='dropout keep rate') train_settings.add_argument('--batch_size', type=int, default=32, help='train batch size') train_settings.add_argument('--epochs', type=int, default=10, help='train epochs') model_settings = parser.add_argument_group('model settings') model_settings.add_argument('--algo', choices=['BIDAF', 'MLSTM'], default='BIDAF', help='choose the algorithm to use') model_settings.add_argument('--embed_size', type=int, default=300, help='size of the embeddings') model_settings.add_argument('--hidden_size', type=int, default=150, help='size of LSTM hidden units') model_settings.add_argument('--max_p_num', type=int, default=5, help='max passage num in one sample') model_settings.add_argument('--max_p_len', type=int, default=500, help='max length of passage') model_settings.add_argument('--max_q_len', type=int, default=60, help='max length of question') model_settings.add_argument('--max_a_len', type=int, default=200, help='max length of answer') path_settings = 
parser.add_argument_group('path settings') path_settings.add_argument('--train_files', nargs='+', default=['../data/demo/trainset/search.train.json'], help='list of files that contain the preprocessed train data') path_settings.add_argument('--dev_files', nargs='+', default=['../data/demo/devset/search.dev.json'], help='list of files that contain the preprocessed dev data') path_settings.add_argument('--test_files', nargs='+', default=['../data/demo/testset/search.test.json'], help='list of files that contain the preprocessed test data') path_settings.add_argument('--brc_dir', default='../data/baidu', help='the dir with preprocessed baidu reading comprehension data') path_settings.add_argument('--vocab_dir', default='../data/vocab/', help='the dir to save vocabulary') path_settings.add_argument('--model_dir', default='../data/models/', help='the dir to store models') path_settings.add_argument('--result_dir', default='../data/results/', help='the dir to output the results') path_settings.add_argument('--summary_dir', default='../data/summary/', help='the dir to write tensorboard summary') path_settings.add_argument('--log_path', help='path of the log file. If not set, logs are printed to console') return parser.parse_args()<|docstring|>Parses command line arguments.<|endoftext|>
639a2d567c70acedc2cba171823e04ec7cd5e349111548afcf644db59a80cfe8
def prepare(args): '\n checks data, creates the directories, prepare the vocabulary and embeddings\n ' logger = logging.getLogger('brc') logger.info('Checking the data files...') for data_path in ((args.train_files + args.dev_files) + args.test_files): assert os.path.exists(data_path), '{} file does not exist.'.format(data_path) logger.info('Preparing the directories...') for dir_path in [args.vocab_dir, args.model_dir, args.result_dir, args.summary_dir]: if (not os.path.exists(dir_path)): os.makedirs(dir_path) logger.info('Building vocabulary...') brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, args.train_files, args.dev_files, args.test_files) vocab = Vocab(lower=True) for word in brc_data.word_iter('train'): vocab.add(word) unfiltered_vocab_size = vocab.size() vocab.filter_tokens_by_cnt(min_cnt=2) filtered_num = (unfiltered_vocab_size - vocab.size()) logger.info('After filter {} tokens, the final vocab size is {}'.format(filtered_num, vocab.size())) logger.info('Assigning embeddings...') vocab.randomly_init_embeddings(args.embed_size) logger.info('Saving vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'wb') as fout: pickle.dump(vocab, fout) logger.info('Done with preparing!')
checks data, creates the directories, prepare the vocabulary and embeddings
tensorflow/run.py
prepare
hhcyforever/19MRC
971
python
def prepare(args): '\n \n ' logger = logging.getLogger('brc') logger.info('Checking the data files...') for data_path in ((args.train_files + args.dev_files) + args.test_files): assert os.path.exists(data_path), '{} file does not exist.'.format(data_path) logger.info('Preparing the directories...') for dir_path in [args.vocab_dir, args.model_dir, args.result_dir, args.summary_dir]: if (not os.path.exists(dir_path)): os.makedirs(dir_path) logger.info('Building vocabulary...') brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, args.train_files, args.dev_files, args.test_files) vocab = Vocab(lower=True) for word in brc_data.word_iter('train'): vocab.add(word) unfiltered_vocab_size = vocab.size() vocab.filter_tokens_by_cnt(min_cnt=2) filtered_num = (unfiltered_vocab_size - vocab.size()) logger.info('After filter {} tokens, the final vocab size is {}'.format(filtered_num, vocab.size())) logger.info('Assigning embeddings...') vocab.randomly_init_embeddings(args.embed_size) logger.info('Saving vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'wb') as fout: pickle.dump(vocab, fout) logger.info('Done with preparing!')
def prepare(args): '\n \n ' logger = logging.getLogger('brc') logger.info('Checking the data files...') for data_path in ((args.train_files + args.dev_files) + args.test_files): assert os.path.exists(data_path), '{} file does not exist.'.format(data_path) logger.info('Preparing the directories...') for dir_path in [args.vocab_dir, args.model_dir, args.result_dir, args.summary_dir]: if (not os.path.exists(dir_path)): os.makedirs(dir_path) logger.info('Building vocabulary...') brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, args.train_files, args.dev_files, args.test_files) vocab = Vocab(lower=True) for word in brc_data.word_iter('train'): vocab.add(word) unfiltered_vocab_size = vocab.size() vocab.filter_tokens_by_cnt(min_cnt=2) filtered_num = (unfiltered_vocab_size - vocab.size()) logger.info('After filter {} tokens, the final vocab size is {}'.format(filtered_num, vocab.size())) logger.info('Assigning embeddings...') vocab.randomly_init_embeddings(args.embed_size) logger.info('Saving vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'wb') as fout: pickle.dump(vocab, fout) logger.info('Done with preparing!')<|docstring|>checks data, creates the directories, prepare the vocabulary and embeddings<|endoftext|>
dce7de5528141832208fbafa9fdfe4e44058513b4257efa8244759064864aead
def train(args): '\n trains the reading comprehension model\n ' logger = logging.getLogger('brc') logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, args.train_files, args.dev_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Initialize the model...') rc_model = RCModel(vocab, args) logger.info('Training the model...') rc_model.train(brc_data, args.epochs, args.batch_size, save_dir=args.model_dir, save_prefix=args.algo, dropout_keep_prob=args.dropout_keep_prob) logger.info('Done with model training!')
trains the reading comprehension model
tensorflow/run.py
train
hhcyforever/19MRC
971
python
def train(args): '\n \n ' logger = logging.getLogger('brc') logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, args.train_files, args.dev_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Initialize the model...') rc_model = RCModel(vocab, args) logger.info('Training the model...') rc_model.train(brc_data, args.epochs, args.batch_size, save_dir=args.model_dir, save_prefix=args.algo, dropout_keep_prob=args.dropout_keep_prob) logger.info('Done with model training!')
def train(args): '\n \n ' logger = logging.getLogger('brc') logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, args.train_files, args.dev_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Initialize the model...') rc_model = RCModel(vocab, args) logger.info('Training the model...') rc_model.train(brc_data, args.epochs, args.batch_size, save_dir=args.model_dir, save_prefix=args.algo, dropout_keep_prob=args.dropout_keep_prob) logger.info('Done with model training!')<|docstring|>trains the reading comprehension model<|endoftext|>
4a2e76c3e3805261d40c17adfd9c601427f6007e032299ff11b767f691302152
def evaluate(args): '\n evaluate the trained model on dev files\n ' logger = logging.getLogger('brc') logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert (len(args.dev_files) > 0), 'No dev files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.dev_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Evaluating the model on dev set...') dev_batches = brc_data.gen_mini_batches('dev', args.batch_size, pad_id=vocab.get_id(vocab.pad_token), shuffle=False) (dev_loss, dev_bleu_rouge) = rc_model.evaluate(dev_batches, result_dir=args.result_dir, result_prefix='dev.predicted') logger.info('Loss on dev set: {}'.format(dev_loss)) logger.info('Result on dev set: {}'.format(dev_bleu_rouge)) logger.info('Predicted answers are saved to {}'.format(os.path.join(args.result_dir)))
evaluate the trained model on dev files
tensorflow/run.py
evaluate
hhcyforever/19MRC
971
python
def evaluate(args): '\n \n ' logger = logging.getLogger('brc') logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert (len(args.dev_files) > 0), 'No dev files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.dev_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Evaluating the model on dev set...') dev_batches = brc_data.gen_mini_batches('dev', args.batch_size, pad_id=vocab.get_id(vocab.pad_token), shuffle=False) (dev_loss, dev_bleu_rouge) = rc_model.evaluate(dev_batches, result_dir=args.result_dir, result_prefix='dev.predicted') logger.info('Loss on dev set: {}'.format(dev_loss)) logger.info('Result on dev set: {}'.format(dev_bleu_rouge)) logger.info('Predicted answers are saved to {}'.format(os.path.join(args.result_dir)))
def evaluate(args): '\n \n ' logger = logging.getLogger('brc') logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert (len(args.dev_files) > 0), 'No dev files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.dev_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Evaluating the model on dev set...') dev_batches = brc_data.gen_mini_batches('dev', args.batch_size, pad_id=vocab.get_id(vocab.pad_token), shuffle=False) (dev_loss, dev_bleu_rouge) = rc_model.evaluate(dev_batches, result_dir=args.result_dir, result_prefix='dev.predicted') logger.info('Loss on dev set: {}'.format(dev_loss)) logger.info('Result on dev set: {}'.format(dev_bleu_rouge)) logger.info('Predicted answers are saved to {}'.format(os.path.join(args.result_dir)))<|docstring|>evaluate the trained model on dev files<|endoftext|>
291f1e58d4c76578a729b358a9827146afd42eccca7569daf857bdf8ce380724
def predict(args): '\n predicts answers for test files\n ' logger = logging.getLogger('brc') logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert (len(args.test_files) > 0), 'No test files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, test_files=args.test_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Predicting answers for test set...') test_batches = brc_data.gen_mini_batches('test', args.batch_size, pad_id=vocab.get_id(vocab.pad_token), shuffle=False) rc_model.evaluate(test_batches, result_dir=args.result_dir, result_prefix='test.predicted')
predicts answers for test files
tensorflow/run.py
predict
hhcyforever/19MRC
971
python
def predict(args): '\n \n ' logger = logging.getLogger('brc') logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert (len(args.test_files) > 0), 'No test files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, test_files=args.test_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Predicting answers for test set...') test_batches = brc_data.gen_mini_batches('test', args.batch_size, pad_id=vocab.get_id(vocab.pad_token), shuffle=False) rc_model.evaluate(test_batches, result_dir=args.result_dir, result_prefix='test.predicted')
def predict(args): '\n \n ' logger = logging.getLogger('brc') logger.info('Load data_set and vocab...') with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: vocab = pickle.load(fin) assert (len(args.test_files) > 0), 'No test files are provided.' brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, test_files=args.test_files) logger.info('Converting text into ids...') brc_data.convert_to_ids(vocab) logger.info('Restoring the model...') rc_model = RCModel(vocab, args) rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo) logger.info('Predicting answers for test set...') test_batches = brc_data.gen_mini_batches('test', args.batch_size, pad_id=vocab.get_id(vocab.pad_token), shuffle=False) rc_model.evaluate(test_batches, result_dir=args.result_dir, result_prefix='test.predicted')<|docstring|>predicts answers for test files<|endoftext|>
21f71a4f3ae0033f980dcfc7ec94bcf4fa9b877514861774bffd06ef82d05377
def run(): '\n Prepares and runs the whole system.\n ' args = parse_args() logger = logging.getLogger('brc') logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') if args.log_path: file_handler = logging.FileHandler(args.log_path) file_handler.setLevel(logging.INFO) file_handler.setFormatter(formatter) logger.addHandler(file_handler) else: console_handler = logging.StreamHandler() console_handler.setLevel(logging.INFO) console_handler.setFormatter(formatter) logger.addHandler(console_handler) logger.info('Running with args : {}'.format(args)) os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu if args.prepare: prepare(args) if args.train: train(args) if args.evaluate: evaluate(args) if args.predict: predict(args)
Prepares and runs the whole system.
tensorflow/run.py
run
hhcyforever/19MRC
971
python
def run(): '\n \n ' args = parse_args() logger = logging.getLogger('brc') logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') if args.log_path: file_handler = logging.FileHandler(args.log_path) file_handler.setLevel(logging.INFO) file_handler.setFormatter(formatter) logger.addHandler(file_handler) else: console_handler = logging.StreamHandler() console_handler.setLevel(logging.INFO) console_handler.setFormatter(formatter) logger.addHandler(console_handler) logger.info('Running with args : {}'.format(args)) os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu if args.prepare: prepare(args) if args.train: train(args) if args.evaluate: evaluate(args) if args.predict: predict(args)
def run(): '\n \n ' args = parse_args() logger = logging.getLogger('brc') logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') if args.log_path: file_handler = logging.FileHandler(args.log_path) file_handler.setLevel(logging.INFO) file_handler.setFormatter(formatter) logger.addHandler(file_handler) else: console_handler = logging.StreamHandler() console_handler.setLevel(logging.INFO) console_handler.setFormatter(formatter) logger.addHandler(console_handler) logger.info('Running with args : {}'.format(args)) os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu if args.prepare: prepare(args) if args.train: train(args) if args.evaluate: evaluate(args) if args.predict: predict(args)<|docstring|>Prepares and runs the whole system.<|endoftext|>
667db9f35695ae1d7278a6a7a9a93f0b59805aa76c022a4b81e9d653aa119d6f
def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None): '\n Fab object container that handles combinations of light/heavy Chain pairs.\n\n Args:\n fab (list):\n heavy_chains (ChainCollection):\n light_chains (ChainCollection):\n names (list):\n ' if ((heavy_chains is None) and (light_chains is None) and (fab is None)): raise ValueError('Provide a list of Chain objects or an ChainCollection object') if (isinstance(fab, list) and all((isinstance(fab_i, Fab) for fab_i in fab))): self._fab = fab self._light_chains = ChainCollection([x[0] for x in self._fab]) self._heavy_chains = ChainCollection([x[1] for x in self._fab]) if ((fab is None) and ((heavy_chains is not None) and (light_chains is not None))): if isinstance(heavy_chains, list): self._heavy_chains = ChainCollection(antibody_objects=heavy_chains) elif isinstance(heavy_chains, ChainCollection): self._heavy_chains = heavy_chains else: raise ValueError('Provide a list of Chain objects or an ChainCollection object') if isinstance(light_chains, list): self._light_chains = ChainCollection(antibody_objects=light_chains) elif isinstance(light_chains, ChainCollection): self._light_chains = light_chains else: raise ValueError('Provide a list of Chain objects or an ChainCollection object') if (len(self._light_chains.loading_status()) == 0): self._light_chains.load() if (len(self._heavy_chains.loading_status()) == 0): self._heavy_chains.load() if (self._light_chains.n_ab != self._heavy_chains.n_ab): raise ValueError('Number of heavy chains must be the same of light chains') if (isinstance(names, list) and all((isinstance(name, str) for name in names))): if (len(names) == self._heavy_chains.n_ab): self._names = names else: raise ValueError('Length of name list must be the same as length of heavy_chains/light chains lists') elif (names is None): self._names = ['{} - {}'.format(heavy, light) for (heavy, light) in zip(self._heavy_chains.names, self._light_chains.names)] else: raise ValueError('Names 
expected a list of strings, instead got {}'.format(type(names))) self._n_ab = self._light_chains.n_ab self._pair_sequences = [(heavy + light) for (light, heavy) in zip(self._heavy_chains.sequences, self._light_chains.sequences)] self._internal_heavy_name = self._heavy_chains.names self._internal_light_name = self._light_chains.names
Fab object container that handles combinations of light/heavy Chain pairs. Args: fab (list): heavy_chains (ChainCollection): light_chains (ChainCollection): names (list):
abpytools/core/fab_collection.py
__init__
gf712/AbPyTools
13
python
def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None): '\n Fab object container that handles combinations of light/heavy Chain pairs.\n\n Args:\n fab (list):\n heavy_chains (ChainCollection):\n light_chains (ChainCollection):\n names (list):\n ' if ((heavy_chains is None) and (light_chains is None) and (fab is None)): raise ValueError('Provide a list of Chain objects or an ChainCollection object') if (isinstance(fab, list) and all((isinstance(fab_i, Fab) for fab_i in fab))): self._fab = fab self._light_chains = ChainCollection([x[0] for x in self._fab]) self._heavy_chains = ChainCollection([x[1] for x in self._fab]) if ((fab is None) and ((heavy_chains is not None) and (light_chains is not None))): if isinstance(heavy_chains, list): self._heavy_chains = ChainCollection(antibody_objects=heavy_chains) elif isinstance(heavy_chains, ChainCollection): self._heavy_chains = heavy_chains else: raise ValueError('Provide a list of Chain objects or an ChainCollection object') if isinstance(light_chains, list): self._light_chains = ChainCollection(antibody_objects=light_chains) elif isinstance(light_chains, ChainCollection): self._light_chains = light_chains else: raise ValueError('Provide a list of Chain objects or an ChainCollection object') if (len(self._light_chains.loading_status()) == 0): self._light_chains.load() if (len(self._heavy_chains.loading_status()) == 0): self._heavy_chains.load() if (self._light_chains.n_ab != self._heavy_chains.n_ab): raise ValueError('Number of heavy chains must be the same of light chains') if (isinstance(names, list) and all((isinstance(name, str) for name in names))): if (len(names) == self._heavy_chains.n_ab): self._names = names else: raise ValueError('Length of name list must be the same as length of heavy_chains/light chains lists') elif (names is None): self._names = ['{} - {}'.format(heavy, light) for (heavy, light) in zip(self._heavy_chains.names, self._light_chains.names)] else: raise ValueError('Names 
expected a list of strings, instead got {}'.format(type(names))) self._n_ab = self._light_chains.n_ab self._pair_sequences = [(heavy + light) for (light, heavy) in zip(self._heavy_chains.sequences, self._light_chains.sequences)] self._internal_heavy_name = self._heavy_chains.names self._internal_light_name = self._light_chains.names
def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None): '\n Fab object container that handles combinations of light/heavy Chain pairs.\n\n Args:\n fab (list):\n heavy_chains (ChainCollection):\n light_chains (ChainCollection):\n names (list):\n ' if ((heavy_chains is None) and (light_chains is None) and (fab is None)): raise ValueError('Provide a list of Chain objects or an ChainCollection object') if (isinstance(fab, list) and all((isinstance(fab_i, Fab) for fab_i in fab))): self._fab = fab self._light_chains = ChainCollection([x[0] for x in self._fab]) self._heavy_chains = ChainCollection([x[1] for x in self._fab]) if ((fab is None) and ((heavy_chains is not None) and (light_chains is not None))): if isinstance(heavy_chains, list): self._heavy_chains = ChainCollection(antibody_objects=heavy_chains) elif isinstance(heavy_chains, ChainCollection): self._heavy_chains = heavy_chains else: raise ValueError('Provide a list of Chain objects or an ChainCollection object') if isinstance(light_chains, list): self._light_chains = ChainCollection(antibody_objects=light_chains) elif isinstance(light_chains, ChainCollection): self._light_chains = light_chains else: raise ValueError('Provide a list of Chain objects or an ChainCollection object') if (len(self._light_chains.loading_status()) == 0): self._light_chains.load() if (len(self._heavy_chains.loading_status()) == 0): self._heavy_chains.load() if (self._light_chains.n_ab != self._heavy_chains.n_ab): raise ValueError('Number of heavy chains must be the same of light chains') if (isinstance(names, list) and all((isinstance(name, str) for name in names))): if (len(names) == self._heavy_chains.n_ab): self._names = names else: raise ValueError('Length of name list must be the same as length of heavy_chains/light chains lists') elif (names is None): self._names = ['{} - {}'.format(heavy, light) for (heavy, light) in zip(self._heavy_chains.names, self._light_chains.names)] else: raise ValueError('Names 
expected a list of strings, instead got {}'.format(type(names))) self._n_ab = self._light_chains.n_ab self._pair_sequences = [(heavy + light) for (light, heavy) in zip(self._heavy_chains.sequences, self._light_chains.sequences)] self._internal_heavy_name = self._heavy_chains.names self._internal_light_name = self._light_chains.names<|docstring|>Fab object container that handles combinations of light/heavy Chain pairs. Args: fab (list): heavy_chains (ChainCollection): light_chains (ChainCollection): names (list):<|endoftext|>
4ef9ea154a6d4cccbcd7fe6ef107a0b24a29763910f15e7a85ef395b4b7dc13d
def get_object(self, name): '\n\n :param name: str\n :return:\n ' if (name in self.names): index = self.names.index(name) return self[index] else: raise ValueError('Could not find sequence with specified name')
:param name: str :return:
abpytools/core/fab_collection.py
get_object
gf712/AbPyTools
13
python
def get_object(self, name): '\n\n :param name: str\n :return:\n ' if (name in self.names): index = self.names.index(name) return self[index] else: raise ValueError('Could not find sequence with specified name')
def get_object(self, name): '\n\n :param name: str\n :return:\n ' if (name in self.names): index = self.names.index(name) return self[index] else: raise ValueError('Could not find sequence with specified name')<|docstring|>:param name: str :return:<|endoftext|>
7450bb2bb8b5b5aab87765168aec578a461f4cd703296b57b0a91db1c7071cd6
def cbServerGreeting(proto, username, password): '\n Initial callback - invoked after the server sends us its greet message.\n ' tp = TrivialPrompter() stdio.StandardIO(tp) proto.prompt = tp.prompt proto.display = tp.display return proto.login(username, password).addCallback(cbAuthentication, proto)
Initial callback - invoked after the server sends us its greet message.
twisted/imap.py
cbServerGreeting
pengchenyu111/SpiderLearning
3
python
def cbServerGreeting(proto, username, password): '\n \n ' tp = TrivialPrompter() stdio.StandardIO(tp) proto.prompt = tp.prompt proto.display = tp.display return proto.login(username, password).addCallback(cbAuthentication, proto)
def cbServerGreeting(proto, username, password): '\n \n ' tp = TrivialPrompter() stdio.StandardIO(tp) proto.prompt = tp.prompt proto.display = tp.display return proto.login(username, password).addCallback(cbAuthentication, proto)<|docstring|>Initial callback - invoked after the server sends us its greet message.<|endoftext|>
a6f44e94a74c07262070e471b0a2651760be7662da684d4f886d9a39699a80a8
def ebConnection(reason): '\n Fallback error-handler. If anything goes wrong, log it and quit.\n ' log.startLogging(sys.stdout) log.err(reason) return reason
Fallback error-handler. If anything goes wrong, log it and quit.
twisted/imap.py
ebConnection
pengchenyu111/SpiderLearning
3
python
def ebConnection(reason): '\n \n ' log.startLogging(sys.stdout) log.err(reason) return reason
def ebConnection(reason): '\n \n ' log.startLogging(sys.stdout) log.err(reason) return reason<|docstring|>Fallback error-handler. If anything goes wrong, log it and quit.<|endoftext|>
3a1ff6ae716b634a9c01dc532a4f289fe734b2c9e1deba08290677a321b6756e
def buildProtocol(self, addr): "\n Initiate the protocol instance. Since we are building a simple IMAP\n client, we don't bother checking what capabilities the server has. We\n just add all the authenticators twisted.mail has. Note: Gmail no\n longer uses any of the methods below, it's been using XOAUTH since\n 2010.\n " assert (not self.usedUp) self.usedUp = True p = self.protocol() p.factory = self p.greetDeferred = self.onConn p.registerAuthenticator(imap4.PLAINAuthenticator(self.username)) p.registerAuthenticator(imap4.LOGINAuthenticator(self.username)) p.registerAuthenticator(imap4.CramMD5ClientAuthenticator(self.username)) return p
Initiate the protocol instance. Since we are building a simple IMAP client, we don't bother checking what capabilities the server has. We just add all the authenticators twisted.mail has. Note: Gmail no longer uses any of the methods below, it's been using XOAUTH since 2010.
twisted/imap.py
buildProtocol
pengchenyu111/SpiderLearning
3
python
def buildProtocol(self, addr): "\n Initiate the protocol instance. Since we are building a simple IMAP\n client, we don't bother checking what capabilities the server has. We\n just add all the authenticators twisted.mail has. Note: Gmail no\n longer uses any of the methods below, it's been using XOAUTH since\n 2010.\n " assert (not self.usedUp) self.usedUp = True p = self.protocol() p.factory = self p.greetDeferred = self.onConn p.registerAuthenticator(imap4.PLAINAuthenticator(self.username)) p.registerAuthenticator(imap4.LOGINAuthenticator(self.username)) p.registerAuthenticator(imap4.CramMD5ClientAuthenticator(self.username)) return p
def buildProtocol(self, addr): "\n Initiate the protocol instance. Since we are building a simple IMAP\n client, we don't bother checking what capabilities the server has. We\n just add all the authenticators twisted.mail has. Note: Gmail no\n longer uses any of the methods below, it's been using XOAUTH since\n 2010.\n " assert (not self.usedUp) self.usedUp = True p = self.protocol() p.factory = self p.greetDeferred = self.onConn p.registerAuthenticator(imap4.PLAINAuthenticator(self.username)) p.registerAuthenticator(imap4.LOGINAuthenticator(self.username)) p.registerAuthenticator(imap4.CramMD5ClientAuthenticator(self.username)) return p<|docstring|>Initiate the protocol instance. Since we are building a simple IMAP client, we don't bother checking what capabilities the server has. We just add all the authenticators twisted.mail has. Note: Gmail no longer uses any of the methods below, it's been using XOAUTH since 2010.<|endoftext|>
2eab7fcc536826d68b77adbe59a7e421e0824736a11629cf0dbd1b17401ce954
def check_url(url: str) -> bool: 'Check if the given URL is valid.\n\n Args:\n url: The URL to check.\n\n Returns:\n True if the URL is valid, False otherwise.\n ' return bool(re.match(URL_REGEX, url))
Check if the given URL is valid. Args: url: The URL to check. Returns: True if the URL is valid, False otherwise.
src/meltano/core/tracking/snowplow_tracker.py
check_url
Mu-L/meltano
0
python
def check_url(url: str) -> bool: 'Check if the given URL is valid.\n\n Args:\n url: The URL to check.\n\n Returns:\n True if the URL is valid, False otherwise.\n ' return bool(re.match(URL_REGEX, url))
def check_url(url: str) -> bool: 'Check if the given URL is valid.\n\n Args:\n url: The URL to check.\n\n Returns:\n True if the URL is valid, False otherwise.\n ' return bool(re.match(URL_REGEX, url))<|docstring|>Check if the given URL is valid. Args: url: The URL to check. Returns: True if the URL is valid, False otherwise.<|endoftext|>
897844e070f31bd1ad9432ff6e8ae6118f4141bf72d69f6e9e7582734d143944
def __init__(self, project: Project, *, request_timeout: int=2.0, **kwargs: Any): 'Create a Snowplow Tracker for the Meltano project.\n\n Args:\n project: The Meltano project.\n request_timeout: The timeout for all the event emitters.\n kwargs: Additional arguments to pass to the parent snowplow Tracker class.\n ' settings_service = ProjectSettingsService(project) endpoints = settings_service.get('snowplow.collector_endpoints') emitters: list[Emitter] = [] for endpoint in endpoints: if (not check_url(endpoint)): logger.warning('invalid_snowplow_endpoint', endpoint=endpoint) continue parsed_url = urlparse(endpoint) emitters.append(Emitter(endpoint=(parsed_url.hostname + parsed_url.path), protocol=(parsed_url.scheme or 'http'), port=parsed_url.port, request_timeout=request_timeout)) super().__init__(emitters=emitters, **kwargs)
Create a Snowplow Tracker for the Meltano project. Args: project: The Meltano project. request_timeout: The timeout for all the event emitters. kwargs: Additional arguments to pass to the parent snowplow Tracker class.
src/meltano/core/tracking/snowplow_tracker.py
__init__
Mu-L/meltano
0
python
def __init__(self, project: Project, *, request_timeout: int=2.0, **kwargs: Any): 'Create a Snowplow Tracker for the Meltano project.\n\n Args:\n project: The Meltano project.\n request_timeout: The timeout for all the event emitters.\n kwargs: Additional arguments to pass to the parent snowplow Tracker class.\n ' settings_service = ProjectSettingsService(project) endpoints = settings_service.get('snowplow.collector_endpoints') emitters: list[Emitter] = [] for endpoint in endpoints: if (not check_url(endpoint)): logger.warning('invalid_snowplow_endpoint', endpoint=endpoint) continue parsed_url = urlparse(endpoint) emitters.append(Emitter(endpoint=(parsed_url.hostname + parsed_url.path), protocol=(parsed_url.scheme or 'http'), port=parsed_url.port, request_timeout=request_timeout)) super().__init__(emitters=emitters, **kwargs)
def __init__(self, project: Project, *, request_timeout: int=2.0, **kwargs: Any): 'Create a Snowplow Tracker for the Meltano project.\n\n Args:\n project: The Meltano project.\n request_timeout: The timeout for all the event emitters.\n kwargs: Additional arguments to pass to the parent snowplow Tracker class.\n ' settings_service = ProjectSettingsService(project) endpoints = settings_service.get('snowplow.collector_endpoints') emitters: list[Emitter] = [] for endpoint in endpoints: if (not check_url(endpoint)): logger.warning('invalid_snowplow_endpoint', endpoint=endpoint) continue parsed_url = urlparse(endpoint) emitters.append(Emitter(endpoint=(parsed_url.hostname + parsed_url.path), protocol=(parsed_url.scheme or 'http'), port=parsed_url.port, request_timeout=request_timeout)) super().__init__(emitters=emitters, **kwargs)<|docstring|>Create a Snowplow Tracker for the Meltano project. Args: project: The Meltano project. request_timeout: The timeout for all the event emitters. kwargs: Additional arguments to pass to the parent snowplow Tracker class.<|endoftext|>
a83fc3d302f842819c3117a4ce0a456d95769561951eb8f59a8ab8085fbb1758
def read_vcf(fh) -> pd.DataFrame: 'Read VCF file into a DataFrame' vcf_cols = [] for line in fh: if line.startswith('#CHROM'): vcf_cols = line[1:].strip().split('\t') break df = pd.read_table(fh, comment='#', header=None, names=vcf_cols, dtype=VCF_COL_DTYPES, na_filter=False) return df
Read VCF file into a DataFrame
vcf_consensus_builder/vcf_io.py
read_vcf
leoisl/vcf_consensus_builder
0
python
def read_vcf(fh) -> pd.DataFrame: vcf_cols = [] for line in fh: if line.startswith('#CHROM'): vcf_cols = line[1:].strip().split('\t') break df = pd.read_table(fh, comment='#', header=None, names=vcf_cols, dtype=VCF_COL_DTYPES, na_filter=False) return df
def read_vcf(fh) -> pd.DataFrame: vcf_cols = [] for line in fh: if line.startswith('#CHROM'): vcf_cols = line[1:].strip().split('\t') break df = pd.read_table(fh, comment='#', header=None, names=vcf_cols, dtype=VCF_COL_DTYPES, na_filter=False) return df<|docstring|>Read VCF file into a DataFrame<|endoftext|>
2ba9f2665fe0d7fb70af290aebc093ba38fa7841915f97ff5ceb0b31295e8635
@app.expanded_callback(Output('chart-holder-1', 'children'), [Input('figure-button-1', 'n_clicks')], [State('chart-type-1', 'value'), State('chart-top-n-1', 'value'), State('chart-transpose-1', 'on'), State('example-wordclass-dropdown', 'value')]) def _new_chart(n_clicks, chart_type, top_n, transpose, wordclass, **kwargs): '\n Make new chart by kind. Do it 5 times, once for each chart space\n ' if (n_clicks is None): return no_update try: corpus = _get_corpus(slug) except TypeError: return ([], []) (df, _, _) = _quick_freq(corpus, wordclass=wordclass) if transpose: df = df.T df = df.iloc[(:, :top_n)] figure = _df_to_figure(df, kind=chart_type, width='100%') chart_data = dict(id='chart-1', figure=figure, style=dict(width='100%', height='400px')) chart = dcc.Graph(**chart_data) return chart
Make new chart by kind. Do it 5 times, once for each chart space
example/callbacks.py
_new_chart
interrogator/buzzword
3
python
@app.expanded_callback(Output('chart-holder-1', 'children'), [Input('figure-button-1', 'n_clicks')], [State('chart-type-1', 'value'), State('chart-top-n-1', 'value'), State('chart-transpose-1', 'on'), State('example-wordclass-dropdown', 'value')]) def _new_chart(n_clicks, chart_type, top_n, transpose, wordclass, **kwargs): '\n \n ' if (n_clicks is None): return no_update try: corpus = _get_corpus(slug) except TypeError: return ([], []) (df, _, _) = _quick_freq(corpus, wordclass=wordclass) if transpose: df = df.T df = df.iloc[(:, :top_n)] figure = _df_to_figure(df, kind=chart_type, width='100%') chart_data = dict(id='chart-1', figure=figure, style=dict(width='100%', height='400px')) chart = dcc.Graph(**chart_data) return chart
@app.expanded_callback(Output('chart-holder-1', 'children'), [Input('figure-button-1', 'n_clicks')], [State('chart-type-1', 'value'), State('chart-top-n-1', 'value'), State('chart-transpose-1', 'on'), State('example-wordclass-dropdown', 'value')]) def _new_chart(n_clicks, chart_type, top_n, transpose, wordclass, **kwargs): '\n \n ' if (n_clicks is None): return no_update try: corpus = _get_corpus(slug) except TypeError: return ([], []) (df, _, _) = _quick_freq(corpus, wordclass=wordclass) if transpose: df = df.T df = df.iloc[(:, :top_n)] figure = _df_to_figure(df, kind=chart_type, width='100%') chart_data = dict(id='chart-1', figure=figure, style=dict(width='100%', height='400px')) chart = dcc.Graph(**chart_data) return chart<|docstring|>Make new chart by kind. Do it 5 times, once for each chart space<|endoftext|>
6ee874392c7dc4f7c692ad1d38c0d024d17c4f643432849004289b08f4248d1f
def generate_dataset(path_org, path_trg): '\n Function that generates a dataset in the format\n that we will use in the dataloader from torchtext\n input:\n path_org (string): location of the dataset file i\n in the SCAN format\n path_trg (string): location of where we will save \n the new files\n ' lines = open(path_org, 'r').readlines() with open((path_trg + '.in'), 'w+') as file_in_out, open((path_trg + '.out'), 'w+') as file_out_out: for line in lines: line_list = line.split(':') line_in = line_list[1][1:(- 4)] line_out = line_list[2][1:] file_in_out.write((line_in + '\n')) file_out_out.write(line_out)
Function that generates a dataset in the format that we will use in the dataloader from torchtext input: path_org (string): location of the dataset file i in the SCAN format path_trg (string): location of where we will save the new files
build_dataset.py
generate_dataset
hec44/SCAN-reproduction
0
python
def generate_dataset(path_org, path_trg): '\n Function that generates a dataset in the format\n that we will use in the dataloader from torchtext\n input:\n path_org (string): location of the dataset file i\n in the SCAN format\n path_trg (string): location of where we will save \n the new files\n ' lines = open(path_org, 'r').readlines() with open((path_trg + '.in'), 'w+') as file_in_out, open((path_trg + '.out'), 'w+') as file_out_out: for line in lines: line_list = line.split(':') line_in = line_list[1][1:(- 4)] line_out = line_list[2][1:] file_in_out.write((line_in + '\n')) file_out_out.write(line_out)
def generate_dataset(path_org, path_trg): '\n Function that generates a dataset in the format\n that we will use in the dataloader from torchtext\n input:\n path_org (string): location of the dataset file i\n in the SCAN format\n path_trg (string): location of where we will save \n the new files\n ' lines = open(path_org, 'r').readlines() with open((path_trg + '.in'), 'w+') as file_in_out, open((path_trg + '.out'), 'w+') as file_out_out: for line in lines: line_list = line.split(':') line_in = line_list[1][1:(- 4)] line_out = line_list[2][1:] file_in_out.write((line_in + '\n')) file_out_out.write(line_out)<|docstring|>Function that generates a dataset in the format that we will use in the dataloader from torchtext input: path_org (string): location of the dataset file i in the SCAN format path_trg (string): location of where we will save the new files<|endoftext|>
260c7a0eac50d148f42bbc09df68e1cad65953be2f3bfd36143df175d829500c
def __new__(metacls, cls, bases, namespace, lazy_method: str='get', auto_wire: Union[(bool, Iterable[str])]=None, dependencies: DEPENDENCIES_TYPE=None, use_names: Union[(bool, Iterable[str])]=None, use_type_hints: Union[(bool, Iterable[str])]=None, container: DependencyContainer=None): "\n Metaclass used to generate class with constant dependencies.\n\n This should be used for configuration or external static resources.\n Only public uppercase class attributes will be converted to dependencies.\n\n .. doctest::\n\n >>> import antidote\n >>> class Conf(metaclass=antidote.LazyConstantsMeta):\n ... DOMAIN = 'domain'\n ... _A = 'unchanged'\n ... a = 'unchanged'\n ...\n ... def __init__(self):\n ... self._data = {'domain': 'example.com'}\n ...\n ... def get(self, key):\n ... return self._data[key]\n ...\n >>> Conf._A\n 'unchanged'\n >>> Conf.a\n 'unchanged'\n >>> Conf().DOMAIN\n 'example.com'\n >>> Conf.DOMAIN\n LazyMethodCallDependency(...)\n >>> antidote.world.get(Conf.DOMAIN)\n 'example.com'\n >>> @antidote.inject(dependencies=(Conf.DOMAIN,))\n ... def f(a):\n ... return a\n >>> f()\n 'example.com'\n\n As one can see, neither :code:`a` nor :code:`_A` are changed,\n only :code:`DOMAIN`. Constant's initial value becomes the argument given\n to the lazy method, by default :code:`__call__()`. It has two different\n behaviors depending how it is retrieved:\n\n - Used as a instance attribute, :code:`Conf().DOMAIN`, is is equivalent\n to :code:`Conf().__call__('domain')`. This lets your code stay easy to\n manipulate and test.\n - Used as a class attribute, :code:`Conf.DOMAIN`, it becomes a special\n object used by Antidote to identify a dependency. This lets you inject\n :code:`Conf.DOMAIN` anywhere in your code.\n\n The advantage of using this is that Antidote will only instantiate\n :code:`Conf` once, if and only if necessary. The same is applied for\n every constant, those are singletons. 
Defining your static resources or\n configuration as class constants also makes your code more maintainable,\n as any decent IDE will refactor / find the usage of those in a blink of\n an eye.\n\n Underneath it uses :py:class:`.LazyMethodCall` and :py:func:`.register`.\n It is equivalent to:\n\n .. testcode::\n\n from antidote import LazyMethodCall, register\n\n @register(auto_wire=('__init__', '__call__'))\n class Conf:\n # Required for the example as we specify __init__() explicitly\n # for auto wiring, so it has to exist.\n def __init__(self):\n pass\n\n def __call__(self, key):\n return config[key]\n\n DOMAIN = LazyMethodCall(__call__)('domain')\n\n Args:\n lazy_method: Name of the lazy method to use for the constants.\n Defaults to :code:`'__call__'`.\n auto_wire: Injects automatically the dependencies of the methods\n specified, or only of :code:`__init__()` and :code:`__call__()`\n if True.\n dependencies: Can be either a mapping of arguments name to their\n dependency, an iterable of dependencies or a function which returns\n the dependency given the arguments name. If an iterable is specified,\n the position of the arguments is used to determine their respective\n dependency. An argument may be skipped by using :code:`None` as a\n placeholder. The first argument is always ignored for methods (self)\n and class methods (cls).Type hints are overridden. Defaults to\n :code:`None`.\n use_names: Whether or not the arguments' name should be used as their\n respective dependency. An iterable of argument names may also be\n supplied to restrict this to those. Defaults to :code:`False`.\n use_type_hints: Whether or not the type hints (annotations) should be\n used as the arguments dependency. An iterable of argument names may\n also be specified to restrict this to those. Any type hints from\n the builtins (str, int...) or the typing (:py:class:`~typing.Optional`,\n ...) are ignored. 
Defaults to :code:`True`.\n container: :py:class:`~.core.container.DependencyContainer` to which the\n dependency should be attached. Defaults to the global container,\n :code:`antidote.world`.\n " if (lazy_method not in namespace): raise ValueError('Lazy method {}() is no defined in {}'.format(lazy_method, cls)) resource_class = super().__new__(metacls, cls, bases, namespace) wire_raise_on_missing = True if ((auto_wire is None) or isinstance(auto_wire, bool)): if (auto_wire is False): methods = () else: methods = (lazy_method, '__init__') wire_raise_on_missing = False else: methods = auto_wire if methods: resource_class = wire(resource_class, methods=methods, dependencies=dependencies, use_names=use_names, use_type_hints=use_type_hints, container=container, raise_on_missing=wire_raise_on_missing) resource_class = register(resource_class, auto_wire=False, singleton=True, container=container) func = resource_class.__dict__[lazy_method] for (name, v) in list(resource_class.__dict__.items()): if ((not name.startswith('_')) and name.isupper()): setattr(resource_class, name, LazyMethodCall(func, singleton=True)(v)) return resource_class
Metaclass used to generate class with constant dependencies. This should be used for configuration or external static resources. Only public uppercase class attributes will be converted to dependencies. .. doctest:: >>> import antidote >>> class Conf(metaclass=antidote.LazyConstantsMeta): ... DOMAIN = 'domain' ... _A = 'unchanged' ... a = 'unchanged' ... ... def __init__(self): ... self._data = {'domain': 'example.com'} ... ... def get(self, key): ... return self._data[key] ... >>> Conf._A 'unchanged' >>> Conf.a 'unchanged' >>> Conf().DOMAIN 'example.com' >>> Conf.DOMAIN LazyMethodCallDependency(...) >>> antidote.world.get(Conf.DOMAIN) 'example.com' >>> @antidote.inject(dependencies=(Conf.DOMAIN,)) ... def f(a): ... return a >>> f() 'example.com' As one can see, neither :code:`a` nor :code:`_A` are changed, only :code:`DOMAIN`. Constant's initial value becomes the argument given to the lazy method, by default :code:`__call__()`. It has two different behaviors depending how it is retrieved: - Used as a instance attribute, :code:`Conf().DOMAIN`, is is equivalent to :code:`Conf().__call__('domain')`. This lets your code stay easy to manipulate and test. - Used as a class attribute, :code:`Conf.DOMAIN`, it becomes a special object used by Antidote to identify a dependency. This lets you inject :code:`Conf.DOMAIN` anywhere in your code. The advantage of using this is that Antidote will only instantiate :code:`Conf` once, if and only if necessary. The same is applied for every constant, those are singletons. Defining your static resources or configuration as class constants also makes your code more maintainable, as any decent IDE will refactor / find the usage of those in a blink of an eye. Underneath it uses :py:class:`.LazyMethodCall` and :py:func:`.register`. It is equivalent to: .. 
testcode:: from antidote import LazyMethodCall, register @register(auto_wire=('__init__', '__call__')) class Conf: # Required for the example as we specify __init__() explicitly # for auto wiring, so it has to exist. def __init__(self): pass def __call__(self, key): return config[key] DOMAIN = LazyMethodCall(__call__)('domain') Args: lazy_method: Name of the lazy method to use for the constants. Defaults to :code:`'__call__'`. auto_wire: Injects automatically the dependencies of the methods specified, or only of :code:`__init__()` and :code:`__call__()` if True. dependencies: Can be either a mapping of arguments name to their dependency, an iterable of dependencies or a function which returns the dependency given the arguments name. If an iterable is specified, the position of the arguments is used to determine their respective dependency. An argument may be skipped by using :code:`None` as a placeholder. The first argument is always ignored for methods (self) and class methods (cls).Type hints are overridden. Defaults to :code:`None`. use_names: Whether or not the arguments' name should be used as their respective dependency. An iterable of argument names may also be supplied to restrict this to those. Defaults to :code:`False`. use_type_hints: Whether or not the type hints (annotations) should be used as the arguments dependency. An iterable of argument names may also be specified to restrict this to those. Any type hints from the builtins (str, int...) or the typing (:py:class:`~typing.Optional`, ...) are ignored. Defaults to :code:`True`. container: :py:class:`~.core.container.DependencyContainer` to which the dependency should be attached. Defaults to the global container, :code:`antidote.world`.
src/antidote/helpers/constants.py
__new__
keelerm84/antidote
0
python
def __new__(metacls, cls, bases, namespace, lazy_method: str='get', auto_wire: Union[(bool, Iterable[str])]=None, dependencies: DEPENDENCIES_TYPE=None, use_names: Union[(bool, Iterable[str])]=None, use_type_hints: Union[(bool, Iterable[str])]=None, container: DependencyContainer=None): "\n Metaclass used to generate class with constant dependencies.\n\n This should be used for configuration or external static resources.\n Only public uppercase class attributes will be converted to dependencies.\n\n .. doctest::\n\n >>> import antidote\n >>> class Conf(metaclass=antidote.LazyConstantsMeta):\n ... DOMAIN = 'domain'\n ... _A = 'unchanged'\n ... a = 'unchanged'\n ...\n ... def __init__(self):\n ... self._data = {'domain': 'example.com'}\n ...\n ... def get(self, key):\n ... return self._data[key]\n ...\n >>> Conf._A\n 'unchanged'\n >>> Conf.a\n 'unchanged'\n >>> Conf().DOMAIN\n 'example.com'\n >>> Conf.DOMAIN\n LazyMethodCallDependency(...)\n >>> antidote.world.get(Conf.DOMAIN)\n 'example.com'\n >>> @antidote.inject(dependencies=(Conf.DOMAIN,))\n ... def f(a):\n ... return a\n >>> f()\n 'example.com'\n\n As one can see, neither :code:`a` nor :code:`_A` are changed,\n only :code:`DOMAIN`. Constant's initial value becomes the argument given\n to the lazy method, by default :code:`__call__()`. It has two different\n behaviors depending how it is retrieved:\n\n - Used as a instance attribute, :code:`Conf().DOMAIN`, is is equivalent\n to :code:`Conf().__call__('domain')`. This lets your code stay easy to\n manipulate and test.\n - Used as a class attribute, :code:`Conf.DOMAIN`, it becomes a special\n object used by Antidote to identify a dependency. This lets you inject\n :code:`Conf.DOMAIN` anywhere in your code.\n\n The advantage of using this is that Antidote will only instantiate\n :code:`Conf` once, if and only if necessary. The same is applied for\n every constant, those are singletons. 
Defining your static resources or\n configuration as class constants also makes your code more maintainable,\n as any decent IDE will refactor / find the usage of those in a blink of\n an eye.\n\n Underneath it uses :py:class:`.LazyMethodCall` and :py:func:`.register`.\n It is equivalent to:\n\n .. testcode::\n\n from antidote import LazyMethodCall, register\n\n @register(auto_wire=('__init__', '__call__'))\n class Conf:\n # Required for the example as we specify __init__() explicitly\n # for auto wiring, so it has to exist.\n def __init__(self):\n pass\n\n def __call__(self, key):\n return config[key]\n\n DOMAIN = LazyMethodCall(__call__)('domain')\n\n Args:\n lazy_method: Name of the lazy method to use for the constants.\n Defaults to :code:`'__call__'`.\n auto_wire: Injects automatically the dependencies of the methods\n specified, or only of :code:`__init__()` and :code:`__call__()`\n if True.\n dependencies: Can be either a mapping of arguments name to their\n dependency, an iterable of dependencies or a function which returns\n the dependency given the arguments name. If an iterable is specified,\n the position of the arguments is used to determine their respective\n dependency. An argument may be skipped by using :code:`None` as a\n placeholder. The first argument is always ignored for methods (self)\n and class methods (cls).Type hints are overridden. Defaults to\n :code:`None`.\n use_names: Whether or not the arguments' name should be used as their\n respective dependency. An iterable of argument names may also be\n supplied to restrict this to those. Defaults to :code:`False`.\n use_type_hints: Whether or not the type hints (annotations) should be\n used as the arguments dependency. An iterable of argument names may\n also be specified to restrict this to those. Any type hints from\n the builtins (str, int...) or the typing (:py:class:`~typing.Optional`,\n ...) are ignored. 
Defaults to :code:`True`.\n container: :py:class:`~.core.container.DependencyContainer` to which the\n dependency should be attached. Defaults to the global container,\n :code:`antidote.world`.\n " if (lazy_method not in namespace): raise ValueError('Lazy method {}() is no defined in {}'.format(lazy_method, cls)) resource_class = super().__new__(metacls, cls, bases, namespace) wire_raise_on_missing = True if ((auto_wire is None) or isinstance(auto_wire, bool)): if (auto_wire is False): methods = () else: methods = (lazy_method, '__init__') wire_raise_on_missing = False else: methods = auto_wire if methods: resource_class = wire(resource_class, methods=methods, dependencies=dependencies, use_names=use_names, use_type_hints=use_type_hints, container=container, raise_on_missing=wire_raise_on_missing) resource_class = register(resource_class, auto_wire=False, singleton=True, container=container) func = resource_class.__dict__[lazy_method] for (name, v) in list(resource_class.__dict__.items()): if ((not name.startswith('_')) and name.isupper()): setattr(resource_class, name, LazyMethodCall(func, singleton=True)(v)) return resource_class
def __new__(metacls, cls, bases, namespace, lazy_method: str='get', auto_wire: Union[(bool, Iterable[str])]=None, dependencies: DEPENDENCIES_TYPE=None, use_names: Union[(bool, Iterable[str])]=None, use_type_hints: Union[(bool, Iterable[str])]=None, container: DependencyContainer=None): "\n Metaclass used to generate class with constant dependencies.\n\n This should be used for configuration or external static resources.\n Only public uppercase class attributes will be converted to dependencies.\n\n .. doctest::\n\n >>> import antidote\n >>> class Conf(metaclass=antidote.LazyConstantsMeta):\n ... DOMAIN = 'domain'\n ... _A = 'unchanged'\n ... a = 'unchanged'\n ...\n ... def __init__(self):\n ... self._data = {'domain': 'example.com'}\n ...\n ... def get(self, key):\n ... return self._data[key]\n ...\n >>> Conf._A\n 'unchanged'\n >>> Conf.a\n 'unchanged'\n >>> Conf().DOMAIN\n 'example.com'\n >>> Conf.DOMAIN\n LazyMethodCallDependency(...)\n >>> antidote.world.get(Conf.DOMAIN)\n 'example.com'\n >>> @antidote.inject(dependencies=(Conf.DOMAIN,))\n ... def f(a):\n ... return a\n >>> f()\n 'example.com'\n\n As one can see, neither :code:`a` nor :code:`_A` are changed,\n only :code:`DOMAIN`. Constant's initial value becomes the argument given\n to the lazy method, by default :code:`__call__()`. It has two different\n behaviors depending how it is retrieved:\n\n - Used as a instance attribute, :code:`Conf().DOMAIN`, is is equivalent\n to :code:`Conf().__call__('domain')`. This lets your code stay easy to\n manipulate and test.\n - Used as a class attribute, :code:`Conf.DOMAIN`, it becomes a special\n object used by Antidote to identify a dependency. This lets you inject\n :code:`Conf.DOMAIN` anywhere in your code.\n\n The advantage of using this is that Antidote will only instantiate\n :code:`Conf` once, if and only if necessary. The same is applied for\n every constant, those are singletons. 
Defining your static resources or\n configuration as class constants also makes your code more maintainable,\n as any decent IDE will refactor / find the usage of those in a blink of\n an eye.\n\n Underneath it uses :py:class:`.LazyMethodCall` and :py:func:`.register`.\n It is equivalent to:\n\n .. testcode::\n\n from antidote import LazyMethodCall, register\n\n @register(auto_wire=('__init__', '__call__'))\n class Conf:\n # Required for the example as we specify __init__() explicitly\n # for auto wiring, so it has to exist.\n def __init__(self):\n pass\n\n def __call__(self, key):\n return config[key]\n\n DOMAIN = LazyMethodCall(__call__)('domain')\n\n Args:\n lazy_method: Name of the lazy method to use for the constants.\n Defaults to :code:`'__call__'`.\n auto_wire: Injects automatically the dependencies of the methods\n specified, or only of :code:`__init__()` and :code:`__call__()`\n if True.\n dependencies: Can be either a mapping of arguments name to their\n dependency, an iterable of dependencies or a function which returns\n the dependency given the arguments name. If an iterable is specified,\n the position of the arguments is used to determine their respective\n dependency. An argument may be skipped by using :code:`None` as a\n placeholder. The first argument is always ignored for methods (self)\n and class methods (cls).Type hints are overridden. Defaults to\n :code:`None`.\n use_names: Whether or not the arguments' name should be used as their\n respective dependency. An iterable of argument names may also be\n supplied to restrict this to those. Defaults to :code:`False`.\n use_type_hints: Whether or not the type hints (annotations) should be\n used as the arguments dependency. An iterable of argument names may\n also be specified to restrict this to those. Any type hints from\n the builtins (str, int...) or the typing (:py:class:`~typing.Optional`,\n ...) are ignored. 
Defaults to :code:`True`.\n container: :py:class:`~.core.container.DependencyContainer` to which the\n dependency should be attached. Defaults to the global container,\n :code:`antidote.world`.\n " if (lazy_method not in namespace): raise ValueError('Lazy method {}() is no defined in {}'.format(lazy_method, cls)) resource_class = super().__new__(metacls, cls, bases, namespace) wire_raise_on_missing = True if ((auto_wire is None) or isinstance(auto_wire, bool)): if (auto_wire is False): methods = () else: methods = (lazy_method, '__init__') wire_raise_on_missing = False else: methods = auto_wire if methods: resource_class = wire(resource_class, methods=methods, dependencies=dependencies, use_names=use_names, use_type_hints=use_type_hints, container=container, raise_on_missing=wire_raise_on_missing) resource_class = register(resource_class, auto_wire=False, singleton=True, container=container) func = resource_class.__dict__[lazy_method] for (name, v) in list(resource_class.__dict__.items()): if ((not name.startswith('_')) and name.isupper()): setattr(resource_class, name, LazyMethodCall(func, singleton=True)(v)) return resource_class<|docstring|>Metaclass used to generate class with constant dependencies. This should be used for configuration or external static resources. Only public uppercase class attributes will be converted to dependencies. .. doctest:: >>> import antidote >>> class Conf(metaclass=antidote.LazyConstantsMeta): ... DOMAIN = 'domain' ... _A = 'unchanged' ... a = 'unchanged' ... ... def __init__(self): ... self._data = {'domain': 'example.com'} ... ... def get(self, key): ... return self._data[key] ... >>> Conf._A 'unchanged' >>> Conf.a 'unchanged' >>> Conf().DOMAIN 'example.com' >>> Conf.DOMAIN LazyMethodCallDependency(...) >>> antidote.world.get(Conf.DOMAIN) 'example.com' >>> @antidote.inject(dependencies=(Conf.DOMAIN,)) ... def f(a): ... 
return a >>> f() 'example.com' As one can see, neither :code:`a` nor :code:`_A` are changed, only :code:`DOMAIN`. Constant's initial value becomes the argument given to the lazy method, by default :code:`__call__()`. It has two different behaviors depending how it is retrieved: - Used as a instance attribute, :code:`Conf().DOMAIN`, is is equivalent to :code:`Conf().__call__('domain')`. This lets your code stay easy to manipulate and test. - Used as a class attribute, :code:`Conf.DOMAIN`, it becomes a special object used by Antidote to identify a dependency. This lets you inject :code:`Conf.DOMAIN` anywhere in your code. The advantage of using this is that Antidote will only instantiate :code:`Conf` once, if and only if necessary. The same is applied for every constant, those are singletons. Defining your static resources or configuration as class constants also makes your code more maintainable, as any decent IDE will refactor / find the usage of those in a blink of an eye. Underneath it uses :py:class:`.LazyMethodCall` and :py:func:`.register`. It is equivalent to: .. testcode:: from antidote import LazyMethodCall, register @register(auto_wire=('__init__', '__call__')) class Conf: # Required for the example as we specify __init__() explicitly # for auto wiring, so it has to exist. def __init__(self): pass def __call__(self, key): return config[key] DOMAIN = LazyMethodCall(__call__)('domain') Args: lazy_method: Name of the lazy method to use for the constants. Defaults to :code:`'__call__'`. auto_wire: Injects automatically the dependencies of the methods specified, or only of :code:`__init__()` and :code:`__call__()` if True. dependencies: Can be either a mapping of arguments name to their dependency, an iterable of dependencies or a function which returns the dependency given the arguments name. If an iterable is specified, the position of the arguments is used to determine their respective dependency. 
An argument may be skipped by using :code:`None` as a placeholder. The first argument is always ignored for methods (self) and class methods (cls).Type hints are overridden. Defaults to :code:`None`. use_names: Whether or not the arguments' name should be used as their respective dependency. An iterable of argument names may also be supplied to restrict this to those. Defaults to :code:`False`. use_type_hints: Whether or not the type hints (annotations) should be used as the arguments dependency. An iterable of argument names may also be specified to restrict this to those. Any type hints from the builtins (str, int...) or the typing (:py:class:`~typing.Optional`, ...) are ignored. Defaults to :code:`True`. container: :py:class:`~.core.container.DependencyContainer` to which the dependency should be attached. Defaults to the global container, :code:`antidote.world`.<|endoftext|>
bfa60b07c8e25564054a2c9836ed79b57bb4293baaa809863a7a953ff86de751
@commands.command() async def export(self, ctx, *emoji: Union[(discord.PartialEmoji, discord.Emoji)]): '\n Insult the user.\n Usage: [p]insult <Member>\n Example: [p]insult @Eris#0001\n ' if (len(emoji) == 0): (await ctx.send('No emoji to download!')) return buf = io.BytesIO() with zipfile.ZipFile(buf, 'w') as zf: for e in emoji: asset = e.url url = str(asset) name = f'{e.name}.gif' new_buf = io.BytesIO() (await asset.save(new_buf)) zf.writestr(name, new_buf.getvalue()) buf.seek(0) (await ctx.send(file=discord.File(buf, filename='export.zip')))
Insult the user. Usage: [p]insult <Member> Example: [p]insult @Eris#0001
export_emoji/export_emoji.py
export
edma8378/Eris-Cogs
0
python
@commands.command() async def export(self, ctx, *emoji: Union[(discord.PartialEmoji, discord.Emoji)]): '\n Insult the user.\n Usage: [p]insult <Member>\n Example: [p]insult @Eris#0001\n ' if (len(emoji) == 0): (await ctx.send('No emoji to download!')) return buf = io.BytesIO() with zipfile.ZipFile(buf, 'w') as zf: for e in emoji: asset = e.url url = str(asset) name = f'{e.name}.gif' new_buf = io.BytesIO() (await asset.save(new_buf)) zf.writestr(name, new_buf.getvalue()) buf.seek(0) (await ctx.send(file=discord.File(buf, filename='export.zip')))
@commands.command() async def export(self, ctx, *emoji: Union[(discord.PartialEmoji, discord.Emoji)]): '\n Insult the user.\n Usage: [p]insult <Member>\n Example: [p]insult @Eris#0001\n ' if (len(emoji) == 0): (await ctx.send('No emoji to download!')) return buf = io.BytesIO() with zipfile.ZipFile(buf, 'w') as zf: for e in emoji: asset = e.url url = str(asset) name = f'{e.name}.gif' new_buf = io.BytesIO() (await asset.save(new_buf)) zf.writestr(name, new_buf.getvalue()) buf.seek(0) (await ctx.send(file=discord.File(buf, filename='export.zip')))<|docstring|>Insult the user. Usage: [p]insult <Member> Example: [p]insult @Eris#0001<|endoftext|>
2371328274a0ab4ccba0e68142be003068c7f05da6a5bc34d5e950c0d3ceb87d
@abstractmethod def _get_class(self, item: str): '\n The subclass must have the following implementation of this method\n\n def _get_class(self, item: str):\n return globals()[item]\n\n globals() only contains the objects in the same module where self.__class__ is defined\n ' pass
The subclass must have the following implementation of this method def _get_class(self, item: str): return globals()[item] globals() only contains the objects in the same module where self.__class__ is defined
stringchain/baseclass.py
_get_class
zhangyi-hu/stringchain
1
python
@abstractmethod def _get_class(self, item: str): '\n The subclass must have the following implementation of this method\n\n def _get_class(self, item: str):\n return globals()[item]\n\n globals() only contains the objects in the same module where self.__class__ is defined\n ' pass
@abstractmethod def _get_class(self, item: str): '\n The subclass must have the following implementation of this method\n\n def _get_class(self, item: str):\n return globals()[item]\n\n globals() only contains the objects in the same module where self.__class__ is defined\n ' pass<|docstring|>The subclass must have the following implementation of this method def _get_class(self, item: str): return globals()[item] globals() only contains the objects in the same module where self.__class__ is defined<|endoftext|>
dd5944ac396d1153022dfaafb634793a0c77227e4db79047fc7ab63a95148c33
def clamp(val, valmin, valmax): 'Simple clamping function, limits to [min, max]' if (val < valmin): return valmin if (val > valmax): return valmax return val
Simple clamping function, limits to [min, max]
src/tracking_turtlebot/utils.py
clamp
Christophe-Foyer/tracking_turtlebot
0
python
def clamp(val, valmin, valmax): if (val < valmin): return valmin if (val > valmax): return valmax return val
def clamp(val, valmin, valmax): if (val < valmin): return valmin if (val > valmax): return valmax return val<|docstring|>Simple clamping function, limits to [min, max]<|endoftext|>
d3d5f89bcca0e5b32944239577e89ab2fd04a8cc2aa26fb329c728da02636111
def makeSimpleProfile(output, input, slop): '\n From trutlebot_teleop, adds a bit of smoothing to startup/slowdown\n ' if (input > output): output = min(input, (output + slop)) elif (input < output): output = max(input, (output - slop)) else: output = input return output
From trutlebot_teleop, adds a bit of smoothing to startup/slowdown
src/tracking_turtlebot/utils.py
makeSimpleProfile
Christophe-Foyer/tracking_turtlebot
0
python
def makeSimpleProfile(output, input, slop): '\n \n ' if (input > output): output = min(input, (output + slop)) elif (input < output): output = max(input, (output - slop)) else: output = input return output
def makeSimpleProfile(output, input, slop): '\n \n ' if (input > output): output = min(input, (output + slop)) elif (input < output): output = max(input, (output - slop)) else: output = input return output<|docstring|>From trutlebot_teleop, adds a bit of smoothing to startup/slowdown<|endoftext|>
ce5f937a9ec454111e8b6d3f68d8388a02cfb8dbda144257f1d0b25413df010c
def derivative(self): 'Calculate the derivative, discretely' if (len(self.state_list) > 1): return (self.state_list[(- 1)] - self.state_list[(- 2)]) else: return 0
Calculate the derivative, discretely
src/tracking_turtlebot/utils.py
derivative
Christophe-Foyer/tracking_turtlebot
0
python
def derivative(self): if (len(self.state_list) > 1): return (self.state_list[(- 1)] - self.state_list[(- 2)]) else: return 0
def derivative(self): if (len(self.state_list) > 1): return (self.state_list[(- 1)] - self.state_list[(- 2)]) else: return 0<|docstring|>Calculate the derivative, discretely<|endoftext|>
9a937ba27c0e031d98a3ea105d8c49d5c1eebd8c56b0b3c1116afdbe6c446d07
def _cycle_over_sample_range(start, end, sample_size): '\n Given a range (start, end), returns a generator that will cycle over a population\n sample with size specified by ``sample_size``\n ' return itertools.cycle(random.sample(xrange(start, end), sample_size))
Given a range (start, end), returns a generator that will cycle over a population sample with size specified by ``sample_size``
anon/utils.py
_cycle_over_sample_range
Tesorio/django-anon
146
python
def _cycle_over_sample_range(start, end, sample_size): '\n Given a range (start, end), returns a generator that will cycle over a population\n sample with size specified by ``sample_size``\n ' return itertools.cycle(random.sample(xrange(start, end), sample_size))
def _cycle_over_sample_range(start, end, sample_size): '\n Given a range (start, end), returns a generator that will cycle over a population\n sample with size specified by ``sample_size``\n ' return itertools.cycle(random.sample(xrange(start, end), sample_size))<|docstring|>Given a range (start, end), returns a generator that will cycle over a population sample with size specified by ``sample_size``<|endoftext|>
ef332bc3bb9dbf33a55dff71571c62a3592649444749bec10276b41b982fc31b
def fake_word(min_size=_min_word_size, max_size=20): ' Return fake word\n\n :min_size: Minimum number of chars\n :max_size: Maximum number of chars\n\n Example:\n\n >>> import django_anon as anon\n >>> print(anon.fake_word())\n adipisci\n\n ' if (min_size < _min_word_size): raise ValueError('no such word with this size < min_size') for word in _word_generator: if (min_size <= len(word) <= max_size): return word
Return fake word :min_size: Minimum number of chars :max_size: Maximum number of chars Example: >>> import django_anon as anon >>> print(anon.fake_word()) adipisci
anon/utils.py
fake_word
Tesorio/django-anon
146
python
def fake_word(min_size=_min_word_size, max_size=20): ' Return fake word\n\n :min_size: Minimum number of chars\n :max_size: Maximum number of chars\n\n Example:\n\n >>> import django_anon as anon\n >>> print(anon.fake_word())\n adipisci\n\n ' if (min_size < _min_word_size): raise ValueError('no such word with this size < min_size') for word in _word_generator: if (min_size <= len(word) <= max_size): return word
def fake_word(min_size=_min_word_size, max_size=20): ' Return fake word\n\n :min_size: Minimum number of chars\n :max_size: Maximum number of chars\n\n Example:\n\n >>> import django_anon as anon\n >>> print(anon.fake_word())\n adipisci\n\n ' if (min_size < _min_word_size): raise ValueError('no such word with this size < min_size') for word in _word_generator: if (min_size <= len(word) <= max_size): return word<|docstring|>Return fake word :min_size: Minimum number of chars :max_size: Maximum number of chars Example: >>> import django_anon as anon >>> print(anon.fake_word()) adipisci<|endoftext|>
64c1f97c011df8525f95e89abcf7325a2d0f0d71e4a1a849050598956816a5f6
def fake_text(max_size=255, max_diff_allowed=5, separator=' '): ' Return fake text\n\n :max_size: Maximum number of chars\n :max_diff_allowed: Maximum difference (fidelity) allowed, in chars number\n :separator: Word separator\n\n Example:\n\n >>> print(anon.fake_text())\n alias aliquam aliquid amet animi aperiam architecto asperiores aspernatur assumenda at atque aut autem beatae blanditiis commodi consectetur consequatur consequuntur corporis corrupti culpa cum cumque cupiditate debitis delectus deleniti deserunt dicta\n\n ' if (max_diff_allowed < 1): raise ValueError('max_diff_allowed must be > 0') num_words = max(1, int((max_size / _max_word_size))) words = itertools.islice(_word_generator, num_words) text = separator.join(words) try: if (len(text) > max_size): text = _trim_text(text, separator, max_size) except ValueError: text = text[:max_size] return text
Return fake text :max_size: Maximum number of chars :max_diff_allowed: Maximum difference (fidelity) allowed, in chars number :separator: Word separator Example: >>> print(anon.fake_text()) alias aliquam aliquid amet animi aperiam architecto asperiores aspernatur assumenda at atque aut autem beatae blanditiis commodi consectetur consequatur consequuntur corporis corrupti culpa cum cumque cupiditate debitis delectus deleniti deserunt dicta
anon/utils.py
fake_text
Tesorio/django-anon
146
python
def fake_text(max_size=255, max_diff_allowed=5, separator=' '): ' Return fake text\n\n :max_size: Maximum number of chars\n :max_diff_allowed: Maximum difference (fidelity) allowed, in chars number\n :separator: Word separator\n\n Example:\n\n >>> print(anon.fake_text())\n alias aliquam aliquid amet animi aperiam architecto asperiores aspernatur assumenda at atque aut autem beatae blanditiis commodi consectetur consequatur consequuntur corporis corrupti culpa cum cumque cupiditate debitis delectus deleniti deserunt dicta\n\n ' if (max_diff_allowed < 1): raise ValueError('max_diff_allowed must be > 0') num_words = max(1, int((max_size / _max_word_size))) words = itertools.islice(_word_generator, num_words) text = separator.join(words) try: if (len(text) > max_size): text = _trim_text(text, separator, max_size) except ValueError: text = text[:max_size] return text
def fake_text(max_size=255, max_diff_allowed=5, separator=' '): ' Return fake text\n\n :max_size: Maximum number of chars\n :max_diff_allowed: Maximum difference (fidelity) allowed, in chars number\n :separator: Word separator\n\n Example:\n\n >>> print(anon.fake_text())\n alias aliquam aliquid amet animi aperiam architecto asperiores aspernatur assumenda at atque aut autem beatae blanditiis commodi consectetur consequatur consequuntur corporis corrupti culpa cum cumque cupiditate debitis delectus deleniti deserunt dicta\n\n ' if (max_diff_allowed < 1): raise ValueError('max_diff_allowed must be > 0') num_words = max(1, int((max_size / _max_word_size))) words = itertools.islice(_word_generator, num_words) text = separator.join(words) try: if (len(text) > max_size): text = _trim_text(text, separator, max_size) except ValueError: text = text[:max_size] return text<|docstring|>Return fake text :max_size: Maximum number of chars :max_diff_allowed: Maximum difference (fidelity) allowed, in chars number :separator: Word separator Example: >>> print(anon.fake_text()) alias aliquam aliquid amet animi aperiam architecto asperiores aspernatur assumenda at atque aut autem beatae blanditiis commodi consectetur consequatur consequuntur corporis corrupti culpa cum cumque cupiditate debitis delectus deleniti deserunt dicta<|endoftext|>
b668c54138b88a73822b6e215eccbd7fecff8379d23cbd6e00d06ff778506de0
def fake_small_text(max_size=50): ' Preset for fake_text.\n\n :max_size: Maximum number of chars\n\n Example:\n\n >>> print(anon.fake_small_text())\n Distinctio Dolor Dolore Dolorem Doloremque Dolores\n\n ' return fake_text(max_size=max_size).title()
Preset for fake_text. :max_size: Maximum number of chars Example: >>> print(anon.fake_small_text()) Distinctio Dolor Dolore Dolorem Doloremque Dolores
anon/utils.py
fake_small_text
Tesorio/django-anon
146
python
def fake_small_text(max_size=50): ' Preset for fake_text.\n\n :max_size: Maximum number of chars\n\n Example:\n\n >>> print(anon.fake_small_text())\n Distinctio Dolor Dolore Dolorem Doloremque Dolores\n\n ' return fake_text(max_size=max_size).title()
def fake_small_text(max_size=50): ' Preset for fake_text.\n\n :max_size: Maximum number of chars\n\n Example:\n\n >>> print(anon.fake_small_text())\n Distinctio Dolor Dolore Dolorem Doloremque Dolores\n\n ' return fake_text(max_size=max_size).title()<|docstring|>Preset for fake_text. :max_size: Maximum number of chars Example: >>> print(anon.fake_small_text()) Distinctio Dolor Dolore Dolorem Doloremque Dolores<|endoftext|>
7609f36dd98d77561a5520207ac961695adf5f544fc878dca3301f41048f7759
def fake_name(max_size=15): ' Preset for fake_text. Also returns capitalized words.\n\n :max_size: Maximum number of chars\n\n Example:\n\n >>> print(anon.fake_name())\n Doloribus Ea\n\n ' return fake_text(max_size=max_size).title()
Preset for fake_text. Also returns capitalized words. :max_size: Maximum number of chars Example: >>> print(anon.fake_name()) Doloribus Ea
anon/utils.py
fake_name
Tesorio/django-anon
146
python
def fake_name(max_size=15): ' Preset for fake_text. Also returns capitalized words.\n\n :max_size: Maximum number of chars\n\n Example:\n\n >>> print(anon.fake_name())\n Doloribus Ea\n\n ' return fake_text(max_size=max_size).title()
def fake_name(max_size=15): ' Preset for fake_text. Also returns capitalized words.\n\n :max_size: Maximum number of chars\n\n Example:\n\n >>> print(anon.fake_name())\n Doloribus Ea\n\n ' return fake_text(max_size=max_size).title()<|docstring|>Preset for fake_text. Also returns capitalized words. :max_size: Maximum number of chars Example: >>> print(anon.fake_name()) Doloribus Ea<|endoftext|>
f3d36fd3b197d67d876deeb8b44843b3e1b523eb512192f1ab89e0291eb85220
def fake_username(max_size=10, separator=''): ' Returns fake username\n\n :max_size: Maximum number of chars\n :separator: Word separator\n :rand_range: Range to use when generating random number\n\n Example:\n\n >>> print(anon.fake_username())\n eius54455\n\n ' random_number = str(next(_small_int_generator)) min_size_allowed = (_min_word_size + len(random_number)) if (max_size < min_size_allowed): raise ValueError('username must be >= {}'.format(min_size_allowed)) else: max_size -= len(random_number) return (fake_text(max_size, separator=separator) + random_number)
Returns fake username :max_size: Maximum number of chars :separator: Word separator :rand_range: Range to use when generating random number Example: >>> print(anon.fake_username()) eius54455
anon/utils.py
fake_username
Tesorio/django-anon
146
python
def fake_username(max_size=10, separator=''): ' Returns fake username\n\n    :max_size: Maximum number of chars\n    :separator: Word separator\n    :rand_range: Range to use when generating random number\n\n    Example:\n\n        >>> print(anon.fake_username())\n        eius54455\n\n    ' random_number = str(next(_small_int_generator)) min_size_allowed = (_min_word_size + len(random_number)) if (max_size < min_size_allowed): raise ValueError('username must be >= {}'.format(min_size_allowed)) else: max_size -= len(random_number) return (fake_text(max_size, separator=separator) + random_number)
def fake_username(max_size=10, separator=''): ' Returns fake username\n\n    :max_size: Maximum number of chars\n    :separator: Word separator\n    :rand_range: Range to use when generating random number\n\n    Example:\n\n        >>> print(anon.fake_username())\n        eius54455\n\n    ' random_number = str(next(_small_int_generator)) min_size_allowed = (_min_word_size + len(random_number)) if (max_size < min_size_allowed): raise ValueError('username must be >= {}'.format(min_size_allowed)) else: max_size -= len(random_number) return (fake_text(max_size, separator=separator) + random_number)<|docstring|>Returns fake username :max_size: Maximum number of chars :separator: Word separator :rand_range: Range to use when generating random number Example: >>> print(anon.fake_username()) eius54455<|endoftext|>
f3f6ab3c703081f8712cfe020c640b1e2ffab4eb60ac6575d95c05fc3a229fb3
def fake_email(max_size=40, suffix='@example.com'): ' Returns fake email address\n\n :max_size: Maximum number of chars\n :suffix: Suffix to add to email addresses (including @)\n\n Example:\n\n >>> print(anon.fake_email())\n example@example.com\n\n ' min_size_allowed = (_min_word_size + len(suffix)) if ((max_size + len(suffix)) > 254): raise ValueError('email address must not exceed 254 chars') elif (max_size < min_size_allowed): raise ValueError('max_size must be >= {}'.format(min_size_allowed)) else: max_size -= len(suffix) return (fake_username(max_size, separator='.') + suffix)
Returns fake email address :max_size: Maximum number of chars :suffix: Suffix to add to email addresses (including @) Example: >>> print(anon.fake_email()) example@example.com
anon/utils.py
fake_email
Tesorio/django-anon
146
python
def fake_email(max_size=40, suffix='@example.com'): ' Returns fake email address\n\n :max_size: Maximum number of chars\n :suffix: Suffix to add to email addresses (including @)\n\n Example:\n\n >>> print(anon.fake_email())\n example@example.com\n\n ' min_size_allowed = (_min_word_size + len(suffix)) if ((max_size + len(suffix)) > 254): raise ValueError('email address must not exceed 254 chars') elif (max_size < min_size_allowed): raise ValueError('max_size must be >= {}'.format(min_size_allowed)) else: max_size -= len(suffix) return (fake_username(max_size, separator='.') + suffix)
def fake_email(max_size=40, suffix='@example.com'): ' Returns fake email address\n\n :max_size: Maximum number of chars\n :suffix: Suffix to add to email addresses (including @)\n\n Example:\n\n >>> print(anon.fake_email())\n example@example.com\n\n ' min_size_allowed = (_min_word_size + len(suffix)) if ((max_size + len(suffix)) > 254): raise ValueError('email address must not exceed 254 chars') elif (max_size < min_size_allowed): raise ValueError('max_size must be >= {}'.format(min_size_allowed)) else: max_size -= len(suffix) return (fake_username(max_size, separator='.') + suffix)<|docstring|>Returns fake email address :max_size: Maximum number of chars :suffix: Suffix to add to email addresses (including @) Example: >>> print(anon.fake_email()) example@example.com<|endoftext|>
c24b154715f74bf0a856677ab1d5ad7459c468f236e53e23b9ed1a1b997a4621
def fake_url(max_size=50, scheme='http://', suffix='.com'): ' Returns fake URL\n\n :max_size: Maximum number of chars\n :scheme: URL scheme (http://)\n :suffix: Suffix to add to domain (including dot)\n\n Example:\n\n >>> print(anon.fake_url())\n http://facilis.fuga.fugiat.fugit.harum.hic.id.com\n\n ' min_size_allowed = ((_min_word_size + len(scheme)) + len(suffix)) if (max_size < min_size_allowed): raise ValueError('max_size must be >= {}'.format(min_size_allowed)) else: max_size -= (len(scheme) + len(suffix)) domain = (fake_text(max_size=max_size, separator='.') + suffix) return (scheme + domain)
Returns fake URL :max_size: Maximum number of chars :scheme: URL scheme (http://) :suffix: Suffix to add to domain (including dot) Example: >>> print(anon.fake_url()) http://facilis.fuga.fugiat.fugit.harum.hic.id.com
anon/utils.py
fake_url
Tesorio/django-anon
146
python
def fake_url(max_size=50, scheme='http://', suffix='.com'): ' Returns fake URL\n\n :max_size: Maximum number of chars\n :scheme: URL scheme (http://)\n :suffix: Suffix to add to domain (including dot)\n\n Example:\n\n >>> print(anon.fake_url())\n http://facilis.fuga.fugiat.fugit.harum.hic.id.com\n\n ' min_size_allowed = ((_min_word_size + len(scheme)) + len(suffix)) if (max_size < min_size_allowed): raise ValueError('max_size must be >= {}'.format(min_size_allowed)) else: max_size -= (len(scheme) + len(suffix)) domain = (fake_text(max_size=max_size, separator='.') + suffix) return (scheme + domain)
def fake_url(max_size=50, scheme='http://', suffix='.com'): ' Returns fake URL\n\n :max_size: Maximum number of chars\n :scheme: URL scheme (http://)\n :suffix: Suffix to add to domain (including dot)\n\n Example:\n\n >>> print(anon.fake_url())\n http://facilis.fuga.fugiat.fugit.harum.hic.id.com\n\n ' min_size_allowed = ((_min_word_size + len(scheme)) + len(suffix)) if (max_size < min_size_allowed): raise ValueError('max_size must be >= {}'.format(min_size_allowed)) else: max_size -= (len(scheme) + len(suffix)) domain = (fake_text(max_size=max_size, separator='.') + suffix) return (scheme + domain)<|docstring|>Returns fake URL :max_size: Maximum number of chars :scheme: URL scheme (http://) :suffix: Suffix to add to domain (including dot) Example: >>> print(anon.fake_url()) http://facilis.fuga.fugiat.fugit.harum.hic.id.com<|endoftext|>
5c354b775139b293f34347243929537cd0e9315b0bf2d30ab6ecaea659106b53
def fake_phone_number(format='999-999-9999'): ' Returns a fake phone number in the desired format\n\n :format: Format of phone number to generate\n\n Example:\n\n >>> print(anon.fake_phone_number())\n 863-068-9424\n\n ' number = [] for char in format: if (char == '9'): n = next(_number_generator) if (not number): while (n == '0'): n = next(_number_generator) number.append(n) else: number.append(char) return ''.join(number)
Returns a fake phone number in the desired format :format: Format of phone number to generate Example: >>> print(anon.fake_phone_number()) 863-068-9424
anon/utils.py
fake_phone_number
Tesorio/django-anon
146
python
def fake_phone_number(format='999-999-9999'): ' Returns a fake phone number in the desired format\n\n    :format: Format of phone number to generate\n\n    Example:\n\n        >>> print(anon.fake_phone_number())\n        863-068-9424\n\n    ' number = [] for char in format: if (char == '9'): n = next(_number_generator) if (not number): while (n == '0'): n = next(_number_generator) number.append(n) else: number.append(char) return ''.join(number)
def fake_phone_number(format='999-999-9999'): ' Returns a fake phone number in the desired format\n\n    :format: Format of phone number to generate\n\n    Example:\n\n        >>> print(anon.fake_phone_number())\n        863-068-9424\n\n    ' number = [] for char in format: if (char == '9'): n = next(_number_generator) if (not number): while (n == '0'): n = next(_number_generator) number.append(n) else: number.append(char) return ''.join(number)<|docstring|>Returns a fake phone number in the desired format :format: Format of phone number to generate Example: >>> print(anon.fake_phone_number()) 863-068-9424<|endoftext|>
cbdd5bc2666302b57c1cce8c2ce04835dd34de09582de77cc15e7e19d537e499
def _get_show_clock(self): '\n Getter method for show_clock, mapped from YANG variable /brocade_clock_rpc/show_clock (rpc)\n\n YANG Description: display current time for the cluster or specified switch\n ' return self.__show_clock
Getter method for show_clock, mapped from YANG variable /brocade_clock_rpc/show_clock (rpc) YANG Description: display current time for the cluster or specified switch
pybind/slxos/v16r_1_00b/brocade_clock_rpc/__init__.py
_get_show_clock
shivharis/pybind
0
python
def _get_show_clock(self): '\n Getter method for show_clock, mapped from YANG variable /brocade_clock_rpc/show_clock (rpc)\n\n YANG Description: display current time for the cluster or specified switch\n ' return self.__show_clock
def _get_show_clock(self): '\n Getter method for show_clock, mapped from YANG variable /brocade_clock_rpc/show_clock (rpc)\n\n YANG Description: display current time for the cluster or specified switch\n ' return self.__show_clock<|docstring|>Getter method for show_clock, mapped from YANG variable /brocade_clock_rpc/show_clock (rpc) YANG Description: display current time for the cluster or specified switch<|endoftext|>
4b23e017503c32612d1987addd25f09eff0a95d2fa68526856416aabaa737673
def _set_show_clock(self, v, load=False): '\n Setter method for show_clock, mapped from YANG variable /brocade_clock_rpc/show_clock (rpc)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_show_clock is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_show_clock() directly.\n\n YANG Description: display current time for the cluster or specified switch\n ' if hasattr(v, '_utype'): v = v._utype(v) try: t = YANGDynClass(v, base=show_clock.show_clock, is_leaf=True, yang_name='show-clock', rest_name='show-clock', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clock-get'}}, namespace='urn:brocade.com:mgmt:brocade-clock', defining_module='brocade-clock', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({'error-string': 'show_clock must be of a type compatible with rpc', 'defined-type': 'rpc', 'generated-type': 'YANGDynClass(base=show_clock.show_clock, is_leaf=True, yang_name="show-clock", rest_name="show-clock", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u\'tailf-common\': {u\'hidden\': u\'rpccmd\', u\'actionpoint\': u\'clock-get\'}}, namespace=\'urn:brocade.com:mgmt:brocade-clock\', defining_module=\'brocade-clock\', yang_type=\'rpc\', is_config=True)'}) self.__show_clock = t if hasattr(self, '_set'): self._set()
Setter method for show_clock, mapped from YANG variable /brocade_clock_rpc/show_clock (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_clock is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_clock() directly. YANG Description: display current time for the cluster or specified switch
pybind/slxos/v16r_1_00b/brocade_clock_rpc/__init__.py
_set_show_clock
shivharis/pybind
0
python
def _set_show_clock(self, v, load=False): '\n Setter method for show_clock, mapped from YANG variable /brocade_clock_rpc/show_clock (rpc)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_show_clock is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_show_clock() directly.\n\n YANG Description: display current time for the cluster or specified switch\n ' if hasattr(v, '_utype'): v = v._utype(v) try: t = YANGDynClass(v, base=show_clock.show_clock, is_leaf=True, yang_name='show-clock', rest_name='show-clock', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clock-get'}}, namespace='urn:brocade.com:mgmt:brocade-clock', defining_module='brocade-clock', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({'error-string': 'show_clock must be of a type compatible with rpc', 'defined-type': 'rpc', 'generated-type': 'YANGDynClass(base=show_clock.show_clock, is_leaf=True, yang_name="show-clock", rest_name="show-clock", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u\'tailf-common\': {u\'hidden\': u\'rpccmd\', u\'actionpoint\': u\'clock-get\'}}, namespace=\'urn:brocade.com:mgmt:brocade-clock\', defining_module=\'brocade-clock\', yang_type=\'rpc\', is_config=True)'}) self.__show_clock = t if hasattr(self, '_set'): self._set()
def _set_show_clock(self, v, load=False): '\n Setter method for show_clock, mapped from YANG variable /brocade_clock_rpc/show_clock (rpc)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_show_clock is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_show_clock() directly.\n\n YANG Description: display current time for the cluster or specified switch\n ' if hasattr(v, '_utype'): v = v._utype(v) try: t = YANGDynClass(v, base=show_clock.show_clock, is_leaf=True, yang_name='show-clock', rest_name='show-clock', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clock-get'}}, namespace='urn:brocade.com:mgmt:brocade-clock', defining_module='brocade-clock', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({'error-string': 'show_clock must be of a type compatible with rpc', 'defined-type': 'rpc', 'generated-type': 'YANGDynClass(base=show_clock.show_clock, is_leaf=True, yang_name="show-clock", rest_name="show-clock", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u\'tailf-common\': {u\'hidden\': u\'rpccmd\', u\'actionpoint\': u\'clock-get\'}}, namespace=\'urn:brocade.com:mgmt:brocade-clock\', defining_module=\'brocade-clock\', yang_type=\'rpc\', is_config=True)'}) self.__show_clock = t if hasattr(self, '_set'): self._set()<|docstring|>Setter method for show_clock, mapped from YANG variable /brocade_clock_rpc/show_clock (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_clock is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_clock() directly. YANG Description: display current time for the cluster or specified switch<|endoftext|>
6920710088569a497b9cf64126bd0e68ae455fcd9208259f3cb8d4fe355cb9b6
def convex_upsample(self, flow, mask, rate=4): '[H/rate, W/rate, 2] -> [H, W, 2]' (N, _, H, W) = flow.shape mask = F.reshape(mask, (N, 1, 9, rate, rate, H, W)) mask = F.softmax(mask, axis=2) up_flow = self.unfold((rate * flow), [3, 3], padding=1) up_flow = F.reshape(up_flow, (N, 2, 9, 1, 1, H, W)) up_flow = F.sum((mask * up_flow), axis=2) up_flow = F.transpose(up_flow, (0, 1, 4, 2, 5, 3)) return F.reshape(up_flow, (N, 2, (rate * H), (rate * W)))
[H/rate, W/rate, 2] -> [H, W, 2]
nets/crestereo.py
convex_upsample
megvii-research/CREStereo
80
python
def convex_upsample(self, flow, mask, rate=4): (N, _, H, W) = flow.shape mask = F.reshape(mask, (N, 1, 9, rate, rate, H, W)) mask = F.softmax(mask, axis=2) up_flow = self.unfold((rate * flow), [3, 3], padding=1) up_flow = F.reshape(up_flow, (N, 2, 9, 1, 1, H, W)) up_flow = F.sum((mask * up_flow), axis=2) up_flow = F.transpose(up_flow, (0, 1, 4, 2, 5, 3)) return F.reshape(up_flow, (N, 2, (rate * H), (rate * W)))
def convex_upsample(self, flow, mask, rate=4): (N, _, H, W) = flow.shape mask = F.reshape(mask, (N, 1, 9, rate, rate, H, W)) mask = F.softmax(mask, axis=2) up_flow = self.unfold((rate * flow), [3, 3], padding=1) up_flow = F.reshape(up_flow, (N, 2, 9, 1, 1, H, W)) up_flow = F.sum((mask * up_flow), axis=2) up_flow = F.transpose(up_flow, (0, 1, 4, 2, 5, 3)) return F.reshape(up_flow, (N, 2, (rate * H), (rate * W)))<|docstring|>[H/rate, W/rate, 2] -> [H, W, 2]<|endoftext|>
e1ed6cdcf6479c5b5dbc88d76a1f5431eb7370040625a2aeae1137b1d2938a8d
def abort_now(): 'Abort the current process without doing any exception teardown' sys.stdout.flush() if win32api: win32api.TerminateProcess(win32api.GetCurrentProcess(), 3) else: os.kill(0, 9)
Abort the current process without doing any exception teardown
tests/lit/lit/run.py
abort_now
zhengyangl/alive2
1,771
python
def abort_now(): sys.stdout.flush() if win32api: win32api.TerminateProcess(win32api.GetCurrentProcess(), 3) else: os.kill(0, 9)
def abort_now(): sys.stdout.flush() if win32api: win32api.TerminateProcess(win32api.GetCurrentProcess(), 3) else: os.kill(0, 9)<|docstring|>Abort the current process without doing any exception teardown<|endoftext|>
7be087d6b056184bdddaaacb9a3c325c6dd0889383aec68472fb14a7079f7198
def _execute_test_impl(test, lit_config, parallelism_semaphores): 'Execute one test' pg = test.config.parallelism_group if callable(pg): pg = pg(test) result = None semaphore = None try: if pg: semaphore = parallelism_semaphores[pg] if semaphore: semaphore.acquire() start_time = time.time() result = test.config.test_format.execute(test, lit_config) if isinstance(result, tuple): (code, output) = result result = lit.Test.Result(code, output) elif (not isinstance(result, lit.Test.Result)): raise ValueError('unexpected result from test execution') result.elapsed = (time.time() - start_time) except KeyboardInterrupt: raise except: if lit_config.debug: raise output = 'Exception during script execution:\n' output += traceback.format_exc() output += '\n' result = lit.Test.Result(lit.Test.UNRESOLVED, output) finally: if semaphore: semaphore.release() test.setResult(result)
Execute one test
tests/lit/lit/run.py
_execute_test_impl
zhengyangl/alive2
1,771
python
def _execute_test_impl(test, lit_config, parallelism_semaphores): pg = test.config.parallelism_group if callable(pg): pg = pg(test) result = None semaphore = None try: if pg: semaphore = parallelism_semaphores[pg] if semaphore: semaphore.acquire() start_time = time.time() result = test.config.test_format.execute(test, lit_config) if isinstance(result, tuple): (code, output) = result result = lit.Test.Result(code, output) elif (not isinstance(result, lit.Test.Result)): raise ValueError('unexpected result from test execution') result.elapsed = (time.time() - start_time) except KeyboardInterrupt: raise except: if lit_config.debug: raise output = 'Exception during script execution:\n' output += traceback.format_exc() output += '\n' result = lit.Test.Result(lit.Test.UNRESOLVED, output) finally: if semaphore: semaphore.release() test.setResult(result)
def _execute_test_impl(test, lit_config, parallelism_semaphores): pg = test.config.parallelism_group if callable(pg): pg = pg(test) result = None semaphore = None try: if pg: semaphore = parallelism_semaphores[pg] if semaphore: semaphore.acquire() start_time = time.time() result = test.config.test_format.execute(test, lit_config) if isinstance(result, tuple): (code, output) = result result = lit.Test.Result(code, output) elif (not isinstance(result, lit.Test.Result)): raise ValueError('unexpected result from test execution') result.elapsed = (time.time() - start_time) except KeyboardInterrupt: raise except: if lit_config.debug: raise output = 'Exception during script execution:\n' output += traceback.format_exc() output += '\n' result = lit.Test.Result(lit.Test.UNRESOLVED, output) finally: if semaphore: semaphore.release() test.setResult(result)<|docstring|>Execute one test<|endoftext|>
4968dddfc8de0e54af2998fcf259553b027e6fa5f42bf4482c26c2968e361256
def worker_initializer(lit_config, parallelism_semaphores): 'Copy expensive repeated data into worker processes' global child_lit_config child_lit_config = lit_config global child_parallelism_semaphores child_parallelism_semaphores = parallelism_semaphores
Copy expensive repeated data into worker processes
tests/lit/lit/run.py
worker_initializer
zhengyangl/alive2
1,771
python
def worker_initializer(lit_config, parallelism_semaphores): global child_lit_config child_lit_config = lit_config global child_parallelism_semaphores child_parallelism_semaphores = parallelism_semaphores
def worker_initializer(lit_config, parallelism_semaphores): global child_lit_config child_lit_config = lit_config global child_parallelism_semaphores child_parallelism_semaphores = parallelism_semaphores<|docstring|>Copy expensive repeated data into worker processes<|endoftext|>
07021805273252b817b412bc89553cf78cfe82cad421115c859e0c8af85846c2
def worker_run_one_test(test_index, test): 'Run one test in a multiprocessing.Pool\n\n Side effects in this function and functions it calls are not visible in the\n main lit process.\n\n Arguments and results of this function are pickled, so they should be cheap\n to copy. For efficiency, we copy all data needed to execute all tests into\n each worker and store it in the child_* global variables. This reduces the\n cost of each task.\n\n Returns an index and a Result, which the parent process uses to update\n the display.\n ' try: _execute_test_impl(test, child_lit_config, child_parallelism_semaphores) return (test_index, test) except KeyboardInterrupt as e: abort_now() except: traceback.print_exc()
Run one test in a multiprocessing.Pool Side effects in this function and functions it calls are not visible in the main lit process. Arguments and results of this function are pickled, so they should be cheap to copy. For efficiency, we copy all data needed to execute all tests into each worker and store it in the child_* global variables. This reduces the cost of each task. Returns an index and a Result, which the parent process uses to update the display.
tests/lit/lit/run.py
worker_run_one_test
zhengyangl/alive2
1,771
python
def worker_run_one_test(test_index, test): 'Run one test in a multiprocessing.Pool\n\n Side effects in this function and functions it calls are not visible in the\n main lit process.\n\n Arguments and results of this function are pickled, so they should be cheap\n to copy. For efficiency, we copy all data needed to execute all tests into\n each worker and store it in the child_* global variables. This reduces the\n cost of each task.\n\n Returns an index and a Result, which the parent process uses to update\n the display.\n ' try: _execute_test_impl(test, child_lit_config, child_parallelism_semaphores) return (test_index, test) except KeyboardInterrupt as e: abort_now() except: traceback.print_exc()
def worker_run_one_test(test_index, test): 'Run one test in a multiprocessing.Pool\n\n Side effects in this function and functions it calls are not visible in the\n main lit process.\n\n Arguments and results of this function are pickled, so they should be cheap\n to copy. For efficiency, we copy all data needed to execute all tests into\n each worker and store it in the child_* global variables. This reduces the\n cost of each task.\n\n Returns an index and a Result, which the parent process uses to update\n the display.\n ' try: _execute_test_impl(test, child_lit_config, child_parallelism_semaphores) return (test_index, test) except KeyboardInterrupt as e: abort_now() except: traceback.print_exc()<|docstring|>Run one test in a multiprocessing.Pool Side effects in this function and functions it calls are not visible in the main lit process. Arguments and results of this function are pickled, so they should be cheap to copy. For efficiency, we copy all data needed to execute all tests into each worker and store it in the child_* global variables. This reduces the cost of each task. Returns an index and a Result, which the parent process uses to update the display.<|endoftext|>
1f20fc08b2d9f00b695ae0d00733d07ccaa83e39b799378a376b9f7dd5ac1ac4
def execute_tests(self, display, jobs, max_time=None): '\n execute_tests(display, jobs, [max_time])\n\n Execute each of the tests in the run, using up to jobs number of\n parallel tasks, and inform the display of each individual result. The\n provided tests should be a subset of the tests available in this run\n object.\n\n If max_time is non-None, it should be a time in seconds after which to\n stop executing tests.\n\n The display object will have its update method called with each test as\n it is completed. The calls are guaranteed to be locked with respect to\n one another, but are *not* guaranteed to be called on the same thread as\n this method was invoked on.\n\n Upon completion, each test in the run will have its result\n computed. Tests which were not actually executed (for any reason) will\n be given an UNRESOLVED result.\n ' if ((not self.tests) or (jobs == 0)): return self.display = display self.failure_count = 0 self.hit_max_failures = False if self.lit_config.singleProcess: global child_lit_config child_lit_config = self.lit_config for (test_index, test) in enumerate(self.tests): result = worker_run_one_test(test_index, test) self.consume_test_result(result) else: self.execute_tests_in_pool(jobs, max_time) for test in self.tests: if (test.result is None): test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
execute_tests(display, jobs, [max_time]) Execute each of the tests in the run, using up to jobs number of parallel tasks, and inform the display of each individual result. The provided tests should be a subset of the tests available in this run object. If max_time is non-None, it should be a time in seconds after which to stop executing tests. The display object will have its update method called with each test as it is completed. The calls are guaranteed to be locked with respect to one another, but are *not* guaranteed to be called on the same thread as this method was invoked on. Upon completion, each test in the run will have its result computed. Tests which were not actually executed (for any reason) will be given an UNRESOLVED result.
tests/lit/lit/run.py
execute_tests
zhengyangl/alive2
1,771
python
def execute_tests(self, display, jobs, max_time=None): '\n execute_tests(display, jobs, [max_time])\n\n Execute each of the tests in the run, using up to jobs number of\n parallel tasks, and inform the display of each individual result. The\n provided tests should be a subset of the tests available in this run\n object.\n\n If max_time is non-None, it should be a time in seconds after which to\n stop executing tests.\n\n The display object will have its update method called with each test as\n it is completed. The calls are guaranteed to be locked with respect to\n one another, but are *not* guaranteed to be called on the same thread as\n this method was invoked on.\n\n Upon completion, each test in the run will have its result\n computed. Tests which were not actually executed (for any reason) will\n be given an UNRESOLVED result.\n ' if ((not self.tests) or (jobs == 0)): return self.display = display self.failure_count = 0 self.hit_max_failures = False if self.lit_config.singleProcess: global child_lit_config child_lit_config = self.lit_config for (test_index, test) in enumerate(self.tests): result = worker_run_one_test(test_index, test) self.consume_test_result(result) else: self.execute_tests_in_pool(jobs, max_time) for test in self.tests: if (test.result is None): test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, , 0.0))
def execute_tests(self, display, jobs, max_time=None): '\n execute_tests(display, jobs, [max_time])\n\n Execute each of the tests in the run, using up to jobs number of\n parallel tasks, and inform the display of each individual result. The\n provided tests should be a subset of the tests available in this run\n object.\n\n If max_time is non-None, it should be a time in seconds after which to\n stop executing tests.\n\n The display object will have its update method called with each test as\n it is completed. The calls are guaranteed to be locked with respect to\n one another, but are *not* guaranteed to be called on the same thread as\n this method was invoked on.\n\n Upon completion, each test in the run will have its result\n computed. Tests which were not actually executed (for any reason) will\n be given an UNRESOLVED result.\n ' if ((not self.tests) or (jobs == 0)): return self.display = display self.failure_count = 0 self.hit_max_failures = False if self.lit_config.singleProcess: global child_lit_config child_lit_config = self.lit_config for (test_index, test) in enumerate(self.tests): result = worker_run_one_test(test_index, test) self.consume_test_result(result) else: self.execute_tests_in_pool(jobs, max_time) for test in self.tests: if (test.result is None): test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, , 0.0))<|docstring|>execute_tests(display, jobs, [max_time]) Execute each of the tests in the run, using up to jobs number of parallel tasks, and inform the display of each individual result. The provided tests should be a subset of the tests available in this run object. If max_time is non-None, it should be a time in seconds after which to stop executing tests. The display object will have its update method called with each test as it is completed. The calls are guaranteed to be locked with respect to one another, but are *not* guaranteed to be called on the same thread as this method was invoked on. 
Upon completion, each test in the run will have its result computed. Tests which were not actually executed (for any reason) will be given an UNRESOLVED result.<|endoftext|>
1b83190623f33513471c14881be5cb113d039d40ec154ffe9708a3d98131e818
def consume_test_result(self, pool_result): 'Test completion callback for worker_run_one_test\n\n Updates the test result status in the parent process. Each task in the\n pool returns the test index and the result, and we use the index to look\n up the original test object. Also updates the progress bar as tasks\n complete.\n ' if self.hit_max_failures: return (test_index, test_with_result) = pool_result assert (self.tests[test_index].file_path == test_with_result.file_path), 'parent and child disagree on test path' self.tests[test_index] = test_with_result self.display.update(test_with_result) self.failure_count += (test_with_result.result.code == lit.Test.FAIL) if (self.lit_config.maxFailures and (self.failure_count == self.lit_config.maxFailures)): self.hit_max_failures = True
Test completion callback for worker_run_one_test Updates the test result status in the parent process. Each task in the pool returns the test index and the result, and we use the index to look up the original test object. Also updates the progress bar as tasks complete.
tests/lit/lit/run.py
consume_test_result
zhengyangl/alive2
1,771
python
def consume_test_result(self, pool_result): 'Test completion callback for worker_run_one_test\n\n Updates the test result status in the parent process. Each task in the\n pool returns the test index and the result, and we use the index to look\n up the original test object. Also updates the progress bar as tasks\n complete.\n ' if self.hit_max_failures: return (test_index, test_with_result) = pool_result assert (self.tests[test_index].file_path == test_with_result.file_path), 'parent and child disagree on test path' self.tests[test_index] = test_with_result self.display.update(test_with_result) self.failure_count += (test_with_result.result.code == lit.Test.FAIL) if (self.lit_config.maxFailures and (self.failure_count == self.lit_config.maxFailures)): self.hit_max_failures = True
def consume_test_result(self, pool_result): 'Test completion callback for worker_run_one_test\n\n Updates the test result status in the parent process. Each task in the\n pool returns the test index and the result, and we use the index to look\n up the original test object. Also updates the progress bar as tasks\n complete.\n ' if self.hit_max_failures: return (test_index, test_with_result) = pool_result assert (self.tests[test_index].file_path == test_with_result.file_path), 'parent and child disagree on test path' self.tests[test_index] = test_with_result self.display.update(test_with_result) self.failure_count += (test_with_result.result.code == lit.Test.FAIL) if (self.lit_config.maxFailures and (self.failure_count == self.lit_config.maxFailures)): self.hit_max_failures = True<|docstring|>Test completion callback for worker_run_one_test Updates the test result status in the parent process. Each task in the pool returns the test index and the result, and we use the index to look up the original test object. Also updates the progress bar as tasks complete.<|endoftext|>
a86b9c57543f03d689e779901e04c4f0d2fbc34e27437790d60ac002015c7aa8
def generate_hangul_images(label_file, fonts_dir, output_dir): 'Generate Hangul image files.\n\n This will take in the passed in labels file and will generate several\n images using the font files provided in the font directory. The font\n directory is expected to be populated with *.ttf (True Type Font) files.\n The generated images will be stored in the given output directory. Image\n paths will have their corresponding labels listed in a CSV file.\n ' with io.open(label_file, 'r', encoding='utf-8') as f: labels = f.read().splitlines() image_dir = os.path.join(output_dir, 'hangul-images') if (not os.path.exists(image_dir)): os.makedirs(os.path.join(image_dir)) fonts = glob.glob(os.path.join(fonts_dir, '*.ttf')) labels_csv = io.open(os.path.join(output_dir, 'labels-map.csv'), 'w', encoding='utf-8') total_count = 0 prev_count = 0 for character in labels: if ((total_count - prev_count) > 5000): prev_count = total_count print('{} images generated...'.format(total_count)) for font in fonts: total_count += 1 image = Image.new('L', (IMAGE_WIDTH, IMAGE_HEIGHT), color=0) font = ImageFont.truetype(font, 48) drawing = ImageDraw.Draw(image) (w, h) = drawing.textsize(character, font=font) drawing.text((((IMAGE_WIDTH - w) / 2), ((IMAGE_HEIGHT - h) / 2)), character, fill=255, font=font) file_string = 'hangul_{}.jpeg'.format(total_count) file_path = os.path.join(image_dir, file_string) image.save(file_path, 'JPEG') labels_csv.write(u'{},{}\n'.format(file_path, character)) for i in range(DISTORTION_COUNT): total_count += 1 file_string = 'hangul_{}.jpeg'.format(total_count) file_path = os.path.join(image_dir, file_string) arr = numpy.array(image) distorted_array = elastic_distort(arr, alpha=random.randint(30, 36), sigma=random.randint(5, 6)) distorted_image = Image.fromarray(distorted_array) distorted_image.save(file_path, 'JPEG') labels_csv.write(u'{},{}\n'.format(file_path, character)) print('Finished generating {} images.'.format(total_count)) labels_csv.close()
Generate Hangul image files. This will take in the passed in labels file and will generate several images using the font files provided in the font directory. The font directory is expected to be populated with *.ttf (True Type Font) files. The generated images will be stored in the given output directory. Image paths will have their corresponding labels listed in a CSV file.
tools/hangul-image-generator.py
generate_hangul_images
g-may/tensorflow-hangul-recognition
243
python
def generate_hangul_images(label_file, fonts_dir, output_dir): 'Generate Hangul image files.\n\n This will take in the passed in labels file and will generate several\n images using the font files provided in the font directory. The font\n directory is expected to be populated with *.ttf (True Type Font) files.\n The generated images will be stored in the given output directory. Image\n paths will have their corresponding labels listed in a CSV file.\n ' with io.open(label_file, 'r', encoding='utf-8') as f: labels = f.read().splitlines() image_dir = os.path.join(output_dir, 'hangul-images') if (not os.path.exists(image_dir)): os.makedirs(os.path.join(image_dir)) fonts = glob.glob(os.path.join(fonts_dir, '*.ttf')) labels_csv = io.open(os.path.join(output_dir, 'labels-map.csv'), 'w', encoding='utf-8') total_count = 0 prev_count = 0 for character in labels: if ((total_count - prev_count) > 5000): prev_count = total_count print('{} images generated...'.format(total_count)) for font in fonts: total_count += 1 image = Image.new('L', (IMAGE_WIDTH, IMAGE_HEIGHT), color=0) font = ImageFont.truetype(font, 48) drawing = ImageDraw.Draw(image) (w, h) = drawing.textsize(character, font=font) drawing.text((((IMAGE_WIDTH - w) / 2), ((IMAGE_HEIGHT - h) / 2)), character, fill=255, font=font) file_string = 'hangul_{}.jpeg'.format(total_count) file_path = os.path.join(image_dir, file_string) image.save(file_path, 'JPEG') labels_csv.write(u'{},{}\n'.format(file_path, character)) for i in range(DISTORTION_COUNT): total_count += 1 file_string = 'hangul_{}.jpeg'.format(total_count) file_path = os.path.join(image_dir, file_string) arr = numpy.array(image) distorted_array = elastic_distort(arr, alpha=random.randint(30, 36), sigma=random.randint(5, 6)) distorted_image = Image.fromarray(distorted_array) distorted_image.save(file_path, 'JPEG') labels_csv.write(u'{},{}\n'.format(file_path, character)) print('Finished generating {} images.'.format(total_count)) labels_csv.close()
def generate_hangul_images(label_file, fonts_dir, output_dir): 'Generate Hangul image files.\n\n This will take in the passed in labels file and will generate several\n images using the font files provided in the font directory. The font\n directory is expected to be populated with *.ttf (True Type Font) files.\n The generated images will be stored in the given output directory. Image\n paths will have their corresponding labels listed in a CSV file.\n ' with io.open(label_file, 'r', encoding='utf-8') as f: labels = f.read().splitlines() image_dir = os.path.join(output_dir, 'hangul-images') if (not os.path.exists(image_dir)): os.makedirs(os.path.join(image_dir)) fonts = glob.glob(os.path.join(fonts_dir, '*.ttf')) labels_csv = io.open(os.path.join(output_dir, 'labels-map.csv'), 'w', encoding='utf-8') total_count = 0 prev_count = 0 for character in labels: if ((total_count - prev_count) > 5000): prev_count = total_count print('{} images generated...'.format(total_count)) for font in fonts: total_count += 1 image = Image.new('L', (IMAGE_WIDTH, IMAGE_HEIGHT), color=0) font = ImageFont.truetype(font, 48) drawing = ImageDraw.Draw(image) (w, h) = drawing.textsize(character, font=font) drawing.text((((IMAGE_WIDTH - w) / 2), ((IMAGE_HEIGHT - h) / 2)), character, fill=255, font=font) file_string = 'hangul_{}.jpeg'.format(total_count) file_path = os.path.join(image_dir, file_string) image.save(file_path, 'JPEG') labels_csv.write(u'{},{}\n'.format(file_path, character)) for i in range(DISTORTION_COUNT): total_count += 1 file_string = 'hangul_{}.jpeg'.format(total_count) file_path = os.path.join(image_dir, file_string) arr = numpy.array(image) distorted_array = elastic_distort(arr, alpha=random.randint(30, 36), sigma=random.randint(5, 6)) distorted_image = Image.fromarray(distorted_array) distorted_image.save(file_path, 'JPEG') labels_csv.write(u'{},{}\n'.format(file_path, character)) print('Finished generating {} images.'.format(total_count)) 
labels_csv.close()<|docstring|>Generate Hangul image files. This will take in the passed in labels file and will generate several images using the font files provided in the font directory. The font directory is expected to be populated with *.ttf (True Type Font) files. The generated images will be stored in the given output directory. Image paths will have their corresponding labels listed in a CSV file.<|endoftext|>
ed32d9f4d8b83e38e1ee645247199a2c54f68c4b74de8f45f5c9b6880af64e9e
def elastic_distort(image, alpha, sigma): 'Perform elastic distortion on an image.\n\n Here, alpha refers to the scaling factor that controls the intensity of the\n deformation. The sigma variable refers to the Gaussian filter standard\n deviation.\n ' random_state = numpy.random.RandomState(None) shape = image.shape dx = (gaussian_filter(((random_state.rand(*shape) * 2) - 1), sigma, mode='constant') * alpha) dy = (gaussian_filter(((random_state.rand(*shape) * 2) - 1), sigma, mode='constant') * alpha) (x, y) = numpy.meshgrid(numpy.arange(shape[0]), numpy.arange(shape[1])) indices = (numpy.reshape((y + dy), ((- 1), 1)), numpy.reshape((x + dx), ((- 1), 1))) return map_coordinates(image, indices, order=1).reshape(shape)
Perform elastic distortion on an image. Here, alpha refers to the scaling factor that controls the intensity of the deformation. The sigma variable refers to the Gaussian filter standard deviation.
tools/hangul-image-generator.py
elastic_distort
g-may/tensorflow-hangul-recognition
243
python
def elastic_distort(image, alpha, sigma): 'Perform elastic distortion on an image.\n\n Here, alpha refers to the scaling factor that controls the intensity of the\n deformation. The sigma variable refers to the Gaussian filter standard\n deviation.\n ' random_state = numpy.random.RandomState(None) shape = image.shape dx = (gaussian_filter(((random_state.rand(*shape) * 2) - 1), sigma, mode='constant') * alpha) dy = (gaussian_filter(((random_state.rand(*shape) * 2) - 1), sigma, mode='constant') * alpha) (x, y) = numpy.meshgrid(numpy.arange(shape[0]), numpy.arange(shape[1])) indices = (numpy.reshape((y + dy), ((- 1), 1)), numpy.reshape((x + dx), ((- 1), 1))) return map_coordinates(image, indices, order=1).reshape(shape)
def elastic_distort(image, alpha, sigma): 'Perform elastic distortion on an image.\n\n Here, alpha refers to the scaling factor that controls the intensity of the\n deformation. The sigma variable refers to the Gaussian filter standard\n deviation.\n ' random_state = numpy.random.RandomState(None) shape = image.shape dx = (gaussian_filter(((random_state.rand(*shape) * 2) - 1), sigma, mode='constant') * alpha) dy = (gaussian_filter(((random_state.rand(*shape) * 2) - 1), sigma, mode='constant') * alpha) (x, y) = numpy.meshgrid(numpy.arange(shape[0]), numpy.arange(shape[1])) indices = (numpy.reshape((y + dy), ((- 1), 1)), numpy.reshape((x + dx), ((- 1), 1))) return map_coordinates(image, indices, order=1).reshape(shape)<|docstring|>Perform elastic distortion on an image. Here, alpha refers to the scaling factor that controls the intensity of the deformation. The sigma variable refers to the Gaussian filter standard deviation.<|endoftext|>
1489320bb5e3797fb4887f9919f13b7383d81cba56339104e28e41d4bfd897b8
def __init__(self, text): '\n 初始化\n :param text: \n ' self.text = text self.pos = 0 self.current_char = self.text[self.pos]
初始化 :param text:
Python/lsbasi/pascal1.py
__init__
InnoFang/misc-code
4
python
def __init__(self, text): '\n 初始化\n :param text: \n ' self.text = text self.pos = 0 self.current_char = self.text[self.pos]
def __init__(self, text): '\n 初始化\n :param text: \n ' self.text = text self.pos = 0 self.current_char = self.text[self.pos]<|docstring|>初始化 :param text:<|endoftext|>
4ab01d54628d223ba446af0fcc27c32ee7f53f36bf3ad44cf3d5b240edd3e720
def error(self): '\n 内置异常\n :return: \n ' raise Exception('Invalid character')
内置异常 :return:
Python/lsbasi/pascal1.py
error
InnoFang/misc-code
4
python
def error(self): '\n 内置异常\n :return: \n ' raise Exception('Invalid character')
def error(self): '\n 内置异常\n :return: \n ' raise Exception('Invalid character')<|docstring|>内置异常 :return:<|endoftext|>
88c2eebf6156dd87554eecd8625ae9b6b25f5d014dde89e9b14fcbd2319bcd44
def advance(self): '\n 字符下标加一,得到下一个字符\n :return: \n ' self.pos += 1 if (self.pos > (len(self.text) - 1)): self.current_char = None else: self.current_char = self.text[self.pos]
字符下标加一,得到下一个字符 :return:
Python/lsbasi/pascal1.py
advance
InnoFang/misc-code
4
python
def advance(self): '\n 字符下标加一,得到下一个字符\n :return: \n ' self.pos += 1 if (self.pos > (len(self.text) - 1)): self.current_char = None else: self.current_char = self.text[self.pos]
def advance(self): '\n 字符下标加一,得到下一个字符\n :return: \n ' self.pos += 1 if (self.pos > (len(self.text) - 1)): self.current_char = None else: self.current_char = self.text[self.pos]<|docstring|>字符下标加一,得到下一个字符 :return:<|endoftext|>
7bcb361d68096f24c5aa5c96fe29f6f9271351c17d9b217cdabe7292e74e3a1e
def peek(self): '\n 得到下一个字符,但是字符下标不变\n :return: \n ' peek_pos = (self.pos + 1) if (peek_pos > (len(self.text) - 1)): return None else: return self.text[peek_pos]
得到下一个字符,但是字符下标不变 :return:
Python/lsbasi/pascal1.py
peek
InnoFang/misc-code
4
python
def peek(self): '\n 得到下一个字符,但是字符下标不变\n :return: \n ' peek_pos = (self.pos + 1) if (peek_pos > (len(self.text) - 1)): return None else: return self.text[peek_pos]
def peek(self): '\n 得到下一个字符,但是字符下标不变\n :return: \n ' peek_pos = (self.pos + 1) if (peek_pos > (len(self.text) - 1)): return None else: return self.text[peek_pos]<|docstring|>得到下一个字符,但是字符下标不变 :return:<|endoftext|>
0c40d3fb286d291296a61c78ee55f9b9e600b12b37ffb4f12980111a97af85dd
def skip_whitespace(self): '\n 跳过空字符\n :return: \n ' while ((self.current_char is not None) and self.current_char.isspace()): self.advance()
跳过空字符 :return:
Python/lsbasi/pascal1.py
skip_whitespace
InnoFang/misc-code
4
python
def skip_whitespace(self): '\n 跳过空字符\n :return: \n ' while ((self.current_char is not None) and self.current_char.isspace()): self.advance()
def skip_whitespace(self): '\n 跳过空字符\n :return: \n ' while ((self.current_char is not None) and self.current_char.isspace()): self.advance()<|docstring|>跳过空字符 :return:<|endoftext|>
92ee14919658740841fee99eb0bea6503a78f3afd050a6b115b0a84bfd5667f8
def integer(self): '\n 读取数字\n :return: \n ' result = '' while ((self.current_char is not None) and self.current_char.isdigit()): result += self.current_char self.advance() return int(result)
读取数字 :return:
Python/lsbasi/pascal1.py
integer
InnoFang/misc-code
4
python
def integer(self): '\n 读取数字\n :return: \n ' result = while ((self.current_char is not None) and self.current_char.isdigit()): result += self.current_char self.advance() return int(result)
def integer(self): '\n 读取数字\n :return: \n ' result = while ((self.current_char is not None) and self.current_char.isdigit()): result += self.current_char self.advance() return int(result)<|docstring|>读取数字 :return:<|endoftext|>
3e2d008b95532a369ee7ef3f79ac62e4dbb1e718e5861fb6e10d6010d3b37bc4
def _id(self): '\n 读取标识符(变量和保留字(即BEGIN,END))\n :return: \n ' result = '' while ((self.current_char is not None) and self.current_char.isalnum()): result += self.current_char self.advance() token = RESERVED_KEYWORDS.get(result, Token(ID, result)) return token
读取标识符(变量和保留字(即BEGIN,END)) :return:
Python/lsbasi/pascal1.py
_id
InnoFang/misc-code
4
python
def _id(self): '\n 读取标识符(变量和保留字(即BEGIN,END))\n :return: \n ' result = while ((self.current_char is not None) and self.current_char.isalnum()): result += self.current_char self.advance() token = RESERVED_KEYWORDS.get(result, Token(ID, result)) return token
def _id(self): '\n 读取标识符(变量和保留字(即BEGIN,END))\n :return: \n ' result = while ((self.current_char is not None) and self.current_char.isalnum()): result += self.current_char self.advance() token = RESERVED_KEYWORDS.get(result, Token(ID, result)) return token<|docstring|>读取标识符(变量和保留字(即BEGIN,END)) :return:<|endoftext|>
8b620753aee5844e3b322417d6a245ccef3e9fd281c2c4c5f882a1d8922f566a
def get_next_token(self): '\n 把句子分割成token,一次一个token\n :return: \n ' while (self.current_char is not None): if self.current_char.isspace(): self.skip_whitespace() continue if self.current_char.isalpha(): return self._id() if self.current_char.isdigit(): return Token(INTEGER, self.integer()) if ((self.current_char == ':') and (self.peek() == '=')): self.advance() self.advance() return Token(ASSIGN, ':=') if (self.current_char == ';'): self.advance() return Token(SEMI, ';') if (self.current_char == '+'): self.advance() return Token(PLUS, '+') if (self.current_char == '-'): self.advance() return Token(MINUS, '-') if (self.current_char == '*'): self.advance() return Token(MUL, '*') if (self.current_char == '/'): self.advance() return Token(DIV, '/') if (self.current_char == '('): self.advance() return Token(LPAREN, '(') if (self.current_char == ')'): self.advance() return Token(RPAREN, ')') if (self.current_char == '.'): self.advance() return Token(DOT, '.') self.error() return Token(EOF, None)
把句子分割成token,一次一个token :return:
Python/lsbasi/pascal1.py
get_next_token
InnoFang/misc-code
4
python
def get_next_token(self): '\n 把句子分割成token,一次一个token\n :return: \n ' while (self.current_char is not None): if self.current_char.isspace(): self.skip_whitespace() continue if self.current_char.isalpha(): return self._id() if self.current_char.isdigit(): return Token(INTEGER, self.integer()) if ((self.current_char == ':') and (self.peek() == '=')): self.advance() self.advance() return Token(ASSIGN, ':=') if (self.current_char == ';'): self.advance() return Token(SEMI, ';') if (self.current_char == '+'): self.advance() return Token(PLUS, '+') if (self.current_char == '-'): self.advance() return Token(MINUS, '-') if (self.current_char == '*'): self.advance() return Token(MUL, '*') if (self.current_char == '/'): self.advance() return Token(DIV, '/') if (self.current_char == '('): self.advance() return Token(LPAREN, '(') if (self.current_char == ')'): self.advance() return Token(RPAREN, ')') if (self.current_char == '.'): self.advance() return Token(DOT, '.') self.error() return Token(EOF, None)
def get_next_token(self): '\n 把句子分割成token,一次一个token\n :return: \n ' while (self.current_char is not None): if self.current_char.isspace(): self.skip_whitespace() continue if self.current_char.isalpha(): return self._id() if self.current_char.isdigit(): return Token(INTEGER, self.integer()) if ((self.current_char == ':') and (self.peek() == '=')): self.advance() self.advance() return Token(ASSIGN, ':=') if (self.current_char == ';'): self.advance() return Token(SEMI, ';') if (self.current_char == '+'): self.advance() return Token(PLUS, '+') if (self.current_char == '-'): self.advance() return Token(MINUS, '-') if (self.current_char == '*'): self.advance() return Token(MUL, '*') if (self.current_char == '/'): self.advance() return Token(DIV, '/') if (self.current_char == '('): self.advance() return Token(LPAREN, '(') if (self.current_char == ')'): self.advance() return Token(RPAREN, ')') if (self.current_char == '.'): self.advance() return Token(DOT, '.') self.error() return Token(EOF, None)<|docstring|>把句子分割成token,一次一个token :return:<|endoftext|>
ae3d4e16b328db288476238431309e327b64b28a6df133018e29805afc7f3ed8
def program(self): 'program : compound_statement DOT' node = self.compound_statement() self.eat(DOT) return node
program : compound_statement DOT
Python/lsbasi/pascal1.py
program
InnoFang/misc-code
4
python
def program(self): node = self.compound_statement() self.eat(DOT) return node
def program(self): node = self.compound_statement() self.eat(DOT) return node<|docstring|>program : compound_statement DOT<|endoftext|>
03a1884e4c0ddf1c04d095859ea9dd37e97985e4e5f3e883294b7b3f7d6b29fd
def compound_statement(self): 'compound_statement : BEGIN statement_list END' self.eat(BEGIN) nodes = self.statement_list() self.eat(END) root = Compound() for node in nodes: root.children.append(node) return root
compound_statement : BEGIN statement_list END
Python/lsbasi/pascal1.py
compound_statement
InnoFang/misc-code
4
python
def compound_statement(self): self.eat(BEGIN) nodes = self.statement_list() self.eat(END) root = Compound() for node in nodes: root.children.append(node) return root
def compound_statement(self): self.eat(BEGIN) nodes = self.statement_list() self.eat(END) root = Compound() for node in nodes: root.children.append(node) return root<|docstring|>compound_statement : BEGIN statement_list END<|endoftext|>
e127493312c499cad94cdec6eb21036547e403d5cea39761ad3641be88c94e3b
def statement_list(self): '\n statement_list : statement\n | statement SEMI statement_list\n ' node = self.statement() results = [node] while (self.current_token.type == SEMI): self.eat(SEMI) results.append(self.statement()) if (self.current_token.type == ID): self.error() return results
statement_list : statement | statement SEMI statement_list
Python/lsbasi/pascal1.py
statement_list
InnoFang/misc-code
4
python
def statement_list(self): '\n statement_list : statement\n | statement SEMI statement_list\n ' node = self.statement() results = [node] while (self.current_token.type == SEMI): self.eat(SEMI) results.append(self.statement()) if (self.current_token.type == ID): self.error() return results
def statement_list(self): '\n statement_list : statement\n | statement SEMI statement_list\n ' node = self.statement() results = [node] while (self.current_token.type == SEMI): self.eat(SEMI) results.append(self.statement()) if (self.current_token.type == ID): self.error() return results<|docstring|>statement_list : statement | statement SEMI statement_list<|endoftext|>
ac019ece37abd33ee1ecf724653290a0799e39f0792b7b47f8462ab7388cc1d3
def statement(self): '\n statement : compound_statement\n | assignment_statement\n | empty\n ' if (self.current_token.type == BEGIN): node = self.compound_statement() elif (self.current_token.type == ID): node = self.assignment_statement() else: node = self.empty() return node
statement : compound_statement | assignment_statement | empty
Python/lsbasi/pascal1.py
statement
InnoFang/misc-code
4
python
def statement(self): '\n statement : compound_statement\n | assignment_statement\n | empty\n ' if (self.current_token.type == BEGIN): node = self.compound_statement() elif (self.current_token.type == ID): node = self.assignment_statement() else: node = self.empty() return node
def statement(self): '\n statement : compound_statement\n | assignment_statement\n | empty\n ' if (self.current_token.type == BEGIN): node = self.compound_statement() elif (self.current_token.type == ID): node = self.assignment_statement() else: node = self.empty() return node<|docstring|>statement : compound_statement | assignment_statement | empty<|endoftext|>
b1b724fe52e31039526df630f021b957fc9ed992c8b8e609aabd9bdb3c8ebbc8
def assignment_statement(self): 'assignment_statement : variable ASSIGN expr' left = self.variable() token = self.current_token self.eat(ASSIGN) right = self.expr() node = Assign(left, token, right) return node
assignment_statement : variable ASSIGN expr
Python/lsbasi/pascal1.py
assignment_statement
InnoFang/misc-code
4
python
def assignment_statement(self): left = self.variable() token = self.current_token self.eat(ASSIGN) right = self.expr() node = Assign(left, token, right) return node
def assignment_statement(self): left = self.variable() token = self.current_token self.eat(ASSIGN) right = self.expr() node = Assign(left, token, right) return node<|docstring|>assignment_statement : variable ASSIGN expr<|endoftext|>
20eeb5ea56608d39e5fc11ad62a20fa65db31bb647833d90f93a9dcf6dedc384
def variable(self): 'variable : ID' node = Var(self.current_token) self.eat(ID) return node
variable : ID
Python/lsbasi/pascal1.py
variable
InnoFang/misc-code
4
python
def variable(self): node = Var(self.current_token) self.eat(ID) return node
def variable(self): node = Var(self.current_token) self.eat(ID) return node<|docstring|>variable : ID<|endoftext|>
cb40daee72466b237fa3fd098cb63473de368d0cf49a33a95eafd38e27cf3fe1
def expr(self): '\n expr : term ((PLUS | MINUS) term) *\n ' node = self.term() while (self.current_token.type in (PLUS, MINUS)): token = self.current_token if (token.type == PLUS): self.eat(PLUS) elif (token.type == MINUS): self.eat(MINUS) node = BinOp(left=node, op=token, right=self.term()) return node
expr : term ((PLUS | MINUS) term) *
Python/lsbasi/pascal1.py
expr
InnoFang/misc-code
4
python
def expr(self): '\n \n ' node = self.term() while (self.current_token.type in (PLUS, MINUS)): token = self.current_token if (token.type == PLUS): self.eat(PLUS) elif (token.type == MINUS): self.eat(MINUS) node = BinOp(left=node, op=token, right=self.term()) return node
def expr(self): '\n \n ' node = self.term() while (self.current_token.type in (PLUS, MINUS)): token = self.current_token if (token.type == PLUS): self.eat(PLUS) elif (token.type == MINUS): self.eat(MINUS) node = BinOp(left=node, op=token, right=self.term()) return node<|docstring|>expr : term ((PLUS | MINUS) term) *<|endoftext|>
33d021f98a3fda1c542a06fc2f55a2f2f7b93695711d57f628248da121053e39
def term(self):
    """term : factor ((MUL | DIV) factor)*"""
    node = self.factor()
    # Left-associative fold over multiplicative operators; mirrors expr()
    # one precedence level down.
    while self.current_token.type in (MUL, DIV):
        op_token = self.current_token
        self.eat(op_token.type)
        node = BinOp(left=node, op=op_token, right=self.factor())
    return node
term : factor ((MUL | DIV) factor) *
Python/lsbasi/pascal1.py
term
InnoFang/misc-code
4
python
def term(self): '\n \n ' node = self.factor() while (self.current_token.type in (MUL, DIV)): token = self.current_token if (token.type == MUL): self.eat(MUL) elif (token.type == DIV): self.eat(DIV) node = BinOp(left=node, op=token, right=self.factor()) return node
def term(self): '\n \n ' node = self.factor() while (self.current_token.type in (MUL, DIV)): token = self.current_token if (token.type == MUL): self.eat(MUL) elif (token.type == DIV): self.eat(DIV) node = BinOp(left=node, op=token, right=self.factor()) return node<|docstring|>term : factor ((MUL | DIV) factor) *<|endoftext|>
162afa4af143113767ff8001091df50b95542dbbdaa73eaf1a769ebd2233f83f
def factor(self):
    """factor : PLUS factor
              | MINUS factor
              | INTEGER
              | LPAREN expr RPAREN
              | variable
    """
    token = self.current_token
    # Unary plus/minus: both alternatives build a UnaryOp over a
    # recursively parsed factor, so they share one branch.
    if token.type in (PLUS, MINUS):
        self.eat(token.type)
        return UnaryOp(op=token, expr=self.factor())
    if token.type == INTEGER:
        self.eat(INTEGER)
        return Num(token)
    if token.type == LPAREN:
        self.eat(LPAREN)
        inner = self.expr()
        self.eat(RPAREN)
        return inner
    # Anything else must be a variable reference.
    return self.variable()
factor : PLUS factor | MINUS factor | INTEGER | LPAREN expr RPAReN | variable
Python/lsbasi/pascal1.py
factor
InnoFang/misc-code
4
python
def factor(self): '\n factor : PLUS factor\n | MINUS factor\n | INTEGER\n | LPAREN expr RPAReN\n | variable\n ' token = self.current_token if (token.type == PLUS): self.eat(PLUS) node = UnaryOp(op=token, expr=self.factor()) return node elif (token.type == MINUS): self.eat(MINUS) node = UnaryOp(op=token, expr=self.factor()) return node elif (token.type == INTEGER): self.eat(INTEGER) return Num(token) elif (token.type == LPAREN): self.eat(LPAREN) node = self.expr() self.eat(RPAREN) return node else: node = self.variable() return node
def factor(self): '\n factor : PLUS factor\n | MINUS factor\n | INTEGER\n | LPAREN expr RPAReN\n | variable\n ' token = self.current_token if (token.type == PLUS): self.eat(PLUS) node = UnaryOp(op=token, expr=self.factor()) return node elif (token.type == MINUS): self.eat(MINUS) node = UnaryOp(op=token, expr=self.factor()) return node elif (token.type == INTEGER): self.eat(INTEGER) return Num(token) elif (token.type == LPAREN): self.eat(LPAREN) node = self.expr() self.eat(RPAREN) return node else: node = self.variable() return node<|docstring|>factor : PLUS factor | MINUS factor | INTEGER | LPAREN expr RPAReN | variable<|endoftext|>
3469a047b6dfc8524d63c3b206d4c1b1a994dc3ec685262e951fddc34f9bf6f1
def parse(self):
    """Parse the token stream into an AST rooted at `program`.

    Grammar:
        program              : compound_statement DOT
        compound_statement   : BEGIN statement_list END
        statement_list       : statement
                             | statement SEMI statement_list
        statement            : compound_statement
                             | assignment_statement
                             | empty
        assignment_statement : variable ASSIGN expr
        empty                :
        expr                 : term ((PLUS | MINUS) term)*
        term                 : factor ((MUL | DIV) factor)*
        factor               : PLUS factor
                             | MINUS factor
                             | INTEGER
                             | LPAREN expr RPAREN
                             | variable
        variable             : ID
    """
    root = self.program()
    # Trailing tokens after the final DOT are a syntax error.
    if self.current_token.type != EOF:
        self.error()
    return root
program : compound_statement DOT compound_statement : BEGIN statement_list END statement_list : statement | statement SEMI statement_list statement : compound_statement | assignment_statement | empty assignment_statement : variable ASSIGN expr empty : expr: term ((PLUS | MINUS) term)* term: factor ((MUL | DIV) factor)* factor : PLUS factor | MINUS factor | INTEGER | LPAREN expr RPAREN | variable variable: ID
Python/lsbasi/pascal1.py
parse
InnoFang/misc-code
4
python
def parse(self): '\n program : compound_statement DOT\n compound_statement : BEGIN statement_list END\n statement_list : statement\n | statement SEMI statement_list\n statement : compound_statement\n | assignment_statement\n | empty\n assignment_statement : variable ASSIGN expr\n empty :\n expr: term ((PLUS | MINUS) term)*\n term: factor ((MUL | DIV) factor)*\n factor : PLUS factor\n | MINUS factor\n | INTEGER\n | LPAREN expr RPAREN\n | variable\n variable: ID\n ' node = self.program() if (self.current_token.type != EOF): self.error() return node
def parse(self): '\n program : compound_statement DOT\n compound_statement : BEGIN statement_list END\n statement_list : statement\n | statement SEMI statement_list\n statement : compound_statement\n | assignment_statement\n | empty\n assignment_statement : variable ASSIGN expr\n empty :\n expr: term ((PLUS | MINUS) term)*\n term: factor ((MUL | DIV) factor)*\n factor : PLUS factor\n | MINUS factor\n | INTEGER\n | LPAREN expr RPAREN\n | variable\n variable: ID\n ' node = self.program() if (self.current_token.type != EOF): self.error() return node<|docstring|>program : compound_statement DOT compound_statement : BEGIN statement_list END statement_list : statement | statement SEMI statement_list statement : compound_statement | assignment_statement | empty assignment_statement : variable ASSIGN expr empty : expr: term ((PLUS | MINUS) term)* term: factor ((MUL | DIV) factor)* factor : PLUS factor | MINUS factor | INTEGER | LPAREN expr RPAREN | variable variable: ID<|endoftext|>
1a78458e0500bb4c82efe4e7f8dd0909796b1e7d0b76f68e8adc35a6bc3b2752
def export_material_property(self, b_mat, flags=1):
    """Return an existing NiMaterialProperty matching this Blender material,
    or create and register a new one.

    :param b_mat: Blender material to export.
    :param flags: Flags word for the NiMaterialProperty (default 1).
    :return: The (possibly shared) NiMaterialProperty, or None for Skyrim.
    """
    # Skyrim gets no NiMaterialProperty at all — presumably materials are
    # handled via shader properties elsewhere; TODO confirm.
    if (bpy.context.scene.niftools_scene.game in ('SKYRIM',)):
        return
    name = block_store.get_full_name(b_mat)
    n_mat_prop = NifFormat.NiMaterialProperty()
    # Material names the engine treats specially; the exact casing below
    # must be preserved in the nif.
    specialnames = ('EnvMap2', 'EnvMap', 'skin', 'Hair', 'dynalpha', 'HideSecret', 'Lava')
    # NOTE(review): 'SKYRIM' in this tuple is unreachable because of the
    # early return above.
    if (bpy.context.scene.niftools_scene.game in ('OBLIVION', 'FALLOUT_3', 'SKYRIM')):
        for specialname in specialnames:
            # Match the special name case-insensitively, either exactly or
            # as a prefix before Blender's ".001"-style suffix.
            if ((name.lower() == specialname.lower()) or name.lower().startswith((specialname.lower() + '.'))):
                if (name != specialname):
                    NifLog.warn(f"Renaming material '{name}' to '{specialname}'")
                name = specialname
    # "noname" materials are exported with an empty name.
    if name.lower().startswith('noname'):
        NifLog.warn(f"Renaming material '{name}' to ''")
        name = ''
    n_mat_prop.name = name
    n_mat_prop.flags = flags
    # Copy the color channels from the material / its niftools extension.
    ambient = b_mat.niftools.ambient_color
    n_mat_prop.ambient_color.r = ambient.r
    n_mat_prop.ambient_color.g = ambient.g
    n_mat_prop.ambient_color.b = ambient.b
    # Blender diffuse_color is RGBA; the alpha component is discarded.
    (n_mat_prop.diffuse_color.r, n_mat_prop.diffuse_color.g, n_mat_prop.diffuse_color.b, _) = b_mat.diffuse_color
    (n_mat_prop.specular_color.r, n_mat_prop.specular_color.g, n_mat_prop.specular_color.b) = b_mat.specular_color
    emissive = b_mat.niftools.emissive_color
    n_mat_prop.emissive_color.r = emissive.r
    n_mat_prop.emissive_color.g = emissive.g
    n_mat_prop.emissive_color.b = emissive.b
    # Map Blender roughness to nif glossiness via 1/roughness - 1,
    # clamped to 128; roughness == 0 gets the maximum directly.
    n_mat_prop.glossiness = (min(((1 / b_mat.roughness) - 1), 128) if (b_mat.roughness != 0) else 128)
    # NOTE(review): alpha is sourced from emissive_alpha.v — looks like the
    # addon stores material alpha there; confirm against the property group.
    n_mat_prop.alpha = b_mat.niftools.emissive_alpha.v
    # Deduplicate: reuse an already-exported property with an identical hash.
    for n_block in block_store.block_to_obj:
        if (not isinstance(n_block, NifFormat.NiMaterialProperty)):
            continue
        if EXPORT_OPTIMIZE_MATERIALS:
            # When optimizing, ignore the name portion of the hash —
            # unless the name is special and must be kept distinct.
            ignore_strings = (not (n_block.name in specialnames))
        else:
            ignore_strings = False
        # The first hash element is assumed to be the name string —
        # skipping index 0 compares everything but the name; TODO confirm.
        first_index = (1 if ignore_strings else 0)
        if (n_block.get_hash()[first_index:] == n_mat_prop.get_hash()[first_index:]):
            NifLog.warn(f"Merging materials '{n_mat_prop.name}' and '{n_block.name}' (they are identical in nif)")
            n_mat_prop = n_block
            break
    block_store.register_block(n_mat_prop)
    # Export any material animations attached to this Blender material.
    self.material_anim.export_material(b_mat, n_mat_prop)
    return n_mat_prop
Return existing material property with given settings, or create a new one if a material property with these settings is not found.
io_scene_niftools/modules/nif_export/property/material/__init__.py
export_material_property
BlenderAddonsArchive/blender_niftools_addon
94
python
def export_material_property(self, b_mat, flags=1): 'Return existing material property with given settings, or create\n a new one if a material property with these settings is not found.' if (bpy.context.scene.niftools_scene.game in ('SKYRIM',)): return name = block_store.get_full_name(b_mat) n_mat_prop = NifFormat.NiMaterialProperty() specialnames = ('EnvMap2', 'EnvMap', 'skin', 'Hair', 'dynalpha', 'HideSecret', 'Lava') if (bpy.context.scene.niftools_scene.game in ('OBLIVION', 'FALLOUT_3', 'SKYRIM')): for specialname in specialnames: if ((name.lower() == specialname.lower()) or name.lower().startswith((specialname.lower() + '.'))): if (name != specialname): NifLog.warn(f"Renaming material '{name}' to '{specialname}'") name = specialname if name.lower().startswith('noname'): NifLog.warn(f"Renaming material '{name}' to ") name = n_mat_prop.name = name n_mat_prop.flags = flags ambient = b_mat.niftools.ambient_color n_mat_prop.ambient_color.r = ambient.r n_mat_prop.ambient_color.g = ambient.g n_mat_prop.ambient_color.b = ambient.b (n_mat_prop.diffuse_color.r, n_mat_prop.diffuse_color.g, n_mat_prop.diffuse_color.b, _) = b_mat.diffuse_color (n_mat_prop.specular_color.r, n_mat_prop.specular_color.g, n_mat_prop.specular_color.b) = b_mat.specular_color emissive = b_mat.niftools.emissive_color n_mat_prop.emissive_color.r = emissive.r n_mat_prop.emissive_color.g = emissive.g n_mat_prop.emissive_color.b = emissive.b n_mat_prop.glossiness = (min(((1 / b_mat.roughness) - 1), 128) if (b_mat.roughness != 0) else 128) n_mat_prop.alpha = b_mat.niftools.emissive_alpha.v for n_block in block_store.block_to_obj: if (not isinstance(n_block, NifFormat.NiMaterialProperty)): continue if EXPORT_OPTIMIZE_MATERIALS: ignore_strings = (not (n_block.name in specialnames)) else: ignore_strings = False first_index = (1 if ignore_strings else 0) if (n_block.get_hash()[first_index:] == n_mat_prop.get_hash()[first_index:]): NifLog.warn(f"Merging materials '{n_mat_prop.name}' and '{n_block.name}' 
(they are identical in nif)") n_mat_prop = n_block break block_store.register_block(n_mat_prop) self.material_anim.export_material(b_mat, n_mat_prop) return n_mat_prop
def export_material_property(self, b_mat, flags=1): 'Return existing material property with given settings, or create\n a new one if a material property with these settings is not found.' if (bpy.context.scene.niftools_scene.game in ('SKYRIM',)): return name = block_store.get_full_name(b_mat) n_mat_prop = NifFormat.NiMaterialProperty() specialnames = ('EnvMap2', 'EnvMap', 'skin', 'Hair', 'dynalpha', 'HideSecret', 'Lava') if (bpy.context.scene.niftools_scene.game in ('OBLIVION', 'FALLOUT_3', 'SKYRIM')): for specialname in specialnames: if ((name.lower() == specialname.lower()) or name.lower().startswith((specialname.lower() + '.'))): if (name != specialname): NifLog.warn(f"Renaming material '{name}' to '{specialname}'") name = specialname if name.lower().startswith('noname'): NifLog.warn(f"Renaming material '{name}' to ") name = n_mat_prop.name = name n_mat_prop.flags = flags ambient = b_mat.niftools.ambient_color n_mat_prop.ambient_color.r = ambient.r n_mat_prop.ambient_color.g = ambient.g n_mat_prop.ambient_color.b = ambient.b (n_mat_prop.diffuse_color.r, n_mat_prop.diffuse_color.g, n_mat_prop.diffuse_color.b, _) = b_mat.diffuse_color (n_mat_prop.specular_color.r, n_mat_prop.specular_color.g, n_mat_prop.specular_color.b) = b_mat.specular_color emissive = b_mat.niftools.emissive_color n_mat_prop.emissive_color.r = emissive.r n_mat_prop.emissive_color.g = emissive.g n_mat_prop.emissive_color.b = emissive.b n_mat_prop.glossiness = (min(((1 / b_mat.roughness) - 1), 128) if (b_mat.roughness != 0) else 128) n_mat_prop.alpha = b_mat.niftools.emissive_alpha.v for n_block in block_store.block_to_obj: if (not isinstance(n_block, NifFormat.NiMaterialProperty)): continue if EXPORT_OPTIMIZE_MATERIALS: ignore_strings = (not (n_block.name in specialnames)) else: ignore_strings = False first_index = (1 if ignore_strings else 0) if (n_block.get_hash()[first_index:] == n_mat_prop.get_hash()[first_index:]): NifLog.warn(f"Merging materials '{n_mat_prop.name}' and '{n_block.name}' 
(they are identical in nif)") n_mat_prop = n_block break block_store.register_block(n_mat_prop) self.material_anim.export_material(b_mat, n_mat_prop) return n_mat_prop<|docstring|>Return existing material property with given settings, or create a new one if a material property with these settings is not found.<|endoftext|>
c5a2a441c5016388e7920f68ae8913fae37047a0d722662cb03f0e0af72513e9
@tree.command()
async def help(intr: dc.Interaction):
    """Show help."""
    # Static usage text for the /measure workflow, sent as the
    # interaction response.
    await intr.response.send_message(
        "Use `/measure` to have the bot join your current voice channel, and measure everyone's voice loudness. While measuring, everyone should talk at their usual loudness. (Talking at the same is ok.)\nAfter that, bot recommends percentages that you should set everyone else's volume at. (You can ignore your own percentage.)"
    )
Show help.
voice_eq_bot/__init__.py
help
OdielDomanie/voice_eq_bot
0
python
@tree.command() async def help(intr: dc.Interaction): help_str = "Use `/measure` to have the bot join your current voice channel, and measure everyone's voice loudness. While measuring, everyone should talk at their usual loudness. (Talking at the same is ok.)\nAfter that, bot recommends percentages that you should set everyone else's volume at. (You can ignore your own percentage.)" (await intr.response.send_message(help_str))
@tree.command() async def help(intr: dc.Interaction): help_str = "Use `/measure` to have the bot join your current voice channel, and measure everyone's voice loudness. While measuring, everyone should talk at their usual loudness. (Talking at the same is ok.)\nAfter that, bot recommends percentages that you should set everyone else's volume at. (You can ignore your own percentage.)" (await intr.response.send_message(help_str))<|docstring|>Show help.<|endoftext|>
6bafddbf2e9b2a21aab84788363222408f5520315dfe4ac8ca4bce2459512c30
@tree.command(guild=test_guild)
async def measure(intr: dc.Interaction, duration: int=10):
    """Join the voice channel to measure each member's voice level,
    and recommend volume percentages.

    :param intr: The slash-command interaction.
    :param duration: Measuring window in seconds, capped at 30.
    """
    # Guild-only: DMs have no voice channels.
    if (not intr.guild):
        return
    duration = min(duration, 30)
    # EAFP: intr.user.voice is None (AttributeError) or channel is None
    # (AssertionError) when the caller is not connected to voice.
    try:
        voice_chn = intr.user.voice.channel
        assert voice_chn
    except (AttributeError, AssertionError):
        (await intr.response.send_message('You need to be in a voice channel', ephemeral=True))
        return
    # An existing voice_client means a measurement is already running.
    if intr.guild.voice_client:
        (await intr.response.send_message('Already measuring.', ephemeral=True))
        return
    permissions = voice_chn.permissions_for(intr.guild.me)
    if (not permissions.connect):
        (await intr.response.send_message("The bot doesn't have permission to join the voice channel.", ephemeral=True))
        return
    # Connect and receive voice; the context manager presumably disconnects
    # on exit — this relies on a discord.py fork with receive support.
    async with (await voice_chn.connect(timeout=5, cls=dc.VoiceClient)) as voice_client:
        # Announce concurrently so the response doesn't delay recording.
        resp = intr.response.send_message('Measuring voice levels, everyone should speak now.')
        resp_task = asyncio.create_task(resp)
        voice_receiver: dc.VoiceReceiver = (await voice_client.start_receiving(buffer=10, output_type='float'))
        (await resp_task)
        # Accumulate raw float PCM per member id.
        user_pcms: dict[(int, MemberPCM)] = {}
        async for (member, _, pcm) in voice_receiver(duration):
            member_pcm = user_pcms.setdefault(member.id, MemberPCM(member, bytearray()))
            # Keep the freshest member object (it may start as a partial).
            member_pcm.member = member
            member_pcm.pcm.extend(pcm)
    # NOTE(review): original indentation was lost in transit; this assumes
    # post-processing runs after leaving the channel — confirm against the
    # upstream source.
    # Integrated loudness (LUFS) per member from the captured PCM.
    loudnesses = {m_pcm.member: loudness(float_to_array(m_pcm.pcm, voice_receiver.channels), voice_receiver.sampling_rate) for m_pcm in user_pcms.values()}
    # Volume multiplier that would bring each member to TARGET_LUFS.
    adjustments = {user: db_to_dc_percent((TARGET_LUFS - loud)) for (user, loud) in loudnesses.items()}
    reply_lines = []
    for (vc_user, adj) in adjustments.items():
        # Skip members needing more than a 300% boost (adj is a fraction,
        # formatted with :.0% below) — presumably too quiet to recommend
        # a setting for; TODO confirm intent.
        ADJ_CUTOFF = 3.0
        if (adj > ADJ_CUTOFF):
            pass
        else:
            adj_perc_str = f'{adj:.0%}'
            # Positive rel_loudness: member is louder than target.
            rel_loudness = (loudnesses[vc_user] - TARGET_LUFS)
            # Partial user objects need a fetch to resolve a display name.
            if isinstance(vc_user, dc.Object):
                try:
                    member = (await intr.guild.fetch_member(vc_user.id))
                except (dc.NotFound, dc.HTTPException):
                    continue
                name = member.display_name
            else:
                name = vc_user.display_name
            # 🔉 = turn this member down, 🔊 = turn them up.
            reply_lines.append(f"`{name}`: `{adj_perc_str}` (` {(- rel_loudness):+3.1f} dB {('🔉' if (rel_loudness > 0) else '🔊')}`)")
    reply_lines.sort()
    # The initial response was already sent, so reply via followup.
    if (len(reply_lines) == 0):
        (await intr.followup.send('No one talked.'))
    else:
        (await intr.followup.send(('__Optimal volume settings:__\n\n' + '\n'.join(reply_lines))))
Join the voice channel to measure each member's voice level, and recommend volume percentages.
voice_eq_bot/__init__.py
measure
OdielDomanie/voice_eq_bot
0
python
@tree.command(guild=test_guild) async def measure(intr: dc.Interaction, duration: int=10): "Join the voice channel to measure each member's voice level,\n and recommend volume percentages.\n " if (not intr.guild): return duration = min(duration, 30) try: voice_chn = intr.user.voice.channel assert voice_chn except (AttributeError, AssertionError): (await intr.response.send_message('You need to be in a voice channel', ephemeral=True)) return if intr.guild.voice_client: (await intr.response.send_message('Already measuring.', ephemeral=True)) return permissions = voice_chn.permissions_for(intr.guild.me) if (not permissions.connect): (await intr.response.send_message("The bot doesn't have permission to join the voice channel.", ephemeral=True)) return async with (await voice_chn.connect(timeout=5, cls=dc.VoiceClient)) as voice_client: resp = intr.response.send_message('Measuring voice levels, everyone should speak now.') resp_task = asyncio.create_task(resp) voice_receiver: dc.VoiceReceiver = (await voice_client.start_receiving(buffer=10, output_type='float')) (await resp_task) user_pcms: dict[(int, MemberPCM)] = {} async for (member, _, pcm) in voice_receiver(duration): member_pcm = user_pcms.setdefault(member.id, MemberPCM(member, bytearray())) member_pcm.member = member member_pcm.pcm.extend(pcm) loudnesses = {m_pcm.member: loudness(float_to_array(m_pcm.pcm, voice_receiver.channels), voice_receiver.sampling_rate) for m_pcm in user_pcms.values()} adjustments = {user: db_to_dc_percent((TARGET_LUFS - loud)) for (user, loud) in loudnesses.items()} reply_lines = [] for (vc_user, adj) in adjustments.items(): ADJ_CUTOFF = 3.0 if (adj > ADJ_CUTOFF): pass else: adj_perc_str = f'{adj:.0%}' rel_loudness = (loudnesses[vc_user] - TARGET_LUFS) if isinstance(vc_user, dc.Object): try: member = (await intr.guild.fetch_member(vc_user.id)) except (dc.NotFound, dc.HTTPException): continue name = member.display_name else: name = vc_user.display_name reply_lines.append(f"`{name}`: 
`{adj_perc_str}` (` {(- rel_loudness):+3.1f} dB {('🔉' if (rel_loudness > 0) else '🔊')}`)") reply_lines.sort() if (len(reply_lines) == 0): (await intr.followup.send('No one talked.')) else: (await intr.followup.send(('__Optimal volume settings:__\n\n' + '\n'.join(reply_lines))))
@tree.command(guild=test_guild) async def measure(intr: dc.Interaction, duration: int=10): "Join the voice channel to measure each member's voice level,\n and recommend volume percentages.\n " if (not intr.guild): return duration = min(duration, 30) try: voice_chn = intr.user.voice.channel assert voice_chn except (AttributeError, AssertionError): (await intr.response.send_message('You need to be in a voice channel', ephemeral=True)) return if intr.guild.voice_client: (await intr.response.send_message('Already measuring.', ephemeral=True)) return permissions = voice_chn.permissions_for(intr.guild.me) if (not permissions.connect): (await intr.response.send_message("The bot doesn't have permission to join the voice channel.", ephemeral=True)) return async with (await voice_chn.connect(timeout=5, cls=dc.VoiceClient)) as voice_client: resp = intr.response.send_message('Measuring voice levels, everyone should speak now.') resp_task = asyncio.create_task(resp) voice_receiver: dc.VoiceReceiver = (await voice_client.start_receiving(buffer=10, output_type='float')) (await resp_task) user_pcms: dict[(int, MemberPCM)] = {} async for (member, _, pcm) in voice_receiver(duration): member_pcm = user_pcms.setdefault(member.id, MemberPCM(member, bytearray())) member_pcm.member = member member_pcm.pcm.extend(pcm) loudnesses = {m_pcm.member: loudness(float_to_array(m_pcm.pcm, voice_receiver.channels), voice_receiver.sampling_rate) for m_pcm in user_pcms.values()} adjustments = {user: db_to_dc_percent((TARGET_LUFS - loud)) for (user, loud) in loudnesses.items()} reply_lines = [] for (vc_user, adj) in adjustments.items(): ADJ_CUTOFF = 3.0 if (adj > ADJ_CUTOFF): pass else: adj_perc_str = f'{adj:.0%}' rel_loudness = (loudnesses[vc_user] - TARGET_LUFS) if isinstance(vc_user, dc.Object): try: member = (await intr.guild.fetch_member(vc_user.id)) except (dc.NotFound, dc.HTTPException): continue name = member.display_name else: name = vc_user.display_name reply_lines.append(f"`{name}`: 
`{adj_perc_str}` (` {(- rel_loudness):+3.1f} dB {('🔉' if (rel_loudness > 0) else '🔊')}`)") reply_lines.sort() if (len(reply_lines) == 0): (await intr.followup.send('No one talked.')) else: (await intr.followup.send(('__Optimal volume settings:__\n\n' + '\n'.join(reply_lines))))<|docstring|>Join the voice channel to measure each member's voice level, and recommend volume percentages.<|endoftext|>
f897c7328dc36a44dc66ca67cb13d23eec7f766af1c478369211d9dd0909e8a7
def __init__(self, description, errorMessage, resolutionMessage, show=True, messageLevel=1):
    """Initialize a new L{RunningAction}

    @param description: Action description
    @type description: string
    @param errorMessage: Message shown when the action fails
    @type errorMessage: string
    @param resolutionMessage: Action resolution message
    @type resolutionMessage: string
    @param show: Display action
    @type show: bool
    @param messageLevel: Message level
    @type messageLevel: number
    """
    # Plain attribute capture; assignments are order-independent.
    self.description = description
    self.errorMessage = errorMessage
    self.resolutionMessage = resolutionMessage
    self.show = show
    self.messageLevel = messageLevel
Initialize a new L{RunningAction} @param description: Action description @type description: string @param resolutionMessage: Action resolution message @type resolutionMessage: string @param show: Display action @type show: bool @param messageLevel: Message level @type messageLevel: number
lib/JumpScale/baselib/actionsold/action/RunningAction.py
__init__
rudecs/jumpscale_core7
1
python
def __init__(self, description, errorMessage, resolutionMessage, show=True, messageLevel=1): 'Initialize a new L{RunningAction}\n\n @param description: Action description\n @type description: string\n @param resolutionMessage: Action resolution message\n @type resolutionMessage: string\n @param show: Display action\n @type show: bool\n @param messageLevel: Message level\n @type messageLevel: number\n ' self.description = description self.resolutionMessage = resolutionMessage self.errorMessage = errorMessage self.show = show self.messageLevel = messageLevel
def __init__(self, description, errorMessage, resolutionMessage, show=True, messageLevel=1): 'Initialize a new L{RunningAction}\n\n @param description: Action description\n @type description: string\n @param resolutionMessage: Action resolution message\n @type resolutionMessage: string\n @param show: Display action\n @type show: bool\n @param messageLevel: Message level\n @type messageLevel: number\n ' self.description = description self.resolutionMessage = resolutionMessage self.errorMessage = errorMessage self.show = show self.messageLevel = messageLevel<|docstring|>Initialize a new L{RunningAction} @param description: Action description @type description: string @param resolutionMessage: Action resolution message @type resolutionMessage: string @param show: Display action @type show: bool @param messageLevel: Message level @type messageLevel: number<|endoftext|>
ff43b5dd856bef54edad2131696508b9ffe15a0fe324a6feaa6e30abf0482692
@pytest.fixture
def source_path():
    """Get the xonsh source path."""
    # The repo root is one level above this tests/ directory.
    return os.path.dirname(os.path.dirname(__file__))
Get the xonsh source path.
tests/conftest.py
source_path
jmoranos/xonsh
3
python
@pytest.fixture def source_path(): pwd = os.path.dirname(__file__) return os.path.dirname(pwd)
@pytest.fixture def source_path(): pwd = os.path.dirname(__file__) return os.path.dirname(pwd)<|docstring|>Get the xonsh source path.<|endoftext|>