repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
poldracklab/niworkflows | niworkflows/interfaces/surf.py | vertex_normals | def vertex_normals(vertices, faces):
"""Calculates the normals of a triangular mesh"""
def normalize_v3(arr):
''' Normalize a numpy array of 3 component vectors shape=(n,3) '''
lens = np.sqrt(arr[:, 0]**2 + arr[:, 1]**2 + arr[:, 2]**2)
arr /= lens[:, np.newaxis]
tris = vertices[faces]
facenorms = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0])
normalize_v3(facenorms)
norm = np.zeros(vertices.shape, dtype=vertices.dtype)
norm[faces[:, 0]] += facenorms
norm[faces[:, 1]] += facenorms
norm[faces[:, 2]] += facenorms
normalize_v3(norm)
return norm | python | def vertex_normals(vertices, faces):
"""Calculates the normals of a triangular mesh"""
def normalize_v3(arr):
''' Normalize a numpy array of 3 component vectors shape=(n,3) '''
lens = np.sqrt(arr[:, 0]**2 + arr[:, 1]**2 + arr[:, 2]**2)
arr /= lens[:, np.newaxis]
tris = vertices[faces]
facenorms = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0])
normalize_v3(facenorms)
norm = np.zeros(vertices.shape, dtype=vertices.dtype)
norm[faces[:, 0]] += facenorms
norm[faces[:, 1]] += facenorms
norm[faces[:, 2]] += facenorms
normalize_v3(norm)
return norm | [
"def",
"vertex_normals",
"(",
"vertices",
",",
"faces",
")",
":",
"def",
"normalize_v3",
"(",
"arr",
")",
":",
"''' Normalize a numpy array of 3 component vectors shape=(n,3) '''",
"lens",
"=",
"np",
".",
"sqrt",
"(",
"arr",
"[",
":",
",",
"0",
"]",
"**",
"2",... | Calculates the normals of a triangular mesh | [
"Calculates",
"the",
"normals",
"of",
"a",
"triangular",
"mesh"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/interfaces/surf.py#L487-L504 | train | 211,800 |
poldracklab/niworkflows | niworkflows/interfaces/surf.py | pointcloud2ply | def pointcloud2ply(vertices, normals, out_file=None):
"""Converts the file to PLY format"""
from pathlib import Path
import pandas as pd
from pyntcloud import PyntCloud
df = pd.DataFrame(np.hstack((vertices, normals)))
df.columns = ['x', 'y', 'z', 'nx', 'ny', 'nz']
cloud = PyntCloud(df)
if out_file is None:
out_file = Path('pointcloud.ply').resolve()
cloud.to_file(str(out_file))
return out_file | python | def pointcloud2ply(vertices, normals, out_file=None):
"""Converts the file to PLY format"""
from pathlib import Path
import pandas as pd
from pyntcloud import PyntCloud
df = pd.DataFrame(np.hstack((vertices, normals)))
df.columns = ['x', 'y', 'z', 'nx', 'ny', 'nz']
cloud = PyntCloud(df)
if out_file is None:
out_file = Path('pointcloud.ply').resolve()
cloud.to_file(str(out_file))
return out_file | [
"def",
"pointcloud2ply",
"(",
"vertices",
",",
"normals",
",",
"out_file",
"=",
"None",
")",
":",
"from",
"pathlib",
"import",
"Path",
"import",
"pandas",
"as",
"pd",
"from",
"pyntcloud",
"import",
"PyntCloud",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"np",... | Converts the file to PLY format | [
"Converts",
"the",
"file",
"to",
"PLY",
"format"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/interfaces/surf.py#L507-L520 | train | 211,801 |
poldracklab/niworkflows | niworkflows/interfaces/surf.py | ply2gii | def ply2gii(in_file, metadata, out_file=None):
"""Convert from ply to GIfTI"""
from pathlib import Path
from numpy import eye
from nibabel.gifti import (
GiftiMetaData, GiftiCoordSystem, GiftiImage, GiftiDataArray,
)
from pyntcloud import PyntCloud
in_file = Path(in_file)
surf = PyntCloud.from_file(str(in_file))
# Update centroid metadata
metadata.update(
zip(('SurfaceCenterX', 'SurfaceCenterY', 'SurfaceCenterZ'),
['%.4f' % c for c in surf.centroid])
)
# Prepare data arrays
da = (
GiftiDataArray(
data=surf.xyz.astype('float32'),
datatype='NIFTI_TYPE_FLOAT32',
intent='NIFTI_INTENT_POINTSET',
meta=GiftiMetaData.from_dict(metadata),
coordsys=GiftiCoordSystem(xform=eye(4), xformspace=3)),
GiftiDataArray(
data=surf.mesh.values,
datatype='NIFTI_TYPE_INT32',
intent='NIFTI_INTENT_TRIANGLE',
coordsys=None))
surfgii = GiftiImage(darrays=da)
if out_file is None:
out_file = fname_presuffix(
in_file.name, suffix='.gii', use_ext=False, newpath=str(Path.cwd()))
surfgii.to_filename(str(out_file))
return out_file | python | def ply2gii(in_file, metadata, out_file=None):
"""Convert from ply to GIfTI"""
from pathlib import Path
from numpy import eye
from nibabel.gifti import (
GiftiMetaData, GiftiCoordSystem, GiftiImage, GiftiDataArray,
)
from pyntcloud import PyntCloud
in_file = Path(in_file)
surf = PyntCloud.from_file(str(in_file))
# Update centroid metadata
metadata.update(
zip(('SurfaceCenterX', 'SurfaceCenterY', 'SurfaceCenterZ'),
['%.4f' % c for c in surf.centroid])
)
# Prepare data arrays
da = (
GiftiDataArray(
data=surf.xyz.astype('float32'),
datatype='NIFTI_TYPE_FLOAT32',
intent='NIFTI_INTENT_POINTSET',
meta=GiftiMetaData.from_dict(metadata),
coordsys=GiftiCoordSystem(xform=eye(4), xformspace=3)),
GiftiDataArray(
data=surf.mesh.values,
datatype='NIFTI_TYPE_INT32',
intent='NIFTI_INTENT_TRIANGLE',
coordsys=None))
surfgii = GiftiImage(darrays=da)
if out_file is None:
out_file = fname_presuffix(
in_file.name, suffix='.gii', use_ext=False, newpath=str(Path.cwd()))
surfgii.to_filename(str(out_file))
return out_file | [
"def",
"ply2gii",
"(",
"in_file",
",",
"metadata",
",",
"out_file",
"=",
"None",
")",
":",
"from",
"pathlib",
"import",
"Path",
"from",
"numpy",
"import",
"eye",
"from",
"nibabel",
".",
"gifti",
"import",
"(",
"GiftiMetaData",
",",
"GiftiCoordSystem",
",",
... | Convert from ply to GIfTI | [
"Convert",
"from",
"ply",
"to",
"GIfTI"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/interfaces/surf.py#L523-L561 | train | 211,802 |
poldracklab/niworkflows | niworkflows/utils/misc.py | fix_multi_T1w_source_name | def fix_multi_T1w_source_name(in_files):
"""
Make up a generic source name when there are multiple T1s
>>> fix_multi_T1w_source_name([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'])
'/path/to/sub-045_T1w.nii.gz'
"""
import os
from nipype.utils.filemanip import filename_to_list
base, in_file = os.path.split(filename_to_list(in_files)[0])
subject_label = in_file.split("_", 1)[0].split("-")[1]
return os.path.join(base, "sub-%s_T1w.nii.gz" % subject_label) | python | def fix_multi_T1w_source_name(in_files):
"""
Make up a generic source name when there are multiple T1s
>>> fix_multi_T1w_source_name([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'])
'/path/to/sub-045_T1w.nii.gz'
"""
import os
from nipype.utils.filemanip import filename_to_list
base, in_file = os.path.split(filename_to_list(in_files)[0])
subject_label = in_file.split("_", 1)[0].split("-")[1]
return os.path.join(base, "sub-%s_T1w.nii.gz" % subject_label) | [
"def",
"fix_multi_T1w_source_name",
"(",
"in_files",
")",
":",
"import",
"os",
"from",
"nipype",
".",
"utils",
".",
"filemanip",
"import",
"filename_to_list",
"base",
",",
"in_file",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename_to_list",
"(",
"in_files... | Make up a generic source name when there are multiple T1s
>>> fix_multi_T1w_source_name([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'])
'/path/to/sub-045_T1w.nii.gz' | [
"Make",
"up",
"a",
"generic",
"source",
"name",
"when",
"there",
"are",
"multiple",
"T1s"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/utils/misc.py#L14-L28 | train | 211,803 |
poldracklab/niworkflows | niworkflows/utils/misc.py | add_suffix | def add_suffix(in_files, suffix):
"""
Wrap nipype's fname_presuffix to conveniently just add a prefix
>>> add_suffix([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
'sub-045_ses-test_T1w_test.nii.gz'
"""
import os.path as op
from nipype.utils.filemanip import fname_presuffix, filename_to_list
return op.basename(fname_presuffix(filename_to_list(in_files)[0],
suffix=suffix)) | python | def add_suffix(in_files, suffix):
"""
Wrap nipype's fname_presuffix to conveniently just add a prefix
>>> add_suffix([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
'sub-045_ses-test_T1w_test.nii.gz'
"""
import os.path as op
from nipype.utils.filemanip import fname_presuffix, filename_to_list
return op.basename(fname_presuffix(filename_to_list(in_files)[0],
suffix=suffix)) | [
"def",
"add_suffix",
"(",
"in_files",
",",
"suffix",
")",
":",
"import",
"os",
".",
"path",
"as",
"op",
"from",
"nipype",
".",
"utils",
".",
"filemanip",
"import",
"fname_presuffix",
",",
"filename_to_list",
"return",
"op",
".",
"basename",
"(",
"fname_presu... | Wrap nipype's fname_presuffix to conveniently just add a prefix
>>> add_suffix([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
'sub-045_ses-test_T1w_test.nii.gz' | [
"Wrap",
"nipype",
"s",
"fname_presuffix",
"to",
"conveniently",
"just",
"add",
"a",
"prefix"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/utils/misc.py#L31-L44 | train | 211,804 |
poldracklab/niworkflows | niworkflows/utils/misc.py | _read_txt | def _read_txt(path):
"""Read a txt crashfile
>>> new_path = Path(__file__).resolve().parent.parent
>>> test_data_path = new_path / 'data' / 'tests'
>>> info = _read_txt(test_data_path / 'crashfile.txt')
>>> info['node'] # doctest: +ELLIPSIS
'...func_preproc_task_machinegame_run_02_wf.carpetplot_wf.conf_plot'
>>> info['traceback'] # doctest: +ELLIPSIS
'...ValueError: zero-size array to reduction operation minimum which has no identity'
"""
from pathlib import Path
lines = Path(path).read_text().splitlines()
data = {'file': str(path)}
traceback_start = 0
if lines[0].startswith('Node'):
data['node'] = lines[0].split(': ', 1)[1].strip()
data['node_dir'] = lines[1].split(': ', 1)[1].strip()
inputs = []
cur_key = ''
cur_val = ''
for i, line in enumerate(lines[5:]):
if not line.strip():
continue
if line[0].isspace():
cur_val += line
continue
if cur_val:
inputs.append((cur_key, cur_val.strip()))
if line.startswith("Traceback ("):
traceback_start = i + 5
break
cur_key, cur_val = tuple(line.split(' = ', 1))
data['inputs'] = sorted(inputs)
else:
data['node_dir'] = "Node crashed before execution"
data['traceback'] = '\n'.join(lines[traceback_start:]).strip()
return data | python | def _read_txt(path):
"""Read a txt crashfile
>>> new_path = Path(__file__).resolve().parent.parent
>>> test_data_path = new_path / 'data' / 'tests'
>>> info = _read_txt(test_data_path / 'crashfile.txt')
>>> info['node'] # doctest: +ELLIPSIS
'...func_preproc_task_machinegame_run_02_wf.carpetplot_wf.conf_plot'
>>> info['traceback'] # doctest: +ELLIPSIS
'...ValueError: zero-size array to reduction operation minimum which has no identity'
"""
from pathlib import Path
lines = Path(path).read_text().splitlines()
data = {'file': str(path)}
traceback_start = 0
if lines[0].startswith('Node'):
data['node'] = lines[0].split(': ', 1)[1].strip()
data['node_dir'] = lines[1].split(': ', 1)[1].strip()
inputs = []
cur_key = ''
cur_val = ''
for i, line in enumerate(lines[5:]):
if not line.strip():
continue
if line[0].isspace():
cur_val += line
continue
if cur_val:
inputs.append((cur_key, cur_val.strip()))
if line.startswith("Traceback ("):
traceback_start = i + 5
break
cur_key, cur_val = tuple(line.split(' = ', 1))
data['inputs'] = sorted(inputs)
else:
data['node_dir'] = "Node crashed before execution"
data['traceback'] = '\n'.join(lines[traceback_start:]).strip()
return data | [
"def",
"_read_txt",
"(",
"path",
")",
":",
"from",
"pathlib",
"import",
"Path",
"lines",
"=",
"Path",
"(",
"path",
")",
".",
"read_text",
"(",
")",
".",
"splitlines",
"(",
")",
"data",
"=",
"{",
"'file'",
":",
"str",
"(",
"path",
")",
"}",
"traceba... | Read a txt crashfile
>>> new_path = Path(__file__).resolve().parent.parent
>>> test_data_path = new_path / 'data' / 'tests'
>>> info = _read_txt(test_data_path / 'crashfile.txt')
>>> info['node'] # doctest: +ELLIPSIS
'...func_preproc_task_machinegame_run_02_wf.carpetplot_wf.conf_plot'
>>> info['traceback'] # doctest: +ELLIPSIS
'...ValueError: zero-size array to reduction operation minimum which has no identity' | [
"Read",
"a",
"txt",
"crashfile"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/utils/misc.py#L70-L113 | train | 211,805 |
poldracklab/niworkflows | niworkflows/anat/ants.py | _conform_mask | def _conform_mask(in_mask, in_reference):
"""Ensures the mask headers make sense and match those of the T1w"""
from pathlib import Path
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
ref = nb.load(in_reference)
nii = nb.load(in_mask)
hdr = nii.header.copy()
hdr.set_data_dtype('int16')
hdr.set_slope_inter(1, 0)
qform, qcode = ref.header.get_qform(coded=True)
if qcode is not None:
hdr.set_qform(qform, int(qcode))
sform, scode = ref.header.get_sform(coded=True)
if scode is not None:
hdr.set_sform(sform, int(scode))
if '_maths' in in_mask: # Cut the name at first _maths occurrence
ext = ''.join(Path(in_mask).suffixes)
basename = Path(in_mask).name
in_mask = basename.split('_maths')[0] + ext
out_file = fname_presuffix(in_mask, suffix='_mask',
newpath=str(Path()))
nii.__class__(nii.get_data().astype('int16'), ref.affine,
hdr).to_filename(out_file)
return out_file | python | def _conform_mask(in_mask, in_reference):
"""Ensures the mask headers make sense and match those of the T1w"""
from pathlib import Path
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
ref = nb.load(in_reference)
nii = nb.load(in_mask)
hdr = nii.header.copy()
hdr.set_data_dtype('int16')
hdr.set_slope_inter(1, 0)
qform, qcode = ref.header.get_qform(coded=True)
if qcode is not None:
hdr.set_qform(qform, int(qcode))
sform, scode = ref.header.get_sform(coded=True)
if scode is not None:
hdr.set_sform(sform, int(scode))
if '_maths' in in_mask: # Cut the name at first _maths occurrence
ext = ''.join(Path(in_mask).suffixes)
basename = Path(in_mask).name
in_mask = basename.split('_maths')[0] + ext
out_file = fname_presuffix(in_mask, suffix='_mask',
newpath=str(Path()))
nii.__class__(nii.get_data().astype('int16'), ref.affine,
hdr).to_filename(out_file)
return out_file | [
"def",
"_conform_mask",
"(",
"in_mask",
",",
"in_reference",
")",
":",
"from",
"pathlib",
"import",
"Path",
"import",
"nibabel",
"as",
"nb",
"from",
"nipype",
".",
"utils",
".",
"filemanip",
"import",
"fname_presuffix",
"ref",
"=",
"nb",
".",
"load",
"(",
... | Ensures the mask headers make sense and match those of the T1w | [
"Ensures",
"the",
"mask",
"headers",
"make",
"sense",
"and",
"match",
"those",
"of",
"the",
"T1w"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/anat/ants.py#L602-L631 | train | 211,806 |
poldracklab/niworkflows | niworkflows/data/getters.py | get_template | def get_template(template_name, data_dir=None, url=None, resume=True, verbose=1):
"""Download and load a template"""
warn(DEPRECATION_MSG)
if template_name.startswith('tpl-'):
template_name = template_name[4:]
# An aliasing mechanism. Please avoid
template_name = TEMPLATE_ALIASES.get(template_name, template_name)
return get_dataset(template_name, dataset_prefix='tpl-', data_dir=data_dir,
url=url, resume=resume, verbose=verbose) | python | def get_template(template_name, data_dir=None, url=None, resume=True, verbose=1):
"""Download and load a template"""
warn(DEPRECATION_MSG)
if template_name.startswith('tpl-'):
template_name = template_name[4:]
# An aliasing mechanism. Please avoid
template_name = TEMPLATE_ALIASES.get(template_name, template_name)
return get_dataset(template_name, dataset_prefix='tpl-', data_dir=data_dir,
url=url, resume=resume, verbose=verbose) | [
"def",
"get_template",
"(",
"template_name",
",",
"data_dir",
"=",
"None",
",",
"url",
"=",
"None",
",",
"resume",
"=",
"True",
",",
"verbose",
"=",
"1",
")",
":",
"warn",
"(",
"DEPRECATION_MSG",
")",
"if",
"template_name",
".",
"startswith",
"(",
"'tpl-... | Download and load a template | [
"Download",
"and",
"load",
"a",
"template"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/data/getters.py#L69-L78 | train | 211,807 |
poldracklab/niworkflows | niworkflows/data/getters.py | get_bids_examples | def get_bids_examples(data_dir=None, url=None, resume=True, verbose=1,
variant='BIDS-examples-1-1.0.0-rc3u5'):
"""Download BIDS-examples-1"""
warn(DEPRECATION_MSG)
variant = 'BIDS-examples-1-1.0.0-rc3u5' if variant not in BIDS_EXAMPLES else variant
if url is None:
url = BIDS_EXAMPLES[variant][0]
md5 = BIDS_EXAMPLES[variant][1]
return fetch_file(variant, url, data_dir, resume=resume, verbose=verbose,
md5sum=md5) | python | def get_bids_examples(data_dir=None, url=None, resume=True, verbose=1,
variant='BIDS-examples-1-1.0.0-rc3u5'):
"""Download BIDS-examples-1"""
warn(DEPRECATION_MSG)
variant = 'BIDS-examples-1-1.0.0-rc3u5' if variant not in BIDS_EXAMPLES else variant
if url is None:
url = BIDS_EXAMPLES[variant][0]
md5 = BIDS_EXAMPLES[variant][1]
return fetch_file(variant, url, data_dir, resume=resume, verbose=verbose,
md5sum=md5) | [
"def",
"get_bids_examples",
"(",
"data_dir",
"=",
"None",
",",
"url",
"=",
"None",
",",
"resume",
"=",
"True",
",",
"verbose",
"=",
"1",
",",
"variant",
"=",
"'BIDS-examples-1-1.0.0-rc3u5'",
")",
":",
"warn",
"(",
"DEPRECATION_MSG",
")",
"variant",
"=",
"'... | Download BIDS-examples-1 | [
"Download",
"BIDS",
"-",
"examples",
"-",
"1"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/data/getters.py#L107-L116 | train | 211,808 |
poldracklab/niworkflows | niworkflows/viz/utils.py | svg2str | def svg2str(display_object, dpi=300):
"""
Serializes a nilearn display object as a string
"""
from io import StringIO
image_buf = StringIO()
display_object.frame_axes.figure.savefig(
image_buf, dpi=dpi, format='svg',
facecolor='k', edgecolor='k')
return image_buf.getvalue() | python | def svg2str(display_object, dpi=300):
"""
Serializes a nilearn display object as a string
"""
from io import StringIO
image_buf = StringIO()
display_object.frame_axes.figure.savefig(
image_buf, dpi=dpi, format='svg',
facecolor='k', edgecolor='k')
return image_buf.getvalue() | [
"def",
"svg2str",
"(",
"display_object",
",",
"dpi",
"=",
"300",
")",
":",
"from",
"io",
"import",
"StringIO",
"image_buf",
"=",
"StringIO",
"(",
")",
"display_object",
".",
"frame_axes",
".",
"figure",
".",
"savefig",
"(",
"image_buf",
",",
"dpi",
"=",
... | Serializes a nilearn display object as a string | [
"Serializes",
"a",
"nilearn",
"display",
"object",
"as",
"a",
"string"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/viz/utils.py#L164-L173 | train | 211,809 |
poldracklab/niworkflows | niworkflows/viz/utils.py | extract_svg | def extract_svg(display_object, dpi=300, compress='auto'):
"""
Removes the preamble of the svg files generated with nilearn
"""
image_svg = svg2str(display_object, dpi)
if compress is True or compress == 'auto':
image_svg = svg_compress(image_svg, compress)
image_svg = re.sub(' height="[0-9]+[a-z]*"', '', image_svg, count=1)
image_svg = re.sub(' width="[0-9]+[a-z]*"', '', image_svg, count=1)
image_svg = re.sub(' viewBox',
' preseveAspectRation="xMidYMid meet" viewBox',
image_svg, count=1)
start_tag = '<svg '
start_idx = image_svg.find(start_tag)
end_tag = '</svg>'
end_idx = image_svg.rfind(end_tag)
if start_idx is -1 or end_idx is -1:
NIWORKFLOWS_LOG.info('svg tags not found in extract_svg')
# rfind gives the start index of the substr. We want this substr
# included in our return value so we add its length to the index.
end_idx += len(end_tag)
return image_svg[start_idx:end_idx] | python | def extract_svg(display_object, dpi=300, compress='auto'):
"""
Removes the preamble of the svg files generated with nilearn
"""
image_svg = svg2str(display_object, dpi)
if compress is True or compress == 'auto':
image_svg = svg_compress(image_svg, compress)
image_svg = re.sub(' height="[0-9]+[a-z]*"', '', image_svg, count=1)
image_svg = re.sub(' width="[0-9]+[a-z]*"', '', image_svg, count=1)
image_svg = re.sub(' viewBox',
' preseveAspectRation="xMidYMid meet" viewBox',
image_svg, count=1)
start_tag = '<svg '
start_idx = image_svg.find(start_tag)
end_tag = '</svg>'
end_idx = image_svg.rfind(end_tag)
if start_idx is -1 or end_idx is -1:
NIWORKFLOWS_LOG.info('svg tags not found in extract_svg')
# rfind gives the start index of the substr. We want this substr
# included in our return value so we add its length to the index.
end_idx += len(end_tag)
return image_svg[start_idx:end_idx] | [
"def",
"extract_svg",
"(",
"display_object",
",",
"dpi",
"=",
"300",
",",
"compress",
"=",
"'auto'",
")",
":",
"image_svg",
"=",
"svg2str",
"(",
"display_object",
",",
"dpi",
")",
"if",
"compress",
"is",
"True",
"or",
"compress",
"==",
"'auto'",
":",
"im... | Removes the preamble of the svg files generated with nilearn | [
"Removes",
"the",
"preamble",
"of",
"the",
"svg",
"files",
"generated",
"with",
"nilearn"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/viz/utils.py#L176-L197 | train | 211,810 |
poldracklab/niworkflows | niworkflows/viz/utils.py | cuts_from_bbox | def cuts_from_bbox(mask_nii, cuts=3):
"""Finds equi-spaced cuts for presenting images"""
from nibabel.affines import apply_affine
mask_data = mask_nii.get_data() > 0.0
# First, project the number of masked voxels on each axes
ijk_counts = [
mask_data.sum(2).sum(1), # project sagittal planes to transverse (i) axis
mask_data.sum(2).sum(0), # project coronal planes to to longitudinal (j) axis
mask_data.sum(1).sum(0), # project axial planes to vertical (k) axis
]
# If all voxels are masked in a slice (say that happens at k=10),
# then the value for ijk_counts for the projection to k (ie. ijk_counts[2])
# at that element of the orthogonal axes (ijk_counts[2][10]) is
# the total number of voxels in that slice (ie. Ni x Nj).
# Here we define some thresholds to consider the plane as "masked"
# The thresholds vary because of the shape of the brain
# I have manually found that for the axial view requiring 30%
# of the slice elements to be masked drops almost empty boxes
# in the mosaic of axial planes (and also addresses #281)
ijk_th = [
int((mask_data.shape[1] * mask_data.shape[2]) * 0.2), # sagittal
int((mask_data.shape[0] * mask_data.shape[2]) * 0.0), # coronal
int((mask_data.shape[0] * mask_data.shape[1]) * 0.3), # axial
]
vox_coords = []
for ax, (c, th) in enumerate(zip(ijk_counts, ijk_th)):
B = np.argwhere(c > th)
if B.size:
smin, smax = B.min(), B.max()
# Avoid too narrow selections of cuts (very small masks)
if not B.size or (th > 0 and (smin + cuts + 1) >= smax):
B = np.argwhere(c > 0)
# Resort to full plane if mask is seemingly empty
smin, smax = B.min(), B.max() if B.size else (0, mask_data.shape[ax])
inc = (smax - smin) / (cuts + 1)
vox_coords.append([smin + (i + 1) * inc for i in range(cuts)])
ras_coords = []
for cross in np.array(vox_coords).T:
ras_coords.append(apply_affine(
mask_nii.affine, cross).tolist())
ras_cuts = [list(coords) for coords in np.transpose(ras_coords)]
return {k: v for k, v in zip(['x', 'y', 'z'], ras_cuts)} | python | def cuts_from_bbox(mask_nii, cuts=3):
"""Finds equi-spaced cuts for presenting images"""
from nibabel.affines import apply_affine
mask_data = mask_nii.get_data() > 0.0
# First, project the number of masked voxels on each axes
ijk_counts = [
mask_data.sum(2).sum(1), # project sagittal planes to transverse (i) axis
mask_data.sum(2).sum(0), # project coronal planes to to longitudinal (j) axis
mask_data.sum(1).sum(0), # project axial planes to vertical (k) axis
]
# If all voxels are masked in a slice (say that happens at k=10),
# then the value for ijk_counts for the projection to k (ie. ijk_counts[2])
# at that element of the orthogonal axes (ijk_counts[2][10]) is
# the total number of voxels in that slice (ie. Ni x Nj).
# Here we define some thresholds to consider the plane as "masked"
# The thresholds vary because of the shape of the brain
# I have manually found that for the axial view requiring 30%
# of the slice elements to be masked drops almost empty boxes
# in the mosaic of axial planes (and also addresses #281)
ijk_th = [
int((mask_data.shape[1] * mask_data.shape[2]) * 0.2), # sagittal
int((mask_data.shape[0] * mask_data.shape[2]) * 0.0), # coronal
int((mask_data.shape[0] * mask_data.shape[1]) * 0.3), # axial
]
vox_coords = []
for ax, (c, th) in enumerate(zip(ijk_counts, ijk_th)):
B = np.argwhere(c > th)
if B.size:
smin, smax = B.min(), B.max()
# Avoid too narrow selections of cuts (very small masks)
if not B.size or (th > 0 and (smin + cuts + 1) >= smax):
B = np.argwhere(c > 0)
# Resort to full plane if mask is seemingly empty
smin, smax = B.min(), B.max() if B.size else (0, mask_data.shape[ax])
inc = (smax - smin) / (cuts + 1)
vox_coords.append([smin + (i + 1) * inc for i in range(cuts)])
ras_coords = []
for cross in np.array(vox_coords).T:
ras_coords.append(apply_affine(
mask_nii.affine, cross).tolist())
ras_cuts = [list(coords) for coords in np.transpose(ras_coords)]
return {k: v for k, v in zip(['x', 'y', 'z'], ras_cuts)} | [
"def",
"cuts_from_bbox",
"(",
"mask_nii",
",",
"cuts",
"=",
"3",
")",
":",
"from",
"nibabel",
".",
"affines",
"import",
"apply_affine",
"mask_data",
"=",
"mask_nii",
".",
"get_data",
"(",
")",
">",
"0.0",
"# First, project the number of masked voxels on each axes",
... | Finds equi-spaced cuts for presenting images | [
"Finds",
"equi",
"-",
"spaced",
"cuts",
"for",
"presenting",
"images"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/viz/utils.py#L200-L248 | train | 211,811 |
poldracklab/niworkflows | niworkflows/viz/utils.py | _3d_in_file | def _3d_in_file(in_file):
''' if self.inputs.in_file is 3d, return it.
if 4d, pick an arbitrary volume and return that.
if in_file is a list of files, return an arbitrary file from
the list, and an arbitrary volume from that file
'''
in_file = filemanip.filename_to_list(in_file)[0]
try:
in_file = nb.load(in_file)
except AttributeError:
in_file = in_file
if in_file.get_data().ndim == 3:
return in_file
return nlimage.index_img(in_file, 0) | python | def _3d_in_file(in_file):
''' if self.inputs.in_file is 3d, return it.
if 4d, pick an arbitrary volume and return that.
if in_file is a list of files, return an arbitrary file from
the list, and an arbitrary volume from that file
'''
in_file = filemanip.filename_to_list(in_file)[0]
try:
in_file = nb.load(in_file)
except AttributeError:
in_file = in_file
if in_file.get_data().ndim == 3:
return in_file
return nlimage.index_img(in_file, 0) | [
"def",
"_3d_in_file",
"(",
"in_file",
")",
":",
"in_file",
"=",
"filemanip",
".",
"filename_to_list",
"(",
"in_file",
")",
"[",
"0",
"]",
"try",
":",
"in_file",
"=",
"nb",
".",
"load",
"(",
"in_file",
")",
"except",
"AttributeError",
":",
"in_file",
"=",... | if self.inputs.in_file is 3d, return it.
if 4d, pick an arbitrary volume and return that.
if in_file is a list of files, return an arbitrary file from
the list, and an arbitrary volume from that file | [
"if",
"self",
".",
"inputs",
".",
"in_file",
"is",
"3d",
"return",
"it",
".",
"if",
"4d",
"pick",
"an",
"arbitrary",
"volume",
"and",
"return",
"that",
"."
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/viz/utils.py#L251-L269 | train | 211,812 |
poldracklab/niworkflows | niworkflows/viz/utils.py | transform_to_2d | def transform_to_2d(data, max_axis):
"""
Projects 3d data cube along one axis using maximum intensity with
preservation of the signs. Adapted from nilearn.
"""
import numpy as np
# get the shape of the array we are projecting to
new_shape = list(data.shape)
del new_shape[max_axis]
# generate a 3D indexing array that points to max abs value in the
# current projection
a1, a2 = np.indices(new_shape)
inds = [a1, a2]
inds.insert(max_axis, np.abs(data).argmax(axis=max_axis))
# take the values where the absolute value of the projection
# is the highest
maximum_intensity_data = data[inds]
return np.rot90(maximum_intensity_data) | python | def transform_to_2d(data, max_axis):
"""
Projects 3d data cube along one axis using maximum intensity with
preservation of the signs. Adapted from nilearn.
"""
import numpy as np
# get the shape of the array we are projecting to
new_shape = list(data.shape)
del new_shape[max_axis]
# generate a 3D indexing array that points to max abs value in the
# current projection
a1, a2 = np.indices(new_shape)
inds = [a1, a2]
inds.insert(max_axis, np.abs(data).argmax(axis=max_axis))
# take the values where the absolute value of the projection
# is the highest
maximum_intensity_data = data[inds]
return np.rot90(maximum_intensity_data) | [
"def",
"transform_to_2d",
"(",
"data",
",",
"max_axis",
")",
":",
"import",
"numpy",
"as",
"np",
"# get the shape of the array we are projecting to",
"new_shape",
"=",
"list",
"(",
"data",
".",
"shape",
")",
"del",
"new_shape",
"[",
"max_axis",
"]",
"# generate a ... | Projects 3d data cube along one axis using maximum intensity with
preservation of the signs. Adapted from nilearn. | [
"Projects",
"3d",
"data",
"cube",
"along",
"one",
"axis",
"using",
"maximum",
"intensity",
"with",
"preservation",
"of",
"the",
"signs",
".",
"Adapted",
"from",
"nilearn",
"."
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/viz/utils.py#L496-L516 | train | 211,813 |
poldracklab/niworkflows | setup.py | main | def main():
""" Install entry-point """
from os import path as op
from inspect import getfile, currentframe
from setuptools import setup, find_packages
from niworkflows.__about__ import (
__packagename__,
__author__,
__email__,
__maintainer__,
__license__,
__description__,
__longdesc__,
__url__,
DOWNLOAD_URL,
CLASSIFIERS,
REQUIRES,
SETUP_REQUIRES,
LINKS_REQUIRES,
TESTS_REQUIRES,
EXTRA_REQUIRES,
)
pkg_data = {
'niworkflows': [
'data/t1-mni_registration*.json',
'data/bold-mni_registration*.json',
'reports/figures.json',
'reports/fmriprep.yml',
'reports/report.tpl',
]}
root_dir = op.dirname(op.abspath(getfile(currentframe())))
version = None
cmdclass = {}
if op.isfile(op.join(root_dir, __packagename__, 'VERSION')):
with open(op.join(root_dir, __packagename__, 'VERSION')) as vfile:
version = vfile.readline().strip()
pkg_data[__packagename__].insert(0, 'VERSION')
if version is None:
import versioneer
version = versioneer.get_version()
cmdclass = versioneer.get_cmdclass()
setup(
name=__packagename__,
version=version,
description=__description__,
long_description=__longdesc__,
author=__author__,
author_email=__email__,
maintainer=__maintainer__,
maintainer_email=__email__,
license=__license__,
url=__url__,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
packages=find_packages(exclude=['*.tests']),
zip_safe=False,
# Dependencies handling
setup_requires=SETUP_REQUIRES,
install_requires=list(set(REQUIRES)),
dependency_links=LINKS_REQUIRES,
tests_require=TESTS_REQUIRES,
extras_require=EXTRA_REQUIRES,
# Data
package_data=pkg_data,
include_package_data=True,
cmdclass=cmdclass,
) | python | def main():
""" Install entry-point """
from os import path as op
from inspect import getfile, currentframe
from setuptools import setup, find_packages
from niworkflows.__about__ import (
__packagename__,
__author__,
__email__,
__maintainer__,
__license__,
__description__,
__longdesc__,
__url__,
DOWNLOAD_URL,
CLASSIFIERS,
REQUIRES,
SETUP_REQUIRES,
LINKS_REQUIRES,
TESTS_REQUIRES,
EXTRA_REQUIRES,
)
pkg_data = {
'niworkflows': [
'data/t1-mni_registration*.json',
'data/bold-mni_registration*.json',
'reports/figures.json',
'reports/fmriprep.yml',
'reports/report.tpl',
]}
root_dir = op.dirname(op.abspath(getfile(currentframe())))
version = None
cmdclass = {}
if op.isfile(op.join(root_dir, __packagename__, 'VERSION')):
with open(op.join(root_dir, __packagename__, 'VERSION')) as vfile:
version = vfile.readline().strip()
pkg_data[__packagename__].insert(0, 'VERSION')
if version is None:
import versioneer
version = versioneer.get_version()
cmdclass = versioneer.get_cmdclass()
setup(
name=__packagename__,
version=version,
description=__description__,
long_description=__longdesc__,
author=__author__,
author_email=__email__,
maintainer=__maintainer__,
maintainer_email=__email__,
license=__license__,
url=__url__,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
packages=find_packages(exclude=['*.tests']),
zip_safe=False,
# Dependencies handling
setup_requires=SETUP_REQUIRES,
install_requires=list(set(REQUIRES)),
dependency_links=LINKS_REQUIRES,
tests_require=TESTS_REQUIRES,
extras_require=EXTRA_REQUIRES,
# Data
package_data=pkg_data,
include_package_data=True,
cmdclass=cmdclass,
) | [
"def",
"main",
"(",
")",
":",
"from",
"os",
"import",
"path",
"as",
"op",
"from",
"inspect",
"import",
"getfile",
",",
"currentframe",
"from",
"setuptools",
"import",
"setup",
",",
"find_packages",
"from",
"niworkflows",
".",
"__about__",
"import",
"(",
"__p... | Install entry-point | [
"Install",
"entry",
"-",
"point"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/setup.py#L9-L79 | train | 211,814 |
poldracklab/niworkflows | niworkflows/anat/skullstrip.py | afni_wf | def afni_wf(name='AFNISkullStripWorkflow', unifize=False, n4_nthreads=1):
"""
Skull-stripping workflow
Originally derived from the `codebase of the
QAP <https://github.com/preprocessed-connectomes-project/\
quality-assessment-protocol/blob/master/qap/anatomical_preproc.py#L105>`_.
Now, this workflow includes :abbr:`INU (intensity non-uniformity)` correction
using the N4 algorithm and (optionally) intensity harmonization using
ANFI's ``3dUnifize``.
"""
workflow = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['bias_corrected', 'out_file', 'out_mask', 'bias_image']), name='outputnode')
inu_n4 = pe.Node(
ants.N4BiasFieldCorrection(dimension=3, save_bias=True, num_threads=n4_nthreads,
copy_header=True),
n_procs=n4_nthreads,
name='inu_n4')
sstrip = pe.Node(afni.SkullStrip(outputtype='NIFTI_GZ'), name='skullstrip')
sstrip_orig_vol = pe.Node(afni.Calc(
expr='a*step(b)', outputtype='NIFTI_GZ'), name='sstrip_orig_vol')
binarize = pe.Node(fsl.Threshold(args='-bin', thresh=1.e-3), name='binarize')
if unifize:
# Add two unifize steps, pre- and post- skullstripping.
inu_uni_0 = pe.Node(afni.Unifize(outputtype='NIFTI_GZ'),
name='unifize_pre_skullstrip')
inu_uni_1 = pe.Node(afni.Unifize(gm=True, outputtype='NIFTI_GZ'),
name='unifize_post_skullstrip')
workflow.connect([
(inu_n4, inu_uni_0, [('output_image', 'in_file')]),
(inu_uni_0, sstrip, [('out_file', 'in_file')]),
(inu_uni_0, sstrip_orig_vol, [('out_file', 'in_file_a')]),
(sstrip_orig_vol, inu_uni_1, [('out_file', 'in_file')]),
(inu_uni_1, outputnode, [('out_file', 'out_file')]),
(inu_uni_0, outputnode, [('out_file', 'bias_corrected')]),
])
else:
workflow.connect([
(inputnode, sstrip_orig_vol, [('in_file', 'in_file_a')]),
(inu_n4, sstrip, [('output_image', 'in_file')]),
(sstrip_orig_vol, outputnode, [('out_file', 'out_file')]),
(inu_n4, outputnode, [('output_image', 'bias_corrected')]),
])
# Remaining connections
workflow.connect([
(sstrip, sstrip_orig_vol, [('out_file', 'in_file_b')]),
(inputnode, inu_n4, [('in_file', 'input_image')]),
(sstrip_orig_vol, binarize, [('out_file', 'in_file')]),
(binarize, outputnode, [('out_file', 'out_mask')]),
(inu_n4, outputnode, [('bias_image', 'bias_image')]),
])
return workflow | python | def afni_wf(name='AFNISkullStripWorkflow', unifize=False, n4_nthreads=1):
"""
Skull-stripping workflow
Originally derived from the `codebase of the
QAP <https://github.com/preprocessed-connectomes-project/\
quality-assessment-protocol/blob/master/qap/anatomical_preproc.py#L105>`_.
Now, this workflow includes :abbr:`INU (intensity non-uniformity)` correction
using the N4 algorithm and (optionally) intensity harmonization using
ANFI's ``3dUnifize``.
"""
workflow = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['bias_corrected', 'out_file', 'out_mask', 'bias_image']), name='outputnode')
inu_n4 = pe.Node(
ants.N4BiasFieldCorrection(dimension=3, save_bias=True, num_threads=n4_nthreads,
copy_header=True),
n_procs=n4_nthreads,
name='inu_n4')
sstrip = pe.Node(afni.SkullStrip(outputtype='NIFTI_GZ'), name='skullstrip')
sstrip_orig_vol = pe.Node(afni.Calc(
expr='a*step(b)', outputtype='NIFTI_GZ'), name='sstrip_orig_vol')
binarize = pe.Node(fsl.Threshold(args='-bin', thresh=1.e-3), name='binarize')
if unifize:
# Add two unifize steps, pre- and post- skullstripping.
inu_uni_0 = pe.Node(afni.Unifize(outputtype='NIFTI_GZ'),
name='unifize_pre_skullstrip')
inu_uni_1 = pe.Node(afni.Unifize(gm=True, outputtype='NIFTI_GZ'),
name='unifize_post_skullstrip')
workflow.connect([
(inu_n4, inu_uni_0, [('output_image', 'in_file')]),
(inu_uni_0, sstrip, [('out_file', 'in_file')]),
(inu_uni_0, sstrip_orig_vol, [('out_file', 'in_file_a')]),
(sstrip_orig_vol, inu_uni_1, [('out_file', 'in_file')]),
(inu_uni_1, outputnode, [('out_file', 'out_file')]),
(inu_uni_0, outputnode, [('out_file', 'bias_corrected')]),
])
else:
workflow.connect([
(inputnode, sstrip_orig_vol, [('in_file', 'in_file_a')]),
(inu_n4, sstrip, [('output_image', 'in_file')]),
(sstrip_orig_vol, outputnode, [('out_file', 'out_file')]),
(inu_n4, outputnode, [('output_image', 'bias_corrected')]),
])
# Remaining connections
workflow.connect([
(sstrip, sstrip_orig_vol, [('out_file', 'in_file_b')]),
(inputnode, inu_n4, [('in_file', 'input_image')]),
(sstrip_orig_vol, binarize, [('out_file', 'in_file')]),
(binarize, outputnode, [('out_file', 'out_mask')]),
(inu_n4, outputnode, [('bias_image', 'bias_image')]),
])
return workflow | [
"def",
"afni_wf",
"(",
"name",
"=",
"'AFNISkullStripWorkflow'",
",",
"unifize",
"=",
"False",
",",
"n4_nthreads",
"=",
"1",
")",
":",
"workflow",
"=",
"pe",
".",
"Workflow",
"(",
"name",
"=",
"name",
")",
"inputnode",
"=",
"pe",
".",
"Node",
"(",
"niu"... | Skull-stripping workflow
Originally derived from the `codebase of the
QAP <https://github.com/preprocessed-connectomes-project/\
quality-assessment-protocol/blob/master/qap/anatomical_preproc.py#L105>`_.
Now, this workflow includes :abbr:`INU (intensity non-uniformity)` correction
using the N4 algorithm and (optionally) intensity harmonization using
ANFI's ``3dUnifize``. | [
"Skull",
"-",
"stripping",
"workflow"
] | 254f4b4fcc5e6ecb29d2f4602a30786b913ecce5 | https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/anat/skullstrip.py#L9-L70 | train | 211,815 |
mLewisLogic/foursquare | foursquare/__init__.py | _get | def _get(url, headers={}, params=None):
"""Tries to GET data from an endpoint using retries"""
param_string = _foursquare_urlencode(params)
for i in xrange(NUM_REQUEST_RETRIES):
try:
try:
response = requests.get(url, headers=headers, params=param_string, verify=VERIFY_SSL)
return _process_response(response)
except requests.exceptions.RequestException as e:
_log_and_raise_exception('Error connecting with foursquare API', e)
except FoursquareException as e:
# Some errors don't bear repeating
if e.__class__ in [InvalidAuth, ParamError, EndpointError, NotAuthorized, Deprecated]: raise
# If we've reached our last try, re-raise
if ((i + 1) == NUM_REQUEST_RETRIES): raise
time.sleep(1) | python | def _get(url, headers={}, params=None):
"""Tries to GET data from an endpoint using retries"""
param_string = _foursquare_urlencode(params)
for i in xrange(NUM_REQUEST_RETRIES):
try:
try:
response = requests.get(url, headers=headers, params=param_string, verify=VERIFY_SSL)
return _process_response(response)
except requests.exceptions.RequestException as e:
_log_and_raise_exception('Error connecting with foursquare API', e)
except FoursquareException as e:
# Some errors don't bear repeating
if e.__class__ in [InvalidAuth, ParamError, EndpointError, NotAuthorized, Deprecated]: raise
# If we've reached our last try, re-raise
if ((i + 1) == NUM_REQUEST_RETRIES): raise
time.sleep(1) | [
"def",
"_get",
"(",
"url",
",",
"headers",
"=",
"{",
"}",
",",
"params",
"=",
"None",
")",
":",
"param_string",
"=",
"_foursquare_urlencode",
"(",
"params",
")",
"for",
"i",
"in",
"xrange",
"(",
"NUM_REQUEST_RETRIES",
")",
":",
"try",
":",
"try",
":",
... | Tries to GET data from an endpoint using retries | [
"Tries",
"to",
"GET",
"data",
"from",
"an",
"endpoint",
"using",
"retries"
] | 420f3b588b9af154688ec82649f24a70f96c1665 | https://github.com/mLewisLogic/foursquare/blob/420f3b588b9af154688ec82649f24a70f96c1665/foursquare/__init__.py#L751-L766 | train | 211,816 |
mLewisLogic/foursquare | foursquare/__init__.py | _post | def _post(url, headers={}, data=None, files=None):
"""Tries to POST data to an endpoint"""
try:
response = requests.post(url, headers=headers, data=data, files=files, verify=VERIFY_SSL)
return _process_response(response)
except requests.exceptions.RequestException as e:
_log_and_raise_exception('Error connecting with foursquare API', e) | python | def _post(url, headers={}, data=None, files=None):
"""Tries to POST data to an endpoint"""
try:
response = requests.post(url, headers=headers, data=data, files=files, verify=VERIFY_SSL)
return _process_response(response)
except requests.exceptions.RequestException as e:
_log_and_raise_exception('Error connecting with foursquare API', e) | [
"def",
"_post",
"(",
"url",
",",
"headers",
"=",
"{",
"}",
",",
"data",
"=",
"None",
",",
"files",
"=",
"None",
")",
":",
"try",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"data",
... | Tries to POST data to an endpoint | [
"Tries",
"to",
"POST",
"data",
"to",
"an",
"endpoint"
] | 420f3b588b9af154688ec82649f24a70f96c1665 | https://github.com/mLewisLogic/foursquare/blob/420f3b588b9af154688ec82649f24a70f96c1665/foursquare/__init__.py#L768-L774 | train | 211,817 |
mLewisLogic/foursquare | foursquare/__init__.py | _process_response | def _process_response(response):
"""Make the request and handle exception processing"""
# Read the response as JSON
try:
data = response.json()
except ValueError:
_log_and_raise_exception('Invalid response', response.text)
# Default case, Got proper response
if response.status_code == 200:
return { 'headers': response.headers, 'data': data }
return _raise_error_from_response(data) | python | def _process_response(response):
"""Make the request and handle exception processing"""
# Read the response as JSON
try:
data = response.json()
except ValueError:
_log_and_raise_exception('Invalid response', response.text)
# Default case, Got proper response
if response.status_code == 200:
return { 'headers': response.headers, 'data': data }
return _raise_error_from_response(data) | [
"def",
"_process_response",
"(",
"response",
")",
":",
"# Read the response as JSON",
"try",
":",
"data",
"=",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"_log_and_raise_exception",
"(",
"'Invalid response'",
",",
"response",
".",
"text",
")",... | Make the request and handle exception processing | [
"Make",
"the",
"request",
"and",
"handle",
"exception",
"processing"
] | 420f3b588b9af154688ec82649f24a70f96c1665 | https://github.com/mLewisLogic/foursquare/blob/420f3b588b9af154688ec82649f24a70f96c1665/foursquare/__init__.py#L776-L787 | train | 211,818 |
mLewisLogic/foursquare | foursquare/__init__.py | _raise_error_from_response | def _raise_error_from_response(data):
"""Processes the response data"""
# Check the meta-data for why this request failed
meta = data.get('meta')
if meta:
# Account for foursquare conflicts
# see: https://developer.foursquare.com/overview/responses
if meta.get('code') in (200, 409): return data
exc = error_types.get(meta.get('errorType'))
if exc:
raise exc(meta.get('errorDetail'))
else:
_log_and_raise_exception('Unknown error. meta', meta)
else:
_log_and_raise_exception('Response format invalid, missing meta property. data', data) | python | def _raise_error_from_response(data):
"""Processes the response data"""
# Check the meta-data for why this request failed
meta = data.get('meta')
if meta:
# Account for foursquare conflicts
# see: https://developer.foursquare.com/overview/responses
if meta.get('code') in (200, 409): return data
exc = error_types.get(meta.get('errorType'))
if exc:
raise exc(meta.get('errorDetail'))
else:
_log_and_raise_exception('Unknown error. meta', meta)
else:
_log_and_raise_exception('Response format invalid, missing meta property. data', data) | [
"def",
"_raise_error_from_response",
"(",
"data",
")",
":",
"# Check the meta-data for why this request failed",
"meta",
"=",
"data",
".",
"get",
"(",
"'meta'",
")",
"if",
"meta",
":",
"# Account for foursquare conflicts",
"# see: https://developer.foursquare.com/overview/respo... | Processes the response data | [
"Processes",
"the",
"response",
"data"
] | 420f3b588b9af154688ec82649f24a70f96c1665 | https://github.com/mLewisLogic/foursquare/blob/420f3b588b9af154688ec82649f24a70f96c1665/foursquare/__init__.py#L789-L803 | train | 211,819 |
mLewisLogic/foursquare | foursquare/__init__.py | _foursquare_urlencode | def _foursquare_urlencode(query, doseq=0, safe_chars="&/,+"):
"""Gnarly hack because Foursquare doesn't properly handle standard url encoding"""
# Original doc: http://docs.python.org/2/library/urllib.html#urllib.urlencode
# Works the same way as urllib.urlencode except two differences -
# 1. it uses `quote()` instead of `quote_plus()`
# 2. it takes an extra parameter called `safe_chars` which is a string
# having the characters which should not be encoded.
#
# Courtesy of github.com/iambibhas
if hasattr(query,"items"):
# mapping objects
query = query.items()
else:
# it's a bother at times that strings and string-like objects are
# sequences...
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# zero-length sequences of all types will get here and succeed,
# but that's a minor nit - since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty,va,tb = sys.exc_info()
raise TypeError("not a valid non-string sequence or mapping object").with_traceback(tb)
l = []
if not doseq:
# preserve old behavior
for k, v in query:
k = parse.quote(_as_utf8(k), safe=safe_chars)
v = parse.quote(_as_utf8(v), safe=safe_chars)
l.append(k + '=' + v)
else:
for k, v in query:
k = parse.quote(_as_utf8(k), safe=safe_chars)
if isinstance(v, six.string_types):
v = parse.quote(_as_utf8(v), safe=safe_chars)
l.append(k + '=' + v)
else:
try:
# is this a sufficient test for sequence-ness?
len(v)
except TypeError:
# not a sequence
v = parse.quote(_as_utf8(v), safe=safe_chars)
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
l.append(k + '=' + parse.quote(_as_utf8(elt)))
return '&'.join(l) | python | def _foursquare_urlencode(query, doseq=0, safe_chars="&/,+"):
"""Gnarly hack because Foursquare doesn't properly handle standard url encoding"""
# Original doc: http://docs.python.org/2/library/urllib.html#urllib.urlencode
# Works the same way as urllib.urlencode except two differences -
# 1. it uses `quote()` instead of `quote_plus()`
# 2. it takes an extra parameter called `safe_chars` which is a string
# having the characters which should not be encoded.
#
# Courtesy of github.com/iambibhas
if hasattr(query,"items"):
# mapping objects
query = query.items()
else:
# it's a bother at times that strings and string-like objects are
# sequences...
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# zero-length sequences of all types will get here and succeed,
# but that's a minor nit - since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty,va,tb = sys.exc_info()
raise TypeError("not a valid non-string sequence or mapping object").with_traceback(tb)
l = []
if not doseq:
# preserve old behavior
for k, v in query:
k = parse.quote(_as_utf8(k), safe=safe_chars)
v = parse.quote(_as_utf8(v), safe=safe_chars)
l.append(k + '=' + v)
else:
for k, v in query:
k = parse.quote(_as_utf8(k), safe=safe_chars)
if isinstance(v, six.string_types):
v = parse.quote(_as_utf8(v), safe=safe_chars)
l.append(k + '=' + v)
else:
try:
# is this a sufficient test for sequence-ness?
len(v)
except TypeError:
# not a sequence
v = parse.quote(_as_utf8(v), safe=safe_chars)
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
l.append(k + '=' + parse.quote(_as_utf8(elt)))
return '&'.join(l) | [
"def",
"_foursquare_urlencode",
"(",
"query",
",",
"doseq",
"=",
"0",
",",
"safe_chars",
"=",
"\"&/,+\"",
")",
":",
"# Original doc: http://docs.python.org/2/library/urllib.html#urllib.urlencode",
"# Works the same way as urllib.urlencode except two differences -",
"# 1. it uses `quo... | Gnarly hack because Foursquare doesn't properly handle standard url encoding | [
"Gnarly",
"hack",
"because",
"Foursquare",
"doesn",
"t",
"properly",
"handle",
"standard",
"url",
"encoding"
] | 420f3b588b9af154688ec82649f24a70f96c1665 | https://github.com/mLewisLogic/foursquare/blob/420f3b588b9af154688ec82649f24a70f96c1665/foursquare/__init__.py#L811-L864 | train | 211,820 |
mLewisLogic/foursquare | foursquare/__init__.py | Foursquare._attach_endpoints | def _attach_endpoints(self):
"""Dynamically attach endpoint callables to this client"""
for name, endpoint in inspect.getmembers(self):
if inspect.isclass(endpoint) and issubclass(endpoint, self._Endpoint) and (endpoint is not self._Endpoint):
endpoint_instance = endpoint(self.base_requester)
setattr(self, endpoint_instance.endpoint, endpoint_instance) | python | def _attach_endpoints(self):
"""Dynamically attach endpoint callables to this client"""
for name, endpoint in inspect.getmembers(self):
if inspect.isclass(endpoint) and issubclass(endpoint, self._Endpoint) and (endpoint is not self._Endpoint):
endpoint_instance = endpoint(self.base_requester)
setattr(self, endpoint_instance.endpoint, endpoint_instance) | [
"def",
"_attach_endpoints",
"(",
"self",
")",
":",
"for",
"name",
",",
"endpoint",
"in",
"inspect",
".",
"getmembers",
"(",
"self",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"endpoint",
")",
"and",
"issubclass",
"(",
"endpoint",
",",
"self",
".",
... | Dynamically attach endpoint callables to this client | [
"Dynamically",
"attach",
"endpoint",
"callables",
"to",
"this",
"client"
] | 420f3b588b9af154688ec82649f24a70f96c1665 | https://github.com/mLewisLogic/foursquare/blob/420f3b588b9af154688ec82649f24a70f96c1665/foursquare/__init__.py#L118-L123 | train | 211,821 |
talpor/django-dashing | dashing/utils.py | Router.register | def register(self, widget, basename, **parameters):
""" Register a widget, URL basename and any optional URL parameters.
Parameters are passed as keyword arguments, i.e.
>>> router.register(MyWidget, 'mywidget', my_parameter="[A-Z0-9]+")
This would be the equivalent of manually adding the following
to urlpatterns:
>>> url(r"^widgets/mywidget/(P<my_parameter>[A-Z0-9]+)/?",
MyWidget.as_view(), "widget_mywidget")
"""
self.registry.append((widget, basename, parameters)) | python | def register(self, widget, basename, **parameters):
""" Register a widget, URL basename and any optional URL parameters.
Parameters are passed as keyword arguments, i.e.
>>> router.register(MyWidget, 'mywidget', my_parameter="[A-Z0-9]+")
This would be the equivalent of manually adding the following
to urlpatterns:
>>> url(r"^widgets/mywidget/(P<my_parameter>[A-Z0-9]+)/?",
MyWidget.as_view(), "widget_mywidget")
"""
self.registry.append((widget, basename, parameters)) | [
"def",
"register",
"(",
"self",
",",
"widget",
",",
"basename",
",",
"*",
"*",
"parameters",
")",
":",
"self",
".",
"registry",
".",
"append",
"(",
"(",
"widget",
",",
"basename",
",",
"parameters",
")",
")"
] | Register a widget, URL basename and any optional URL parameters.
Parameters are passed as keyword arguments, i.e.
>>> router.register(MyWidget, 'mywidget', my_parameter="[A-Z0-9]+")
This would be the equivalent of manually adding the following
to urlpatterns:
>>> url(r"^widgets/mywidget/(P<my_parameter>[A-Z0-9]+)/?",
MyWidget.as_view(), "widget_mywidget") | [
"Register",
"a",
"widget",
"URL",
"basename",
"and",
"any",
"optional",
"URL",
"parameters",
"."
] | 1edb9ac5d7b7079f079a1e85552bfdfc5e1a93f6 | https://github.com/talpor/django-dashing/blob/1edb9ac5d7b7079f079a1e85552bfdfc5e1a93f6/dashing/utils.py#L9-L21 | train | 211,822 |
kelproject/pykube | pykube/query.py | Query.iterator | def iterator(self):
"""
Execute the API request and return an iterator over the objects. This
method does not use the query cache.
"""
for obj in (self.execute().json().get("items") or []):
yield self.api_obj_class(self.api, obj) | python | def iterator(self):
"""
Execute the API request and return an iterator over the objects. This
method does not use the query cache.
"""
for obj in (self.execute().json().get("items") or []):
yield self.api_obj_class(self.api, obj) | [
"def",
"iterator",
"(",
"self",
")",
":",
"for",
"obj",
"in",
"(",
"self",
".",
"execute",
"(",
")",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"items\"",
")",
"or",
"[",
"]",
")",
":",
"yield",
"self",
".",
"api_obj_class",
"(",
"self",
".",
"a... | Execute the API request and return an iterator over the objects. This
method does not use the query cache. | [
"Execute",
"the",
"API",
"request",
"and",
"return",
"an",
"iterator",
"over",
"the",
"objects",
".",
"This",
"method",
"does",
"not",
"use",
"the",
"query",
"cache",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/query.py#L112-L118 | train | 211,823 |
kelproject/pykube | pykube/http.py | HTTPClient.version | def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"]) | python | def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"]) | [
"def",
"version",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"get",
"(",
"version",
"=",
"\"\"",
",",
"base",
"=",
"\"/version\"",
")",
"response",
".",
"raise_for_status",
"(",
")",
"data",
"=",
"response",
".",
"json",
"(",
")",
"return",
... | Get Kubernetes API version | [
"Get",
"Kubernetes",
"API",
"version"
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L178-L185 | train | 211,824 |
kelproject/pykube | pykube/http.py | HTTPClient.get_kwargs | def get_kwargs(self, **kwargs):
"""
Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint
"""
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs | python | def get_kwargs(self, **kwargs):
"""
Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint
"""
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs | [
"def",
"get_kwargs",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"version",
"=",
"kwargs",
".",
"pop",
"(",
"\"version\"",
",",
"\"v1\"",
")",
"if",
"version",
"==",
"\"v1\"",
":",
"base",
"=",
"kwargs",
".",
"pop",
"(",
"\"base\"",
",",
"\"/api\... | Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint | [
"Creates",
"a",
"full",
"URL",
"to",
"request",
"based",
"on",
"arguments",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L195-L230 | train | 211,825 |
kelproject/pykube | pykube/http.py | HTTPClient.request | def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs)) | python | def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs)) | [
"def",
"request",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"session",
".",
"request",
"(",
"*",
"args",
",",
"*",
"*",
"self",
".",
"get_kwargs",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | [
"Makes",
"an",
"API",
"request",
"based",
"on",
"arguments",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L244-L252 | train | 211,826 |
kelproject/pykube | pykube/http.py | HTTPClient.get | def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs)) | python | def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs)) | [
"def",
"get",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"session",
".",
"get",
"(",
"*",
"args",
",",
"*",
"*",
"self",
".",
"get_kwargs",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | [
"Executes",
"an",
"HTTP",
"GET",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L254-L262 | train | 211,827 |
kelproject/pykube | pykube/http.py | HTTPClient.options | def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs)) | python | def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs)) | [
"def",
"options",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"session",
".",
"options",
"(",
"*",
"args",
",",
"*",
"*",
"self",
".",
"get_kwargs",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | [
"Executes",
"an",
"HTTP",
"OPTIONS",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L264-L272 | train | 211,828 |
kelproject/pykube | pykube/http.py | HTTPClient.head | def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs)) | python | def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs)) | [
"def",
"head",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"session",
".",
"head",
"(",
"*",
"args",
",",
"*",
"*",
"self",
".",
"get_kwargs",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | [
"Executes",
"an",
"HTTP",
"HEAD",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L274-L282 | train | 211,829 |
kelproject/pykube | pykube/http.py | HTTPClient.post | def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs)) | python | def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs)) | [
"def",
"post",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"session",
".",
"post",
"(",
"*",
"args",
",",
"*",
"*",
"self",
".",
"get_kwargs",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | [
"Executes",
"an",
"HTTP",
"POST",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L284-L292 | train | 211,830 |
kelproject/pykube | pykube/http.py | HTTPClient.put | def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs)) | python | def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs)) | [
"def",
"put",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"session",
".",
"put",
"(",
"*",
"args",
",",
"*",
"*",
"self",
".",
"get_kwargs",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | [
"Executes",
"an",
"HTTP",
"PUT",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L294-L302 | train | 211,831 |
kelproject/pykube | pykube/http.py | HTTPClient.patch | def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs)) | python | def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs)) | [
"def",
"patch",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"session",
".",
"patch",
"(",
"*",
"args",
",",
"*",
"*",
"self",
".",
"get_kwargs",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | [
"Executes",
"an",
"HTTP",
"PATCH",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L304-L312 | train | 211,832 |
kelproject/pykube | pykube/http.py | HTTPClient.delete | def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs)) | python | def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs)) | [
"def",
"delete",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"session",
".",
"delete",
"(",
"*",
"args",
",",
"*",
"*",
"self",
".",
"get_kwargs",
"(",
"*",
"*",
"kwargs",
")",
")"
] | Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | [
"Executes",
"an",
"HTTP",
"DELETE",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L314-L322 | train | 211,833 |
kelproject/pykube | pykube/config.py | KubeConfig.from_file | def from_file(cls, filename, **kwargs):
"""
Creates an instance of the KubeConfig class from a kubeconfig file.
:Parameters:
- `filename`: The full path to the configuration file
"""
filename = os.path.expanduser(filename)
if not os.path.isfile(filename):
raise exceptions.PyKubeError("Configuration file {} not found".format(filename))
with open(filename) as f:
doc = yaml.safe_load(f.read())
self = cls(doc, **kwargs)
self.filename = filename
return self | python | def from_file(cls, filename, **kwargs):
"""
Creates an instance of the KubeConfig class from a kubeconfig file.
:Parameters:
- `filename`: The full path to the configuration file
"""
filename = os.path.expanduser(filename)
if not os.path.isfile(filename):
raise exceptions.PyKubeError("Configuration file {} not found".format(filename))
with open(filename) as f:
doc = yaml.safe_load(f.read())
self = cls(doc, **kwargs)
self.filename = filename
return self | [
"def",
"from_file",
"(",
"cls",
",",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"raise",
"exc... | Creates an instance of the KubeConfig class from a kubeconfig file.
:Parameters:
- `filename`: The full path to the configuration file | [
"Creates",
"an",
"instance",
"of",
"the",
"KubeConfig",
"class",
"from",
"a",
"kubeconfig",
"file",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L63-L77 | train | 211,834 |
kelproject/pykube | pykube/config.py | KubeConfig.clusters | def clusters(self):
"""
Returns known clusters by exposing as a read-only property.
"""
if not hasattr(self, "_clusters"):
cs = {}
for cr in self.doc["clusters"]:
cs[cr["name"]] = c = copy.deepcopy(cr["cluster"])
if "server" not in c:
c["server"] = "http://localhost"
BytesOrFile.maybe_set(c, "certificate-authority")
self._clusters = cs
return self._clusters | python | def clusters(self):
"""
Returns known clusters by exposing as a read-only property.
"""
if not hasattr(self, "_clusters"):
cs = {}
for cr in self.doc["clusters"]:
cs[cr["name"]] = c = copy.deepcopy(cr["cluster"])
if "server" not in c:
c["server"] = "http://localhost"
BytesOrFile.maybe_set(c, "certificate-authority")
self._clusters = cs
return self._clusters | [
"def",
"clusters",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_clusters\"",
")",
":",
"cs",
"=",
"{",
"}",
"for",
"cr",
"in",
"self",
".",
"doc",
"[",
"\"clusters\"",
"]",
":",
"cs",
"[",
"cr",
"[",
"\"name\"",
"]",
"]",
... | Returns known clusters by exposing as a read-only property. | [
"Returns",
"known",
"clusters",
"by",
"exposing",
"as",
"a",
"read",
"-",
"only",
"property",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L134-L146 | train | 211,835 |
kelproject/pykube | pykube/config.py | KubeConfig.users | def users(self):
"""
Returns known users by exposing as a read-only property.
"""
if not hasattr(self, "_users"):
us = {}
if "users" in self.doc:
for ur in self.doc["users"]:
us[ur["name"]] = u = copy.deepcopy(ur["user"])
BytesOrFile.maybe_set(u, "client-certificate")
BytesOrFile.maybe_set(u, "client-key")
self._users = us
return self._users | python | def users(self):
"""
Returns known users by exposing as a read-only property.
"""
if not hasattr(self, "_users"):
us = {}
if "users" in self.doc:
for ur in self.doc["users"]:
us[ur["name"]] = u = copy.deepcopy(ur["user"])
BytesOrFile.maybe_set(u, "client-certificate")
BytesOrFile.maybe_set(u, "client-key")
self._users = us
return self._users | [
"def",
"users",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_users\"",
")",
":",
"us",
"=",
"{",
"}",
"if",
"\"users\"",
"in",
"self",
".",
"doc",
":",
"for",
"ur",
"in",
"self",
".",
"doc",
"[",
"\"users\"",
"]",
":",
"... | Returns known users by exposing as a read-only property. | [
"Returns",
"known",
"users",
"by",
"exposing",
"as",
"a",
"read",
"-",
"only",
"property",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L149-L161 | train | 211,836 |
kelproject/pykube | pykube/config.py | KubeConfig.contexts | def contexts(self):
"""
Returns known contexts by exposing as a read-only property.
"""
if not hasattr(self, "_contexts"):
cs = {}
for cr in self.doc["contexts"]:
cs[cr["name"]] = copy.deepcopy(cr["context"])
self._contexts = cs
return self._contexts | python | def contexts(self):
"""
Returns known contexts by exposing as a read-only property.
"""
if not hasattr(self, "_contexts"):
cs = {}
for cr in self.doc["contexts"]:
cs[cr["name"]] = copy.deepcopy(cr["context"])
self._contexts = cs
return self._contexts | [
"def",
"contexts",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_contexts\"",
")",
":",
"cs",
"=",
"{",
"}",
"for",
"cr",
"in",
"self",
".",
"doc",
"[",
"\"contexts\"",
"]",
":",
"cs",
"[",
"cr",
"[",
"\"name\"",
"]",
"]",
... | Returns known contexts by exposing as a read-only property. | [
"Returns",
"known",
"contexts",
"by",
"exposing",
"as",
"a",
"read",
"-",
"only",
"property",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L164-L173 | train | 211,837 |
kelproject/pykube | pykube/config.py | KubeConfig.user | def user(self):
"""
Returns the current user set by current context
"""
return self.users.get(self.contexts[self.current_context].get("user", ""), {}) | python | def user(self):
"""
Returns the current user set by current context
"""
return self.users.get(self.contexts[self.current_context].get("user", ""), {}) | [
"def",
"user",
"(",
"self",
")",
":",
"return",
"self",
".",
"users",
".",
"get",
"(",
"self",
".",
"contexts",
"[",
"self",
".",
"current_context",
"]",
".",
"get",
"(",
"\"user\"",
",",
"\"\"",
")",
",",
"{",
"}",
")"
] | Returns the current user set by current context | [
"Returns",
"the",
"current",
"user",
"set",
"by",
"current",
"context"
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L184-L188 | train | 211,838 |
kelproject/pykube | pykube/config.py | BytesOrFile.bytes | def bytes(self):
"""
Returns the provided data as bytes.
"""
if self._filename:
with open(self._filename, "rb") as f:
return f.read()
else:
return self._bytes | python | def bytes(self):
"""
Returns the provided data as bytes.
"""
if self._filename:
with open(self._filename, "rb") as f:
return f.read()
else:
return self._bytes | [
"def",
"bytes",
"(",
"self",
")",
":",
"if",
"self",
".",
"_filename",
":",
"with",
"open",
"(",
"self",
".",
"_filename",
",",
"\"rb\"",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")",
"else",
":",
"return",
"self",
".",
"_bytes"
] | Returns the provided data as bytes. | [
"Returns",
"the",
"provided",
"data",
"as",
"bytes",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L250-L258 | train | 211,839 |
kelproject/pykube | pykube/config.py | BytesOrFile.filename | def filename(self):
"""
Returns the provided data as a file location.
"""
if self._filename:
return self._filename
else:
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(self._bytes)
return f.name | python | def filename(self):
"""
Returns the provided data as a file location.
"""
if self._filename:
return self._filename
else:
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(self._bytes)
return f.name | [
"def",
"filename",
"(",
"self",
")",
":",
"if",
"self",
".",
"_filename",
":",
"return",
"self",
".",
"_filename",
"else",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"... | Returns the provided data as a file location. | [
"Returns",
"the",
"provided",
"data",
"as",
"a",
"file",
"location",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L260-L269 | train | 211,840 |
kelproject/pykube | pykube/objects.py | object_factory | def object_factory(api, api_version, kind):
"""
Dynamically builds a Python class for the given Kubernetes object in an API.
For example:
api = pykube.HTTPClient(...)
NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")
This enables construction of any Kubernetes object kind without explicit support
from pykube.
Currently, the HTTPClient passed to this function will not be bound to the returned type.
It is planned to fix this, but in the mean time pass it as you would normally.
"""
resource_list = api.resource_list(api_version)
resource = next((resource for resource in resource_list["resources"] if resource["kind"] == kind), None)
base = NamespacedAPIObject if resource["namespaced"] else APIObject
return type(kind, (base,), {
"version": api_version,
"endpoint": resource["name"],
"kind": kind
}) | python | def object_factory(api, api_version, kind):
"""
Dynamically builds a Python class for the given Kubernetes object in an API.
For example:
api = pykube.HTTPClient(...)
NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")
This enables construction of any Kubernetes object kind without explicit support
from pykube.
Currently, the HTTPClient passed to this function will not be bound to the returned type.
It is planned to fix this, but in the mean time pass it as you would normally.
"""
resource_list = api.resource_list(api_version)
resource = next((resource for resource in resource_list["resources"] if resource["kind"] == kind), None)
base = NamespacedAPIObject if resource["namespaced"] else APIObject
return type(kind, (base,), {
"version": api_version,
"endpoint": resource["name"],
"kind": kind
}) | [
"def",
"object_factory",
"(",
"api",
",",
"api_version",
",",
"kind",
")",
":",
"resource_list",
"=",
"api",
".",
"resource_list",
"(",
"api_version",
")",
"resource",
"=",
"next",
"(",
"(",
"resource",
"for",
"resource",
"in",
"resource_list",
"[",
"\"resou... | Dynamically builds a Python class for the given Kubernetes object in an API.
For example:
api = pykube.HTTPClient(...)
NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")
This enables construction of any Kubernetes object kind without explicit support
from pykube.
Currently, the HTTPClient passed to this function will not be bound to the returned type.
It is planned to fix this, but in the mean time pass it as you would normally. | [
"Dynamically",
"builds",
"a",
"Python",
"class",
"for",
"the",
"given",
"Kubernetes",
"object",
"in",
"an",
"API",
"."
] | e8a46298a592ad9037587afb707ac75b3114eff9 | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/objects.py#L138-L160 | train | 211,841 |
Crypto-toolbox/bitex | bitex/api/WSS/bitfinex.py | BitfinexWSS._handle_info | def _handle_info(self, *args, **kwargs):
"""
Handles info messages and executed corresponding code
"""
if 'version' in kwargs:
# set api version number and exit
self.api_version = kwargs['version']
print("Initialized API with version %s" % self.api_version)
return
try:
info_code = str(kwargs['code'])
except KeyError:
raise FaultyPayloadError("_handle_info: %s" % kwargs)
if not info_code.startswith('2'):
raise ValueError("Info Code must start with 2! %s", kwargs)
output_msg = "_handle_info(): %s" % kwargs
log.info(output_msg)
try:
self._code_handlers[info_code]()
except KeyError:
raise UnknownWSSInfo(output_msg) | python | def _handle_info(self, *args, **kwargs):
"""
Handles info messages and executed corresponding code
"""
if 'version' in kwargs:
# set api version number and exit
self.api_version = kwargs['version']
print("Initialized API with version %s" % self.api_version)
return
try:
info_code = str(kwargs['code'])
except KeyError:
raise FaultyPayloadError("_handle_info: %s" % kwargs)
if not info_code.startswith('2'):
raise ValueError("Info Code must start with 2! %s", kwargs)
output_msg = "_handle_info(): %s" % kwargs
log.info(output_msg)
try:
self._code_handlers[info_code]()
except KeyError:
raise UnknownWSSInfo(output_msg) | [
"def",
"_handle_info",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'version'",
"in",
"kwargs",
":",
"# set api version number and exit",
"self",
".",
"api_version",
"=",
"kwargs",
"[",
"'version'",
"]",
"print",
"(",
"\"Initializ... | Handles info messages and executed corresponding code | [
"Handles",
"info",
"messages",
"and",
"executed",
"corresponding",
"code"
] | 56d46ea3db6de5219a72dad9b052fbabc921232f | https://github.com/Crypto-toolbox/bitex/blob/56d46ea3db6de5219a72dad9b052fbabc921232f/bitex/api/WSS/bitfinex.py#L484-L507 | train | 211,842 |
miyakogi/m2r | m2r.py | setup | def setup(app):
"""When used for sphinx extension."""
global _is_sphinx
_is_sphinx = True
app.add_config_value('no_underscore_emphasis', False, 'env')
app.add_config_value('m2r_parse_relative_links', False, 'env')
app.add_config_value('m2r_anonymous_references', False, 'env')
app.add_config_value('m2r_disable_inline_math', False, 'env')
app.add_source_parser('.md', M2RParser)
app.add_directive('mdinclude', MdInclude)
metadata = dict(
version=__version__,
parallel_read_safe=True,
parallel_write_safe=True,
)
return metadata | python | def setup(app):
"""When used for sphinx extension."""
global _is_sphinx
_is_sphinx = True
app.add_config_value('no_underscore_emphasis', False, 'env')
app.add_config_value('m2r_parse_relative_links', False, 'env')
app.add_config_value('m2r_anonymous_references', False, 'env')
app.add_config_value('m2r_disable_inline_math', False, 'env')
app.add_source_parser('.md', M2RParser)
app.add_directive('mdinclude', MdInclude)
metadata = dict(
version=__version__,
parallel_read_safe=True,
parallel_write_safe=True,
)
return metadata | [
"def",
"setup",
"(",
"app",
")",
":",
"global",
"_is_sphinx",
"_is_sphinx",
"=",
"True",
"app",
".",
"add_config_value",
"(",
"'no_underscore_emphasis'",
",",
"False",
",",
"'env'",
")",
"app",
".",
"add_config_value",
"(",
"'m2r_parse_relative_links'",
",",
"Fa... | When used for sphinx extension. | [
"When",
"used",
"for",
"sphinx",
"extension",
"."
] | 661a82ccfee48342dcbc3dcca4fd77e6a0a1a900 | https://github.com/miyakogi/m2r/blob/661a82ccfee48342dcbc3dcca4fd77e6a0a1a900/m2r.py#L644-L659 | train | 211,843 |
miyakogi/m2r | m2r.py | RestRenderer.link | def link(self, link, title, text):
"""Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description.
"""
if self.anonymous_references:
underscore = '__'
else:
underscore = '_'
if title:
return self._raw_html(
'<a href="{link}" title="{title}">{text}</a>'.format(
link=link, title=title, text=text
)
)
if not self.parse_relative_links:
return '\ `{text} <{target}>`{underscore}\ '.format(
target=link,
text=text,
underscore=underscore
)
else:
url_info = urlparse(link)
if url_info.scheme:
return '\ `{text} <{target}>`{underscore}\ '.format(
target=link,
text=text,
underscore=underscore
)
else:
link_type = 'doc'
anchor = url_info.fragment
if url_info.fragment:
if url_info.path:
# Can't link to anchors via doc directive.
anchor = ''
else:
# Example: [text](#anchor)
link_type = 'ref'
doc_link = '{doc_name}{anchor}'.format(
# splittext approach works whether or not path is set. It
# will return an empty string if unset, which leads to
# anchor only ref.
doc_name=os.path.splitext(url_info.path)[0],
anchor=anchor
)
return '\ :{link_type}:`{text} <{doc_link}>`\ '.format(
link_type=link_type,
doc_link=doc_link,
text=text
) | python | def link(self, link, title, text):
"""Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description.
"""
if self.anonymous_references:
underscore = '__'
else:
underscore = '_'
if title:
return self._raw_html(
'<a href="{link}" title="{title}">{text}</a>'.format(
link=link, title=title, text=text
)
)
if not self.parse_relative_links:
return '\ `{text} <{target}>`{underscore}\ '.format(
target=link,
text=text,
underscore=underscore
)
else:
url_info = urlparse(link)
if url_info.scheme:
return '\ `{text} <{target}>`{underscore}\ '.format(
target=link,
text=text,
underscore=underscore
)
else:
link_type = 'doc'
anchor = url_info.fragment
if url_info.fragment:
if url_info.path:
# Can't link to anchors via doc directive.
anchor = ''
else:
# Example: [text](#anchor)
link_type = 'ref'
doc_link = '{doc_name}{anchor}'.format(
# splittext approach works whether or not path is set. It
# will return an empty string if unset, which leads to
# anchor only ref.
doc_name=os.path.splitext(url_info.path)[0],
anchor=anchor
)
return '\ :{link_type}:`{text} <{doc_link}>`\ '.format(
link_type=link_type,
doc_link=doc_link,
text=text
) | [
"def",
"link",
"(",
"self",
",",
"link",
",",
"title",
",",
"text",
")",
":",
"if",
"self",
".",
"anonymous_references",
":",
"underscore",
"=",
"'__'",
"else",
":",
"underscore",
"=",
"'_'",
"if",
"title",
":",
"return",
"self",
".",
"_raw_html",
"(",... | Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description. | [
"Rendering",
"a",
"given",
"link",
"with",
"content",
"and",
"title",
"."
] | 661a82ccfee48342dcbc3dcca4fd77e6a0a1a900 | https://github.com/miyakogi/m2r/blob/661a82ccfee48342dcbc3dcca4fd77e6a0a1a900/m2r.py#L377-L429 | train | 211,844 |
irgangla/icalevents | icalevents/icalevents.py | events | def events(url=None, file=None, string_content=None, start=None, end=None, fix_apple=False):
"""
Get all events form the given iCal URL occurring in the given time range.
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date (see dateutils.date)
:param end: end date (see dateutils.date)
:param fix_apple: fix known Apple iCal issues
:return: events as list of dictionaries
"""
found_events = []
content = None
if url:
content = ICalDownload().data_from_url(url, apple_fix=fix_apple)
if not content and file:
content = ICalDownload().data_from_file(file, apple_fix=fix_apple)
if not content and string_content:
content = ICalDownload().data_from_string(string_content,
apple_fix=fix_apple)
found_events += parse_events(content, start=start, end=end)
return found_events | python | def events(url=None, file=None, string_content=None, start=None, end=None, fix_apple=False):
"""
Get all events form the given iCal URL occurring in the given time range.
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date (see dateutils.date)
:param end: end date (see dateutils.date)
:param fix_apple: fix known Apple iCal issues
:return: events as list of dictionaries
"""
found_events = []
content = None
if url:
content = ICalDownload().data_from_url(url, apple_fix=fix_apple)
if not content and file:
content = ICalDownload().data_from_file(file, apple_fix=fix_apple)
if not content and string_content:
content = ICalDownload().data_from_string(string_content,
apple_fix=fix_apple)
found_events += parse_events(content, start=start, end=end)
return found_events | [
"def",
"events",
"(",
"url",
"=",
"None",
",",
"file",
"=",
"None",
",",
"string_content",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"fix_apple",
"=",
"False",
")",
":",
"found_events",
"=",
"[",
"]",
"content",
"=",
"No... | Get all events form the given iCal URL occurring in the given time range.
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date (see dateutils.date)
:param end: end date (see dateutils.date)
:param fix_apple: fix known Apple iCal issues
:return: events as list of dictionaries | [
"Get",
"all",
"events",
"form",
"the",
"given",
"iCal",
"URL",
"occurring",
"in",
"the",
"given",
"time",
"range",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icalevents.py#L15-L43 | train | 211,845 |
irgangla/icalevents | icalevents/icalevents.py | request_data | def request_data(key, url, file, string_content, start, end, fix_apple):
"""
Request data, update local data cache and remove this Thread form queue.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues
"""
data = []
try:
data += events(url=url, file=file, string_content=string_content,
start=start, end=end, fix_apple=fix_apple)
finally:
update_events(key, data)
request_finished(key) | python | def request_data(key, url, file, string_content, start, end, fix_apple):
"""
Request data, update local data cache and remove this Thread form queue.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues
"""
data = []
try:
data += events(url=url, file=file, string_content=string_content,
start=start, end=end, fix_apple=fix_apple)
finally:
update_events(key, data)
request_finished(key) | [
"def",
"request_data",
"(",
"key",
",",
"url",
",",
"file",
",",
"string_content",
",",
"start",
",",
"end",
",",
"fix_apple",
")",
":",
"data",
"=",
"[",
"]",
"try",
":",
"data",
"+=",
"events",
"(",
"url",
"=",
"url",
",",
"file",
"=",
"file",
... | Request data, update local data cache and remove this Thread form queue.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues | [
"Request",
"data",
"update",
"local",
"data",
"cache",
"and",
"remove",
"this",
"Thread",
"form",
"queue",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icalevents.py#L46-L65 | train | 211,846 |
irgangla/icalevents | icalevents/icalevents.py | events_async | def events_async(key, url=None, file=None, start=None, string_content=None,
end=None, fix_apple=False):
"""
Trigger an asynchronous data request.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues
"""
t = Thread(target=request_data, args=(key, url, file, string_content, start, end, fix_apple))
with event_lock:
if key not in threads:
threads[key] = []
threads[key].append(t)
if not threads[key][0].is_alive():
threads[key][0].start() | python | def events_async(key, url=None, file=None, start=None, string_content=None,
end=None, fix_apple=False):
"""
Trigger an asynchronous data request.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues
"""
t = Thread(target=request_data, args=(key, url, file, string_content, start, end, fix_apple))
with event_lock:
if key not in threads:
threads[key] = []
threads[key].append(t)
if not threads[key][0].is_alive():
threads[key][0].start() | [
"def",
"events_async",
"(",
"key",
",",
"url",
"=",
"None",
",",
"file",
"=",
"None",
",",
"start",
"=",
"None",
",",
"string_content",
"=",
"None",
",",
"end",
"=",
"None",
",",
"fix_apple",
"=",
"False",
")",
":",
"t",
"=",
"Thread",
"(",
"target... | Trigger an asynchronous data request.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues | [
"Trigger",
"an",
"asynchronous",
"data",
"request",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icalevents.py#L68-L90 | train | 211,847 |
irgangla/icalevents | icalevents/icalevents.py | request_finished | def request_finished(key):
"""
Remove finished Thread from queue.
:param key: data source key
"""
with event_lock:
threads[key] = threads[key][1:]
if threads[key]:
threads[key][0].run() | python | def request_finished(key):
"""
Remove finished Thread from queue.
:param key: data source key
"""
with event_lock:
threads[key] = threads[key][1:]
if threads[key]:
threads[key][0].run() | [
"def",
"request_finished",
"(",
"key",
")",
":",
"with",
"event_lock",
":",
"threads",
"[",
"key",
"]",
"=",
"threads",
"[",
"key",
"]",
"[",
"1",
":",
"]",
"if",
"threads",
"[",
"key",
"]",
":",
"threads",
"[",
"key",
"]",
"[",
"0",
"]",
".",
... | Remove finished Thread from queue.
:param key: data source key | [
"Remove",
"finished",
"Thread",
"from",
"queue",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icalevents.py#L93-L103 | train | 211,848 |
irgangla/icalevents | icalevents/icaldownload.py | ICalDownload.data_from_url | def data_from_url(self, url, apple_fix=False):
"""
Download iCal data from URL.
:param url: URL to download
:param apple_fix: fix Apple bugs (protocol type and tzdata in iCal)
:return: decoded (and fixed) iCal data
"""
if apple_fix:
url = apple_url_fix(url)
_, content = self.http.request(url)
if not content:
raise ConnectionError('Could not get data from %s!' % url)
return self.decode(content, apple_fix=apple_fix) | python | def data_from_url(self, url, apple_fix=False):
"""
Download iCal data from URL.
:param url: URL to download
:param apple_fix: fix Apple bugs (protocol type and tzdata in iCal)
:return: decoded (and fixed) iCal data
"""
if apple_fix:
url = apple_url_fix(url)
_, content = self.http.request(url)
if not content:
raise ConnectionError('Could not get data from %s!' % url)
return self.decode(content, apple_fix=apple_fix) | [
"def",
"data_from_url",
"(",
"self",
",",
"url",
",",
"apple_fix",
"=",
"False",
")",
":",
"if",
"apple_fix",
":",
"url",
"=",
"apple_url_fix",
"(",
"url",
")",
"_",
",",
"content",
"=",
"self",
".",
"http",
".",
"request",
"(",
"url",
")",
"if",
"... | Download iCal data from URL.
:param url: URL to download
:param apple_fix: fix Apple bugs (protocol type and tzdata in iCal)
:return: decoded (and fixed) iCal data | [
"Download",
"iCal",
"data",
"from",
"URL",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icaldownload.py#L45-L61 | train | 211,849 |
irgangla/icalevents | icalevents/icaldownload.py | ICalDownload.data_from_file | def data_from_file(self, file, apple_fix=False):
"""
Read iCal data from file.
:param file: file to read
:param apple_fix: fix wrong Apple tzdata in iCal
:return: decoded (and fixed) iCal data
"""
with open(file, mode='rb') as f:
content = f.read()
if not content:
raise IOError("File %f is not readable or is empty!" % file)
return self.decode(content, apple_fix=apple_fix) | python | def data_from_file(self, file, apple_fix=False):
"""
Read iCal data from file.
:param file: file to read
:param apple_fix: fix wrong Apple tzdata in iCal
:return: decoded (and fixed) iCal data
"""
with open(file, mode='rb') as f:
content = f.read()
if not content:
raise IOError("File %f is not readable or is empty!" % file)
return self.decode(content, apple_fix=apple_fix) | [
"def",
"data_from_file",
"(",
"self",
",",
"file",
",",
"apple_fix",
"=",
"False",
")",
":",
"with",
"open",
"(",
"file",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"if",
"not",
"content",
":",
"rai... | Read iCal data from file.
:param file: file to read
:param apple_fix: fix wrong Apple tzdata in iCal
:return: decoded (and fixed) iCal data | [
"Read",
"iCal",
"data",
"from",
"file",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icaldownload.py#L63-L77 | train | 211,850 |
irgangla/icalevents | icalevents/icaldownload.py | ICalDownload.decode | def decode(self, content, apple_fix=False):
"""
Decode content using the set charset.
:param content: content do decode
:param apple_fix: fix Apple txdata bug
:return: decoded (and fixed) content
"""
content = content.decode(self.encoding)
content = content.replace('\r', '')
if apple_fix:
content = apple_data_fix(content)
return content | python | def decode(self, content, apple_fix=False):
"""
Decode content using the set charset.
:param content: content do decode
:param apple_fix: fix Apple txdata bug
:return: decoded (and fixed) content
"""
content = content.decode(self.encoding)
content = content.replace('\r', '')
if apple_fix:
content = apple_data_fix(content)
return content | [
"def",
"decode",
"(",
"self",
",",
"content",
",",
"apple_fix",
"=",
"False",
")",
":",
"content",
"=",
"content",
".",
"decode",
"(",
"self",
".",
"encoding",
")",
"content",
"=",
"content",
".",
"replace",
"(",
"'\\r'",
",",
"''",
")",
"if",
"apple... | Decode content using the set charset.
:param content: content do decode
:param apple_fix: fix Apple txdata bug
:return: decoded (and fixed) content | [
"Decode",
"content",
"using",
"the",
"set",
"charset",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icaldownload.py#L85-L99 | train | 211,851 |
irgangla/icalevents | icalevents/icalparser.py | create_event | def create_event(component, tz=UTC):
"""
Create an event from its iCal representation.
:param component: iCal component
:param tz: timezone for start and end times
:return: event
"""
event = Event()
event.start = normalize(component.get('dtstart').dt, tz=tz)
if component.get('dtend'):
event.end = normalize(component.get('dtend').dt, tz=tz)
elif component.get('duration'): # compute implicit end as start + duration
event.end = event.start + component.get('duration').dt
else: # compute implicit end as start + 0
event.end = event.start
try:
event.summary = str(component.get('summary'))
except UnicodeEncodeError as e:
event.summary = str(component.get('summary').encode('utf-8'))
try:
event.description = str(component.get('description'))
except UnicodeEncodeError as e:
event.description = str(component.get('description').encode('utf-8'))
event.all_day = type(component.get('dtstart').dt) is date
if component.get('rrule'):
event.recurring = True
try:
event.location = str(component.get('location'))
except UnicodeEncodeError as e:
event.location = str(component.get('location').encode('utf-8'))
if component.get('attendee'):
event.attendee = component.get('attendee')
if type(event.attendee) is list:
temp = []
for a in event.attendee:
temp.append(a.encode('utf-8').decode('ascii'))
event.attendee = temp
else:
event.attendee = event.attendee.encode('utf-8').decode('ascii')
if component.get('uid'):
event.uid = component.get('uid').encode('utf-8').decode('ascii')
if component.get('organizer'):
event.organizer = component.get('organizer').encode('utf-8').decode('ascii')
return event | python | def create_event(component, tz=UTC):
"""
Create an event from its iCal representation.
:param component: iCal component
:param tz: timezone for start and end times
:return: event
"""
event = Event()
event.start = normalize(component.get('dtstart').dt, tz=tz)
if component.get('dtend'):
event.end = normalize(component.get('dtend').dt, tz=tz)
elif component.get('duration'): # compute implicit end as start + duration
event.end = event.start + component.get('duration').dt
else: # compute implicit end as start + 0
event.end = event.start
try:
event.summary = str(component.get('summary'))
except UnicodeEncodeError as e:
event.summary = str(component.get('summary').encode('utf-8'))
try:
event.description = str(component.get('description'))
except UnicodeEncodeError as e:
event.description = str(component.get('description').encode('utf-8'))
event.all_day = type(component.get('dtstart').dt) is date
if component.get('rrule'):
event.recurring = True
try:
event.location = str(component.get('location'))
except UnicodeEncodeError as e:
event.location = str(component.get('location').encode('utf-8'))
if component.get('attendee'):
event.attendee = component.get('attendee')
if type(event.attendee) is list:
temp = []
for a in event.attendee:
temp.append(a.encode('utf-8').decode('ascii'))
event.attendee = temp
else:
event.attendee = event.attendee.encode('utf-8').decode('ascii')
if component.get('uid'):
event.uid = component.get('uid').encode('utf-8').decode('ascii')
if component.get('organizer'):
event.organizer = component.get('organizer').encode('utf-8').decode('ascii')
return event | [
"def",
"create_event",
"(",
"component",
",",
"tz",
"=",
"UTC",
")",
":",
"event",
"=",
"Event",
"(",
")",
"event",
".",
"start",
"=",
"normalize",
"(",
"component",
".",
"get",
"(",
"'dtstart'",
")",
".",
"dt",
",",
"tz",
"=",
"tz",
")",
"if",
"... | Create an event from its iCal representation.
:param component: iCal component
:param tz: timezone for start and end times
:return: event | [
"Create",
"an",
"event",
"from",
"its",
"iCal",
"representation",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icalparser.py#L122-L174 | train | 211,852 |
irgangla/icalevents | icalevents/icalparser.py | normalize | def normalize(dt, tz=UTC):
"""
Convert date or datetime to datetime with timezone.
:param dt: date to normalize
:param tz: the normalized date's timezone
:return: date as datetime with timezone
"""
if type(dt) is date:
dt = dt + relativedelta(hour=0)
elif type(dt) is datetime:
pass
else:
raise ValueError("unknown type %s" % type(dt))
if dt.tzinfo:
dt = dt.astimezone(tz)
else:
dt = dt.replace(tzinfo=tz)
return dt | python | def normalize(dt, tz=UTC):
"""
Convert date or datetime to datetime with timezone.
:param dt: date to normalize
:param tz: the normalized date's timezone
:return: date as datetime with timezone
"""
if type(dt) is date:
dt = dt + relativedelta(hour=0)
elif type(dt) is datetime:
pass
else:
raise ValueError("unknown type %s" % type(dt))
if dt.tzinfo:
dt = dt.astimezone(tz)
else:
dt = dt.replace(tzinfo=tz)
return dt | [
"def",
"normalize",
"(",
"dt",
",",
"tz",
"=",
"UTC",
")",
":",
"if",
"type",
"(",
"dt",
")",
"is",
"date",
":",
"dt",
"=",
"dt",
"+",
"relativedelta",
"(",
"hour",
"=",
"0",
")",
"elif",
"type",
"(",
"dt",
")",
"is",
"datetime",
":",
"pass",
... | Convert date or datetime to datetime with timezone.
:param dt: date to normalize
:param tz: the normalized date's timezone
:return: date as datetime with timezone | [
"Convert",
"date",
"or",
"datetime",
"to",
"datetime",
"with",
"timezone",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icalparser.py#L177-L197 | train | 211,853 |
irgangla/icalevents | icalevents/icalparser.py | parse_events | def parse_events(content, start=None, end=None, default_span=timedelta(days=7)):
"""
Query the events occurring in a given time range.
:param content: iCal URL/file content as String
:param start: start date for search, default today
:param end: end date for search
:param default_span: default query length (one week)
:return: events as list
"""
if not start:
start = now()
if not end:
end = start + default_span
if not content:
raise ValueError('Content is invalid!')
calendar = Calendar.from_ical(content)
# Find the calendar's timezone info, or use UTC
for c in calendar.walk():
if c.name == 'VTIMEZONE':
cal_tz = gettz(str(c['TZID']))
break;
else:
cal_tz = UTC
start = normalize(start, cal_tz)
end = normalize(end, cal_tz)
found = []
for component in calendar.walk():
if component.name == "VEVENT":
e = create_event(component)
if e.recurring:
# Unfold recurring events according to their rrule
rule = parse_rrule(component, cal_tz)
dur = e.end - e.start
found.extend(e.copy_to(dt) for dt in rule.between(start - dur, end, inc=True))
elif e.end >= start and e.start <= end:
found.append(e)
return found | python | def parse_events(content, start=None, end=None, default_span=timedelta(days=7)):
"""
Query the events occurring in a given time range.
:param content: iCal URL/file content as String
:param start: start date for search, default today
:param end: end date for search
:param default_span: default query length (one week)
:return: events as list
"""
if not start:
start = now()
if not end:
end = start + default_span
if not content:
raise ValueError('Content is invalid!')
calendar = Calendar.from_ical(content)
# Find the calendar's timezone info, or use UTC
for c in calendar.walk():
if c.name == 'VTIMEZONE':
cal_tz = gettz(str(c['TZID']))
break;
else:
cal_tz = UTC
start = normalize(start, cal_tz)
end = normalize(end, cal_tz)
found = []
for component in calendar.walk():
if component.name == "VEVENT":
e = create_event(component)
if e.recurring:
# Unfold recurring events according to their rrule
rule = parse_rrule(component, cal_tz)
dur = e.end - e.start
found.extend(e.copy_to(dt) for dt in rule.between(start - dur, end, inc=True))
elif e.end >= start and e.start <= end:
found.append(e)
return found | [
"def",
"parse_events",
"(",
"content",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"default_span",
"=",
"timedelta",
"(",
"days",
"=",
"7",
")",
")",
":",
"if",
"not",
"start",
":",
"start",
"=",
"now",
"(",
")",
"if",
"not",
"end",
... | Query the events occurring in a given time range.
:param content: iCal URL/file content as String
:param start: start date for search, default today
:param end: end date for search
:param default_span: default query length (one week)
:return: events as list | [
"Query",
"the",
"events",
"occurring",
"in",
"a",
"given",
"time",
"range",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icalparser.py#L200-L244 | train | 211,854 |
irgangla/icalevents | icalevents/icalparser.py | Event.copy_to | def copy_to(self, new_start=None, uid=None):
"""
Create a new event equal to this with new start date.
:param new_start: new start date
:param uid: UID of new event
:return: new event
"""
if not new_start:
new_start = self.start
if not uid:
uid = "%s_%d" % (self.uid, randint(0, 1000000))
ne = Event()
ne.summary = self.summary
ne.description = self.description
ne.start = new_start
if self.end:
duration = self.end - self.start
ne.end = (new_start + duration)
ne.all_day = self.all_day
ne.recurring = self.recurring
ne.location = self.location
ne.uid = uid
return ne | python | def copy_to(self, new_start=None, uid=None):
"""
Create a new event equal to this with new start date.
:param new_start: new start date
:param uid: UID of new event
:return: new event
"""
if not new_start:
new_start = self.start
if not uid:
uid = "%s_%d" % (self.uid, randint(0, 1000000))
ne = Event()
ne.summary = self.summary
ne.description = self.description
ne.start = new_start
if self.end:
duration = self.end - self.start
ne.end = (new_start + duration)
ne.all_day = self.all_day
ne.recurring = self.recurring
ne.location = self.location
ne.uid = uid
return ne | [
"def",
"copy_to",
"(",
"self",
",",
"new_start",
"=",
"None",
",",
"uid",
"=",
"None",
")",
":",
"if",
"not",
"new_start",
":",
"new_start",
"=",
"self",
".",
"start",
"if",
"not",
"uid",
":",
"uid",
"=",
"\"%s_%d\"",
"%",
"(",
"self",
".",
"uid",
... | Create a new event equal to this with new start date.
:param new_start: new start date
:param uid: UID of new event
:return: new event | [
"Create",
"a",
"new",
"event",
"equal",
"to",
"this",
"with",
"new",
"start",
"date",
"."
] | bb34c770ae6ffbf4c793e09dc5176344b285f386 | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icalparser.py#L91-L119 | train | 211,855 |
pyupio/pyup | travis_pypi_setup.py | fetch_public_key | def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key'] | python | def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key'] | [
"def",
"fetch_public_key",
"(",
"repo",
")",
":",
"keyurl",
"=",
"'https://api.travis-ci.org/repos/{0}/key'",
".",
"format",
"(",
"repo",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"urlopen",
"(",
"keyurl",
")",
".",
"read",
"(",
")",
")",
"if",
"'key'",... | Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys | [
"Download",
"RSA",
"public",
"key",
"Travis",
"will",
"use",
"for",
"this",
"repo",
"."
] | b20fa88e03cfdf5dc409a9f00d27629188171c31 | https://github.com/pyupio/pyup/blob/b20fa88e03cfdf5dc409a9f00d27629188171c31/travis_pypi_setup.py#L55-L66 | train | 211,856 |
Lvl4Sword/Killer | killer/killer_base.py | KillerBase.kill_the_system | def kill_the_system(self, warning: str):
"""Send an e-mail, and then
shut the system down quickly.
"""
log.critical('Kill reason: ' + warning)
if self.DEBUG:
return
try:
self.mail_this(warning)
except socket.gaierror:
current_time = time.localtime()
formatted_time = time.strftime('%Y-%m-%d %I:%M:%S%p', current_time)
with open(self.config['global']['killer_file'], 'a', encoding='utf-8') as killer_file:
killer_file.write('Time: {0}\nInternet is out.\n'
'Failure: {1}\n\n'.format(formatted_time, warning)) | python | def kill_the_system(self, warning: str):
"""Send an e-mail, and then
shut the system down quickly.
"""
log.critical('Kill reason: ' + warning)
if self.DEBUG:
return
try:
self.mail_this(warning)
except socket.gaierror:
current_time = time.localtime()
formatted_time = time.strftime('%Y-%m-%d %I:%M:%S%p', current_time)
with open(self.config['global']['killer_file'], 'a', encoding='utf-8') as killer_file:
killer_file.write('Time: {0}\nInternet is out.\n'
'Failure: {1}\n\n'.format(formatted_time, warning)) | [
"def",
"kill_the_system",
"(",
"self",
",",
"warning",
":",
"str",
")",
":",
"log",
".",
"critical",
"(",
"'Kill reason: '",
"+",
"warning",
")",
"if",
"self",
".",
"DEBUG",
":",
"return",
"try",
":",
"self",
".",
"mail_this",
"(",
"warning",
")",
"exc... | Send an e-mail, and then
shut the system down quickly. | [
"Send",
"an",
"e",
"-",
"mail",
"and",
"then",
"shut",
"the",
"system",
"down",
"quickly",
"."
] | a6d1d51a62516da0aedead78331331d7f8c49226 | https://github.com/Lvl4Sword/Killer/blob/a6d1d51a62516da0aedead78331331d7f8c49226/killer/killer_base.py#L108-L123 | train | 211,857 |
Lvl4Sword/Killer | killer/killer.py | get_killer | def get_killer(args):
"""Returns a KillerBase instance subclassed based on the OS."""
if POSIX:
log.debug('Platform: POSIX')
from killer.killer_posix import KillerPosix
return KillerPosix(config_path=args.config, debug=args.debug)
elif WINDOWS:
log.debug('Platform: Windows')
from killer.killer_windows import KillerWindows
return KillerWindows(config_path=args.config, debug=args.debug)
else:
# TODO: WSL
# TODO: OSX
# TODO: BSD
raise NotImplementedError("Your platform is not currently supported."
"If you would like support to be added, or "
"if your platform is supported and this is "
"a bug, please open an issue on GitHub!") | python | def get_killer(args):
"""Returns a KillerBase instance subclassed based on the OS."""
if POSIX:
log.debug('Platform: POSIX')
from killer.killer_posix import KillerPosix
return KillerPosix(config_path=args.config, debug=args.debug)
elif WINDOWS:
log.debug('Platform: Windows')
from killer.killer_windows import KillerWindows
return KillerWindows(config_path=args.config, debug=args.debug)
else:
# TODO: WSL
# TODO: OSX
# TODO: BSD
raise NotImplementedError("Your platform is not currently supported."
"If you would like support to be added, or "
"if your platform is supported and this is "
"a bug, please open an issue on GitHub!") | [
"def",
"get_killer",
"(",
"args",
")",
":",
"if",
"POSIX",
":",
"log",
".",
"debug",
"(",
"'Platform: POSIX'",
")",
"from",
"killer",
".",
"killer_posix",
"import",
"KillerPosix",
"return",
"KillerPosix",
"(",
"config_path",
"=",
"args",
".",
"config",
",",
... | Returns a KillerBase instance subclassed based on the OS. | [
"Returns",
"a",
"KillerBase",
"instance",
"subclassed",
"based",
"on",
"the",
"OS",
"."
] | a6d1d51a62516da0aedead78331331d7f8c49226 | https://github.com/Lvl4Sword/Killer/blob/a6d1d51a62516da0aedead78331331d7f8c49226/killer/killer.py#L44-L61 | train | 211,858 |
Lvl4Sword/Killer | killer/posix/power.py | get_devices | def get_devices(device_type: DeviceType) -> Iterator[str]:
"""Gets names of power devices of the specified type.
:param str device_type: the type of the devices to retrieve
:return: the device names
:rtype: Iterator[str]
"""
for device in BASE_PATH.iterdir():
with open(str(Path(device, 'type'))) as type_file:
if type_file.readline().strip() == device_type.value:
yield device.name | python | def get_devices(device_type: DeviceType) -> Iterator[str]:
"""Gets names of power devices of the specified type.
:param str device_type: the type of the devices to retrieve
:return: the device names
:rtype: Iterator[str]
"""
for device in BASE_PATH.iterdir():
with open(str(Path(device, 'type'))) as type_file:
if type_file.readline().strip() == device_type.value:
yield device.name | [
"def",
"get_devices",
"(",
"device_type",
":",
"DeviceType",
")",
"->",
"Iterator",
"[",
"str",
"]",
":",
"for",
"device",
"in",
"BASE_PATH",
".",
"iterdir",
"(",
")",
":",
"with",
"open",
"(",
"str",
"(",
"Path",
"(",
"device",
",",
"'type'",
")",
"... | Gets names of power devices of the specified type.
:param str device_type: the type of the devices to retrieve
:return: the device names
:rtype: Iterator[str] | [
"Gets",
"names",
"of",
"power",
"devices",
"of",
"the",
"specified",
"type",
"."
] | a6d1d51a62516da0aedead78331331d7f8c49226 | https://github.com/Lvl4Sword/Killer/blob/a6d1d51a62516da0aedead78331331d7f8c49226/killer/posix/power.py#L35-L45 | train | 211,859 |
Lvl4Sword/Killer | killer/posix/power.py | _get_property | def _get_property(device_path: Union[Path, str], property_name: str) -> str:
"""Gets the given property for a device."""
with open(str(Path(device_path, property_name))) as file:
return file.readline().strip() | python | def _get_property(device_path: Union[Path, str], property_name: str) -> str:
"""Gets the given property for a device."""
with open(str(Path(device_path, property_name))) as file:
return file.readline().strip() | [
"def",
"_get_property",
"(",
"device_path",
":",
"Union",
"[",
"Path",
",",
"str",
"]",
",",
"property_name",
":",
"str",
")",
"->",
"str",
":",
"with",
"open",
"(",
"str",
"(",
"Path",
"(",
"device_path",
",",
"property_name",
")",
")",
")",
"as",
"... | Gets the given property for a device. | [
"Gets",
"the",
"given",
"property",
"for",
"a",
"device",
"."
] | a6d1d51a62516da0aedead78331331d7f8c49226 | https://github.com/Lvl4Sword/Killer/blob/a6d1d51a62516da0aedead78331331d7f8c49226/killer/posix/power.py#L68-L71 | train | 211,860 |
Lvl4Sword/Killer | killer/windows/power.py | get_power_status | def get_power_status() -> SystemPowerStatus:
"""Retrieves the power status of the system.
The status indicates whether the system is running on AC or DC power,
whether the battery is currently charging, how much battery life remains,
and if battery saver is on or off.
:raises OSError: if the call to GetSystemPowerStatus fails
:return: the power status
:rtype: SystemPowerStatus
"""
get_system_power_status = ctypes.windll.kernel32.GetSystemPowerStatus
get_system_power_status.argtypes = [ctypes.POINTER(SystemPowerStatus)]
get_system_power_status.restype = wintypes.BOOL
status = SystemPowerStatus()
if not get_system_power_status(ctypes.pointer(status)):
raise ctypes.WinError()
else:
return status | python | def get_power_status() -> SystemPowerStatus:
"""Retrieves the power status of the system.
The status indicates whether the system is running on AC or DC power,
whether the battery is currently charging, how much battery life remains,
and if battery saver is on or off.
:raises OSError: if the call to GetSystemPowerStatus fails
:return: the power status
:rtype: SystemPowerStatus
"""
get_system_power_status = ctypes.windll.kernel32.GetSystemPowerStatus
get_system_power_status.argtypes = [ctypes.POINTER(SystemPowerStatus)]
get_system_power_status.restype = wintypes.BOOL
status = SystemPowerStatus()
if not get_system_power_status(ctypes.pointer(status)):
raise ctypes.WinError()
else:
return status | [
"def",
"get_power_status",
"(",
")",
"->",
"SystemPowerStatus",
":",
"get_system_power_status",
"=",
"ctypes",
".",
"windll",
".",
"kernel32",
".",
"GetSystemPowerStatus",
"get_system_power_status",
".",
"argtypes",
"=",
"[",
"ctypes",
".",
"POINTER",
"(",
"SystemPo... | Retrieves the power status of the system.
The status indicates whether the system is running on AC or DC power,
whether the battery is currently charging, how much battery life remains,
and if battery saver is on or off.
:raises OSError: if the call to GetSystemPowerStatus fails
:return: the power status
:rtype: SystemPowerStatus | [
"Retrieves",
"the",
"power",
"status",
"of",
"the",
"system",
"."
] | a6d1d51a62516da0aedead78331331d7f8c49226 | https://github.com/Lvl4Sword/Killer/blob/a6d1d51a62516da0aedead78331331d7f8c49226/killer/windows/power.py#L54-L73 | train | 211,861 |
PagerDuty/pagerduty-api-python-client | pypd/models/ability.py | abilities | def abilities(api_key=None, add_headers=None):
"""Fetch a list of permission-like strings for this account."""
client = ClientMixin(api_key=api_key)
result = client.request('GET', endpoint='abilities',
add_headers=add_headers,)
return result['abilities'] | python | def abilities(api_key=None, add_headers=None):
"""Fetch a list of permission-like strings for this account."""
client = ClientMixin(api_key=api_key)
result = client.request('GET', endpoint='abilities',
add_headers=add_headers,)
return result['abilities'] | [
"def",
"abilities",
"(",
"api_key",
"=",
"None",
",",
"add_headers",
"=",
"None",
")",
":",
"client",
"=",
"ClientMixin",
"(",
"api_key",
"=",
"api_key",
")",
"result",
"=",
"client",
".",
"request",
"(",
"'GET'",
",",
"endpoint",
"=",
"'abilities'",
","... | Fetch a list of permission-like strings for this account. | [
"Fetch",
"a",
"list",
"of",
"permission",
"-",
"like",
"strings",
"for",
"this",
"account",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/ability.py#L13-L18 | train | 211,862 |
PagerDuty/pagerduty-api-python-client | pypd/models/ability.py | can | def can(ability, add_headers=None):
"""Test whether an ability is allowed."""
client = ClientMixin(api_key=None)
try:
client.request('GET', endpoint='abilities/%s' % ability,
add_headers=add_headers)
return True
except Exception:
pass
return False | python | def can(ability, add_headers=None):
"""Test whether an ability is allowed."""
client = ClientMixin(api_key=None)
try:
client.request('GET', endpoint='abilities/%s' % ability,
add_headers=add_headers)
return True
except Exception:
pass
return False | [
"def",
"can",
"(",
"ability",
",",
"add_headers",
"=",
"None",
")",
":",
"client",
"=",
"ClientMixin",
"(",
"api_key",
"=",
"None",
")",
"try",
":",
"client",
".",
"request",
"(",
"'GET'",
",",
"endpoint",
"=",
"'abilities/%s'",
"%",
"ability",
",",
"a... | Test whether an ability is allowed. | [
"Test",
"whether",
"an",
"ability",
"is",
"allowed",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/ability.py#L21-L30 | train | 211,863 |
PagerDuty/pagerduty-api-python-client | pypd/__init__.py | set_api_key_from_file | def set_api_key_from_file(path, set_global=True):
"""Set the global api_key from a file path."""
with open(path, 'r+b') as f:
global api_key
api_key = f.read().strip()
return api_key | python | def set_api_key_from_file(path, set_global=True):
"""Set the global api_key from a file path."""
with open(path, 'r+b') as f:
global api_key
api_key = f.read().strip()
return api_key | [
"def",
"set_api_key_from_file",
"(",
"path",
",",
"set_global",
"=",
"True",
")",
":",
"with",
"open",
"(",
"path",
",",
"'r+b'",
")",
"as",
"f",
":",
"global",
"api_key",
"api_key",
"=",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"return",
... | Set the global api_key from a file path. | [
"Set",
"the",
"global",
"api_key",
"from",
"a",
"file",
"path",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/__init__.py#L29-L34 | train | 211,864 |
PagerDuty/pagerduty-api-python-client | pypd/models/incident.py | Incident.resolve | def resolve(self, from_email, resolution=None):
"""Resolve an incident using a valid email address."""
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
endpoint = '/'.join((self.endpoint, self.id,))
add_headers = {'from': from_email, }
data = {
'incident': {
'type': 'incident',
'status': 'resolved',
}
}
if resolution is not None:
data['resolution'] = resolution
result = self.request('PUT',
endpoint=endpoint,
add_headers=add_headers,
data=data,)
return result | python | def resolve(self, from_email, resolution=None):
"""Resolve an incident using a valid email address."""
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
endpoint = '/'.join((self.endpoint, self.id,))
add_headers = {'from': from_email, }
data = {
'incident': {
'type': 'incident',
'status': 'resolved',
}
}
if resolution is not None:
data['resolution'] = resolution
result = self.request('PUT',
endpoint=endpoint,
add_headers=add_headers,
data=data,)
return result | [
"def",
"resolve",
"(",
"self",
",",
"from_email",
",",
"resolution",
"=",
"None",
")",
":",
"if",
"from_email",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"from_email",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"MissingFromEmail",
"(",
"from_e... | Resolve an incident using a valid email address. | [
"Resolve",
"an",
"incident",
"using",
"a",
"valid",
"email",
"address",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/incident.py#L23-L44 | train | 211,865 |
PagerDuty/pagerduty-api-python-client | pypd/models/incident.py | Incident.reassign | def reassign(self, from_email, user_ids):
"""Reassign an incident to other users using a valid email address."""
endpoint = '/'.join((self.endpoint, self.id,))
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
if user_ids is None or not isinstance(user_ids, list):
raise InvalidArguments(user_ids)
if not all([isinstance(i, six.string_types) for i in user_ids]):
raise InvalidArguments(user_ids)
assignees = [
{
'assignee': {
'id': user_id,
'type': 'user_reference',
}
}
for user_id in user_ids
]
add_headers = {'from': from_email, }
data = {
'incident': {
'type': 'incident',
'assignments': assignees,
}
}
result = self.request('PUT',
endpoint=endpoint,
add_headers=add_headers,
data=data,)
return result | python | def reassign(self, from_email, user_ids):
"""Reassign an incident to other users using a valid email address."""
endpoint = '/'.join((self.endpoint, self.id,))
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
if user_ids is None or not isinstance(user_ids, list):
raise InvalidArguments(user_ids)
if not all([isinstance(i, six.string_types) for i in user_ids]):
raise InvalidArguments(user_ids)
assignees = [
{
'assignee': {
'id': user_id,
'type': 'user_reference',
}
}
for user_id in user_ids
]
add_headers = {'from': from_email, }
data = {
'incident': {
'type': 'incident',
'assignments': assignees,
}
}
result = self.request('PUT',
endpoint=endpoint,
add_headers=add_headers,
data=data,)
return result | [
"def",
"reassign",
"(",
"self",
",",
"from_email",
",",
"user_ids",
")",
":",
"endpoint",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"endpoint",
",",
"self",
".",
"id",
",",
")",
")",
"if",
"from_email",
"is",
"None",
"or",
"not",
"isinstance",
... | Reassign an incident to other users using a valid email address. | [
"Reassign",
"an",
"incident",
"to",
"other",
"users",
"using",
"a",
"valid",
"email",
"address",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/incident.py#L67-L101 | train | 211,866 |
PagerDuty/pagerduty-api-python-client | pypd/models/incident.py | Incident.log_entries | def log_entries(self, time_zone='UTC', is_overview=False,
include=None, fetch_all=True):
"""Query for log entries on an incident instance."""
endpoint = '/'.join((self.endpoint, self.id, 'log_entries'))
query_params = {
'time_zone': time_zone,
'is_overview': json.dumps(is_overview),
}
if include:
query_params['include'] = include
result = self.logEntryFactory.find(
endpoint=endpoint,
api_key=self.api_key,
fetch_all=fetch_all,
**query_params
)
return result | python | def log_entries(self, time_zone='UTC', is_overview=False,
include=None, fetch_all=True):
"""Query for log entries on an incident instance."""
endpoint = '/'.join((self.endpoint, self.id, 'log_entries'))
query_params = {
'time_zone': time_zone,
'is_overview': json.dumps(is_overview),
}
if include:
query_params['include'] = include
result = self.logEntryFactory.find(
endpoint=endpoint,
api_key=self.api_key,
fetch_all=fetch_all,
**query_params
)
return result | [
"def",
"log_entries",
"(",
"self",
",",
"time_zone",
"=",
"'UTC'",
",",
"is_overview",
"=",
"False",
",",
"include",
"=",
"None",
",",
"fetch_all",
"=",
"True",
")",
":",
"endpoint",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"endpoint",
",",
"s... | Query for log entries on an incident instance. | [
"Query",
"for",
"log",
"entries",
"on",
"an",
"incident",
"instance",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/incident.py#L103-L123 | train | 211,867 |
PagerDuty/pagerduty-api-python-client | pypd/models/incident.py | Incident.notes | def notes(self):
"""Query for notes attached to this incident."""
endpoint = '/'.join((self.endpoint, self.id, 'notes'))
return self.noteFactory.find(
endpoint=endpoint,
api_key=self.api_key,
) | python | def notes(self):
"""Query for notes attached to this incident."""
endpoint = '/'.join((self.endpoint, self.id, 'notes'))
return self.noteFactory.find(
endpoint=endpoint,
api_key=self.api_key,
) | [
"def",
"notes",
"(",
"self",
")",
":",
"endpoint",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"endpoint",
",",
"self",
".",
"id",
",",
"'notes'",
")",
")",
"return",
"self",
".",
"noteFactory",
".",
"find",
"(",
"endpoint",
"=",
"endpoint",
",... | Query for notes attached to this incident. | [
"Query",
"for",
"notes",
"attached",
"to",
"this",
"incident",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/incident.py#L129-L135 | train | 211,868 |
def create_note(self, from_email, content):
    """Attach a note with `content` to this incident.

    `from_email` must be the email address of a valid PagerDuty user;
    it is sent in the `From:` header as the API requires.
    """
    # isinstance(None, six.string_types) is False, so this single check
    # covers both the None case and the wrong-type case.
    if not isinstance(from_email, six.string_types):
        raise MissingFromEmail(from_email)
    note_path = '/'.join((self.endpoint, self.id, 'notes'))
    return self.noteFactory.create(
        endpoint=note_path,
        api_key=self.api_key,
        add_headers={'from': from_email},
        data={'content': content},
    )
"""Create a note for this incident."""
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
endpoint = '/'.join((self.endpoint, self.id, 'notes'))
add_headers = {'from': from_email, }
return self.noteFactory.create(
endpoint=endpoint,
api_key=self.api_key,
add_headers=add_headers,
data={'content': content},
) | [
"def",
"create_note",
"(",
"self",
",",
"from_email",
",",
"content",
")",
":",
"if",
"from_email",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"from_email",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"MissingFromEmail",
"(",
"from_email",
")",
... | Create a note for this incident. | [
"Create",
"a",
"note",
"for",
"this",
"incident",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/incident.py#L137-L150 | train | 211,869 |
def snooze(self, from_email, duration):
    """Snooze this incident for `duration` seconds.

    `from_email` must be the email address of a valid PagerDuty user.
    """
    # None fails the isinstance check too, so one test covers both cases.
    if not isinstance(from_email, six.string_types):
        raise MissingFromEmail(from_email)
    snooze_path = '/'.join((self.endpoint, self.id, 'snooze'))
    return self.__class__.create(
        endpoint=snooze_path,
        api_key=self.api_key,
        add_headers={'from': from_email},
        data_key='duration',
        data=duration,
    )
"""Snooze this incident for `duration` seconds."""
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
endpoint = '/'.join((self.endpoint, self.id, 'snooze'))
add_headers = {'from': from_email, }
return self.__class__.create(
endpoint=endpoint,
api_key=self.api_key,
add_headers=add_headers,
data_key='duration',
data=duration,
) | [
"def",
"snooze",
"(",
"self",
",",
"from_email",
",",
"duration",
")",
":",
"if",
"from_email",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"from_email",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"MissingFromEmail",
"(",
"from_email",
")",
"end... | Snooze this incident for `duration` seconds. | [
"Snooze",
"this",
"incident",
"for",
"duration",
"seconds",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/incident.py#L152-L166 | train | 211,870 |
def merge(self, from_email, source_incidents):
    """Merge `source_incidents` into this incident.

    `source_incidents` may contain Incident entities or plain id strings;
    each is converted to an incident_reference for the PUT body.
    """
    if not isinstance(from_email, six.string_types):
        raise MissingFromEmail(from_email)
    merge_path = '/'.join((self.endpoint, self.id, 'merge'))
    references = []
    for incident in source_incidents:
        ref_id = incident['id'] if isinstance(incident, Entity) else incident
        references.append({'type': 'incident_reference', 'id': ref_id})
    return self.__class__.create(
        endpoint=merge_path,
        api_key=self.api_key,
        add_headers={'from': from_email},
        data_key='source_incidents',
        data=references,
        method='PUT',
    )
"""Merge other incidents into this incident."""
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
add_headers = {'from': from_email, }
endpoint = '/'.join((self.endpoint, self.id, 'merge'))
incident_ids = [entity['id'] if isinstance(entity, Entity) else entity
for entity in source_incidents]
incident_references = [{'type': 'incident_reference', 'id': id_}
for id_ in incident_ids]
return self.__class__.create(
endpoint=endpoint,
api_key=self.api_key,
add_headers=add_headers,
data_key='source_incidents',
data=incident_references,
method='PUT',
) | [
"def",
"merge",
"(",
"self",
",",
"from_email",
",",
"source_incidents",
")",
":",
"if",
"from_email",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"from_email",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"MissingFromEmail",
"(",
"from_email",
")",... | Merge other incidents into this incident. | [
"Merge",
"other",
"incidents",
"into",
"this",
"incident",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/incident.py#L168-L187 | train | 211,871 |
def alerts(self):
    """Fetch the alerts attached to this incident."""
    alerts_path = '{0}/{1}/alerts'.format(self.endpoint, self.id)
    # delegate to the Alert factory so results come back as Alert entities
    return self.alertFactory.find(endpoint=alerts_path, api_key=self.api_key)
"""Query for alerts attached to this incident."""
endpoint = '/'.join((self.endpoint, self.id, 'alerts'))
return self.alertFactory.find(
endpoint=endpoint,
api_key=self.api_key,
) | [
"def",
"alerts",
"(",
"self",
")",
":",
"endpoint",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"endpoint",
",",
"self",
".",
"id",
",",
"'alerts'",
")",
")",
"return",
"self",
".",
"alertFactory",
".",
"find",
"(",
"endpoint",
"=",
"endpoint",
... | Query for alerts attached to this incident. | [
"Query",
"for",
"alerts",
"attached",
"to",
"this",
"incident",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/incident.py#L189-L195 | train | 211,872 |
def find(cls, *args, **kwargs):
    """
    Find notifications.

    Optional kwargs are:

        since: datetime instance
        until: datetime instance

    If not specified, `until` defaults to now() and `since` defaults to
    30 days prior to `until`.

    As per the PagerDuty spec, the date range must not exceed 1 month.
    """
    window = datetime.timedelta(seconds=60 * 60 * 24 * 30)  # 30 days
    until = kwargs.pop('until', None)
    since = kwargs.pop('since', None)
    if until is None:
        until = datetime.datetime.now()
    if since is None:
        since = until - window
    if (until - since) > window:
        raise InvalidArguments(until, since)
    # the API expects ISO-8601 strings, not datetime objects
    kwargs.update(since=since.isoformat(), until=until.isoformat())
    # py2/py3-compatible call of the unbound base-class method
    return getattr(Entity, 'find').__func__(cls, *args, **kwargs)
"""
Find notifications.
Optional kwargs are:
since:
datetime instance
until:
datetime instance
If not specified, until will default to now(), and since will default
to 30 days prior to until.
As per PD spec, date range must not exceed 1 month.
"""
seconds = 60 * 60 * 24 * 30 # seconds in 30 days
until = kwargs.pop('until', None)
since = kwargs.pop('since', None)
if until is None:
until = datetime.datetime.now()
if since is None:
since = until - datetime.timedelta(seconds=seconds)
dt = until - since
if dt > datetime.timedelta(seconds=seconds):
raise InvalidArguments(until, since)
kwargs['since'] = since.isoformat()
kwargs['until'] = until.isoformat()
return getattr(Entity, 'find').__func__(cls, *args, **kwargs) | [
"def",
"find",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"seconds",
"=",
"60",
"*",
"60",
"*",
"24",
"*",
"30",
"# seconds in 30 days",
"until",
"=",
"kwargs",
".",
"pop",
"(",
"'until'",
",",
"None",
")",
"since",
"=",
"kw... | Find notifications.
Optional kwargs are:
since:
datetime instance
until:
datetime instance
If not specified, until will default to now(), and since will default
to 30 days prior to until.
As per PD spec, date range must not exceed 1 month. | [
"Find",
"notifications",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/notification.py#L14-L46 | train | 211,873 |
def services(self):
    """Fetch full Service instances for this escalation policy.

    Each entry in ``self['services']`` is only a reference dict
    (``{'id': ..., ...}``); this resolves every reference with a
    ``Service.fetch`` API call (one request per service).

    Returns:
        list: `Service` instances, one per referenced service.
    """
    # Single comprehension; the original bound the loop variable to `id`,
    # shadowing the builtin.
    return [Service.fetch(ref['id']) for ref in self['services']]
"""Fetch all instances of services for this EP."""
ids = [ref['id'] for ref in self['services']]
return [Service.fetch(id) for id in ids] | [
"def",
"services",
"(",
"self",
")",
":",
"ids",
"=",
"[",
"ref",
"[",
"'id'",
"]",
"for",
"ref",
"in",
"self",
"[",
"'services'",
"]",
"]",
"return",
"[",
"Service",
".",
"fetch",
"(",
"id",
")",
"for",
"id",
"in",
"ids",
"]"
] | Fetch all instances of services for this EP. | [
"Fetch",
"all",
"instances",
"of",
"services",
"for",
"this",
"EP",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/escalation_policy.py#L13-L16 | train | 211,874 |
def fetch(cls, id, incident=None, endpoint=None, *args, **kwargs):
    """Fetch a single alert; alerts are nested under an incident.

    Either `incident` (an Entity or id string) or an explicit `endpoint`
    must be supplied so the nested path can be built.
    """
    if incident is None and endpoint is None:
        raise InvalidArguments(incident, endpoint)
    if endpoint is None:
        if isinstance(incident, Entity):
            incident_id = incident['id']
        else:
            incident_id = incident
        endpoint = 'incidents/{0}/alerts'.format(incident_id)
    # py2/py3-compatible call of the unbound base-class method
    base_fetch = getattr(Entity, 'fetch').__func__
    return base_fetch(cls, id, endpoint=endpoint, *args, **kwargs)
"""Customize fetch because this is a nested resource."""
if incident is None and endpoint is None:
raise InvalidArguments(incident, endpoint)
if endpoint is None:
iid = incident['id'] if isinstance(incident, Entity) else incident
endpoint = 'incidents/{0}/alerts'.format(iid)
return getattr(Entity, 'fetch').__func__(cls, id, endpoint=endpoint,
*args, **kwargs) | [
"def",
"fetch",
"(",
"cls",
",",
"id",
",",
"incident",
"=",
"None",
",",
"endpoint",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"incident",
"is",
"None",
"and",
"endpoint",
"is",
"None",
":",
"raise",
"InvalidArguments",... | Customize fetch because this is a nested resource. | [
"Customize",
"fetch",
"because",
"this",
"is",
"a",
"nested",
"resource",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/alert.py#L11-L21 | train | 211,875 |
def resolve(self, from_email):
    """Resolve this alert on behalf of the user with `from_email`."""
    # None fails the isinstance check too, so one test covers both cases.
    if not isinstance(from_email, six.string_types):
        raise MissingFromEmail(from_email)
    endpoint = 'incidents/{0}/alerts/{1}'.format(self['incident']['id'],
                                                 self['id'])
    payload = {
        'alert': {
            'id': self['id'],
            'type': 'alert',
            'status': 'resolved',
        }
    }
    return self.request('PUT',
                        endpoint=endpoint,
                        add_headers={'from': from_email},
                        data=payload)
"""Resolve an alert using a valid email address."""
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
parent_incident_id = self['incident']['id']
endpoint_format = 'incidents/{0}/alerts/{1}'
endpoint = endpoint_format.format(parent_incident_id, self['id'])
add_headers = {'from': from_email, }
data = {
'alert': {
'id': self['id'],
'type': 'alert',
'status': 'resolved',
}
}
result = self.request('PUT',
endpoint=endpoint,
add_headers=add_headers,
data=data,)
return result | [
"def",
"resolve",
"(",
"self",
",",
"from_email",
")",
":",
"if",
"from_email",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"from_email",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"MissingFromEmail",
"(",
"from_email",
")",
"parent_incident_id",
... | Resolve an alert using a valid email address. | [
"Resolve",
"an",
"alert",
"using",
"a",
"valid",
"email",
"address",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/alert.py#L23-L45 | train | 211,876 |
def associate(self, from_email, new_parent_incident=None):
    """Move this alert onto `new_parent_incident`.

    `new_parent_incident` may be an Incident entity or a plain id string;
    `from_email` must be the email address of a valid PagerDuty user.
    """
    if not isinstance(from_email, six.string_types):
        raise MissingFromEmail(from_email)
    if new_parent_incident is None:
        raise InvalidArguments(new_parent_incident)
    endpoint = 'incidents/{0}/alerts/{1}'.format(self['incident']['id'],
                                                 self['id'])
    if isinstance(new_parent_incident, Entity):
        target_id = new_parent_incident['id']
    else:
        target_id = new_parent_incident
    payload = {
        'alert': {
            'id': self['id'],
            'type': 'alert',
            'incident': {
                'type': 'incident',
                'id': target_id,
            }
        }
    }
    return self.request('PUT',
                        endpoint=endpoint,
                        add_headers={'from': from_email},
                        data=payload)
"""Associate an alert with an incident using a valid email address."""
if from_email is None or not isinstance(from_email, six.string_types):
raise MissingFromEmail(from_email)
if new_parent_incident is None:
raise InvalidArguments(new_parent_incident)
parent_incident_id = self['incident']['id']
endpoint_format = 'incidents/{0}/alerts/{1}'
endpoint = endpoint_format.format(parent_incident_id, self['id'])
if isinstance(new_parent_incident, Entity):
new_parent_incident_id = new_parent_incident['id']
else:
new_parent_incident_id = new_parent_incident
add_headers = {'from': from_email, }
data = {
'alert': {
'id': self['id'],
'type': 'alert',
'incident': {
'type': 'incident',
'id': new_parent_incident_id,
}
}
}
result = self.request('PUT',
endpoint=endpoint,
add_headers=add_headers,
data=data,)
return result | [
"def",
"associate",
"(",
"self",
",",
"from_email",
",",
"new_parent_incident",
"=",
"None",
")",
":",
"if",
"from_email",
"is",
"None",
"or",
"not",
"isinstance",
"(",
"from_email",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"MissingFromEmail",
"("... | Associate an alert with an incident using a valid email address. | [
"Associate",
"an",
"alert",
"with",
"an",
"incident",
"using",
"a",
"valid",
"email",
"address",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/alert.py#L47-L80 | train | 211,877 |
def fetch(cls, id, service=None, endpoint=None, *args, **kwargs):
    """Fetch a single integration; it lives under a service's endpoint.

    Either `service` (an Entity or id string) or an explicit `endpoint`
    must be supplied so the nested path can be built.
    """
    if service is None and endpoint is None:
        raise InvalidArguments(service, endpoint)
    if endpoint is None:
        service_id = service['id'] if isinstance(service, Entity) else service
        endpoint = 'services/{0}/integrations'.format(service_id)
    # py2/py3-compatible call of the unbound base-class method
    base_fetch = getattr(Entity, 'fetch').__func__
    return base_fetch(cls, id, endpoint=endpoint, *args, **kwargs)
"""Customize fetch because it lives on a special endpoint."""
if service is None and endpoint is None:
raise InvalidArguments(service, endpoint)
if endpoint is None:
sid = service['id'] if isinstance(service, Entity) else service
endpoint = 'services/{0}/integrations'.format(sid)
return getattr(Entity, 'fetch').__func__(cls, id, endpoint=endpoint,
*args, **kwargs) | [
"def",
"fetch",
"(",
"cls",
",",
"id",
",",
"service",
"=",
"None",
",",
"endpoint",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"service",
"is",
"None",
"and",
"endpoint",
"is",
"None",
":",
"raise",
"InvalidArguments",
... | Customize fetch because it lives on a special endpoint. | [
"Customize",
"fetch",
"because",
"it",
"lives",
"on",
"a",
"special",
"endpoint",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/integration.py#L39-L49 | train | 211,878 |
def create(cls, service=None, endpoint=None, data=None, *args, **kwargs):
    """
    Create an integration within the scope of a service.

    Either `service` (an Entity or id string) or an `endpoint` that
    already contains the service path must be supplied.
    """
    cls.validate(data)
    if service is None and endpoint is None:
        raise InvalidArguments(service, endpoint)
    if endpoint is None:
        service_id = service['id'] if isinstance(service, Entity) else service
        endpoint = 'services/{0}/integrations'.format(service_id)
    # py2/py3-compatible call of the unbound base-class method
    base_create = getattr(Entity, 'create').__func__
    return base_create(cls, endpoint=endpoint, data=data, *args, **kwargs)
"""
Create an integration within the scope of an service.
Make sure that they should reasonably be able to query with an
service or endpoint that knows about an service.
"""
cls.validate(data)
if service is None and endpoint is None:
raise InvalidArguments(service, endpoint)
if endpoint is None:
sid = service['id'] if isinstance(service, Entity) else service
endpoint = 'services/{0}/integrations'.format(sid)
# otherwise endpoint should contain the service path too
return getattr(Entity, 'create').__func__(cls, endpoint=endpoint,
data=data, *args, **kwargs) | [
"def",
"create",
"(",
"cls",
",",
"service",
"=",
"None",
",",
"endpoint",
"=",
"None",
",",
"data",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
".",
"validate",
"(",
"data",
")",
"if",
"service",
"is",
"None",
"and",... | Create an integration within the scope of an service.
Make sure that they should reasonably be able to query with an
service or endpoint that knows about an service. | [
"Create",
"an",
"integration",
"within",
"the",
"scope",
"of",
"an",
"service",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/integration.py#L63-L80 | train | 211,879 |
def get_oncall(self, **kwargs):
    """Return the users currently on call for this schedule.

    Extra keyword arguments are forwarded as query parameters.
    """
    users_path = '/'.join((self.endpoint, self.id, 'users'))
    return self.request('GET', endpoint=users_path, query_params=kwargs)
"""Retrieve this schedule's "on call" users."""
endpoint = '/'.join((self.endpoint, self.id, 'users'))
return self.request('GET', endpoint=endpoint, query_params=kwargs) | [
"def",
"get_oncall",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"endpoint",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"endpoint",
",",
"self",
".",
"id",
",",
"'users'",
")",
")",
"return",
"self",
".",
"request",
"(",
"'GET'",
",",
"e... | Retrieve this schedule's "on call" users. | [
"Retrieve",
"this",
"schedule",
"s",
"on",
"call",
"users",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/schedule.py#L9-L14 | train | 211,880 |
def _do_request(self, method, *args, **kwargs):
    """
    Perform the actual HTTP call via the `requests` library.

    Kept as a separate seam so tests can inject mocked response
    objects here.
    """
    log('Doing HTTP [{3}] request: {0} - headers: {1} - payload: {2}'.format(
        args[0], kwargs.get('headers'), kwargs.get('json'), method,),
        level=logging.DEBUG,)
    # dispatch to requests.get / requests.post / ... by method name
    response = getattr(requests, method)(*args, **kwargs)
    return self._handle_response(response)
"""
Modularized because API was broken.
Need to be able to inject Mocked response objects here.
"""
log('Doing HTTP [{3}] request: {0} - headers: {1} - payload: {2}'.format(
args[0], kwargs.get('headers'), kwargs.get('json'), method,),
level=logging.DEBUG,)
requests_method = getattr(requests, method)
return self._handle_response(requests_method(*args, **kwargs)) | [
"def",
"_do_request",
"(",
"self",
",",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
"(",
"'Doing HTTP [{3}] request: {0} - headers: {1} - payload: {2}'",
".",
"format",
"(",
"args",
"[",
"0",
"]",
",",
"kwargs",
".",
"get",
"(",
"... | Modularized because API was broken.
Need to be able to inject Mocked response objects here. | [
"Modularized",
"because",
"API",
"was",
"broken",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/mixins.py#L62-L72 | train | 211,881 |
def sanitize_ep(endpoint, plural=False):
    """
    Coerce an endpoint name to its singular or plural form.

    Uses naive English pluralization ('y' -> 'ies', otherwise append
    's'), which is all the PagerDuty endpoint names require.
    """
    if plural:
        # pluralize: policy -> policies; service -> services; notes -> notes
        if endpoint.endswith('y'):
            return endpoint[:-1] + 'ies'
        if endpoint.endswith('s'):
            return endpoint
        return endpoint + 's'
    # singularize: policies -> policy; services -> service
    if endpoint.endswith('ies'):
        return endpoint[:-3] + 'y'
    if endpoint.endswith('s'):
        return endpoint[:-1]
    return endpoint
"""
Sanitize an endpoint to a singular or plural form.
Used mostly for convenience in the `_parse` method to grab the raw
data from queried datasets.
XXX: this is el cheapo (no bastante bien)
"""
# if we need a plural endpoint (acessing lists)
if plural:
if endpoint.endswith('y'):
endpoint = endpoint[:-1] + 'ies'
elif not endpoint.endswith('s'):
endpoint += 's'
else:
# otherwise make sure it's singular form
if endpoint.endswith('ies'):
endpoint = endpoint[:-3] + 'y'
elif endpoint.endswith('s'):
endpoint = endpoint[:-1]
return endpoint | [
"def",
"sanitize_ep",
"(",
"endpoint",
",",
"plural",
"=",
"False",
")",
":",
"# if we need a plural endpoint (acessing lists)",
"if",
"plural",
":",
"if",
"endpoint",
".",
"endswith",
"(",
"'y'",
")",
":",
"endpoint",
"=",
"endpoint",
"[",
":",
"-",
"1",
"]... | Sanitize an endpoint to a singular or plural form.
Used mostly for convenience in the `_parse` method to grab the raw
data from queried datasets.
XXX: this is el cheapo (no bastante bien) | [
"Sanitize",
"an",
"endpoint",
"to",
"a",
"singular",
"or",
"plural",
"form",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/entity.py#L116-L138 | train | 211,882 |
def get_endpoint(cls):
    """
    Derive this class's API endpoint name.

    An explicitly set `cls.endpoint` wins; otherwise the CamelCase class
    name is converted to snake_case and pluralized.
    """
    if cls.endpoint is not None:
        return cls.endpoint
    # CamelCase -> snake_case in two regex passes, then pluralize
    partly_snaked = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', cls.__name__)
    snake_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partly_snaked).lower()
    return cls.sanitize_ep(snake_name, plural=True)
"""
Accessor method to enable omition of endpoint name.
In general we want the class name to be translated to endpoint name,
this way unless otherwise specified will translate class name to
endpoint name.
"""
if cls.endpoint is not None:
return cls.endpoint
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', cls.__name__)
return cls.sanitize_ep(
re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower(),
plural=True
) | [
"def",
"get_endpoint",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"endpoint",
"is",
"not",
"None",
":",
"return",
"cls",
".",
"endpoint",
"s1",
"=",
"re",
".",
"sub",
"(",
"'(.)([A-Z][a-z]+)'",
",",
"r'\\1_\\2'",
",",
"cls",
".",
"__name__",
")",
"return"... | Accessor method to enable omition of endpoint name.
In general we want the class name to be translated to endpoint name,
this way unless otherwise specified will translate class name to
endpoint name. | [
"Accessor",
"method",
"to",
"enable",
"omition",
"of",
"endpoint",
"name",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/entity.py#L141-L155 | train | 211,883 |
def _fetch_all(cls, api_key, endpoint=None, offset=0, limit=25, **kwargs):
    """
    Call `cls._fetch_page` for as many pages as exist.

    TODO: should be extended to do async page fetches if API allows it
    via exposing total value.

    Arguments:
        api_key: PagerDuty API key used for every page request.
        endpoint: optional endpoint override, forwarded to `_fetch_page`.
        offset: starting item offset for the first page.
        limit: requested page size; clamped to [1, 100] below.
        kwargs: extra query params; may include `maximum`, a soft cap on
            the total number of items to collect.

    Returns a list of `cls` instances.
    """
    output = []
    qp = kwargs.copy()
    # clamp the page size to the API's accepted range [1, 100]
    limit = max(1, min(100, limit))
    maximum = kwargs.get('maximum')
    # never request more than `maximum` items in a single page
    qp['limit'] = min(limit, maximum) if maximum is not None else limit
    qp['offset'] = offset
    more, total = None, None
    while True:
        entities, options = cls._fetch_page(
            api_key=api_key, endpoint=endpoint, **qp
        )
        output += entities
        # pagination metadata echoed back by the API for this page
        more = options.get('more')
        limit = options.get('limit')
        offset = options.get('offset')
        total = options.get('total')
        if more is None:
            # some endpoints omit 'more'; derive it from total/offset
            # when possible, otherwise stop (can't tell if more exist)
            if total is None or offset is None:
                break
            more = (limit + offset) < total
        # stop when the API says no more pages, or the soft cap is hit
        # (NOTE: output may slightly exceed `maximum` — the last page is
        # appended whole before the check)
        if not more or (maximum is not None and len(output) >= maximum):
            break
        # advance to the next page using the API-echoed limit/offset
        qp['limit'] = limit
        qp['offset'] = offset + limit
    return output
"""
Call `self._fetch_page` for as many pages as exist.
TODO: should be extended to do async page fetches if API allows it via
exposing total value.
Returns a list of `cls` instances.
"""
output = []
qp = kwargs.copy()
limit = max(1, min(100, limit))
maximum = kwargs.get('maximum')
qp['limit'] = min(limit, maximum) if maximum is not None else limit
qp['offset'] = offset
more, total = None, None
while True:
entities, options = cls._fetch_page(
api_key=api_key, endpoint=endpoint, **qp
)
output += entities
more = options.get('more')
limit = options.get('limit')
offset = options.get('offset')
total = options.get('total')
if more is None:
if total is None or offset is None:
break
more = (limit + offset) < total
if not more or (maximum is not None and len(output) >= maximum):
break
qp['limit'] = limit
qp['offset'] = offset + limit
return output | [
"def",
"_fetch_all",
"(",
"cls",
",",
"api_key",
",",
"endpoint",
"=",
"None",
",",
"offset",
"=",
"0",
",",
"limit",
"=",
"25",
",",
"*",
"*",
"kwargs",
")",
":",
"output",
"=",
"[",
"]",
"qp",
"=",
"kwargs",
".",
"copy",
"(",
")",
"limit",
"=... | Call `self._fetch_page` for as many pages as exist.
TODO: should be extended to do async page fetches if API allows it via
exposing total value.
Returns a list of `cls` instances. | [
"Call",
"self",
".",
"_fetch_page",
"for",
"as",
"many",
"pages",
"as",
"exist",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/entity.py#L158-L196 | train | 211,884 |
def _fetch_page(cls, api_key, endpoint=None, page_index=0, offset=None,
                limit=25, **kwargs):
    """
    Fetch a single page of `limit` number of results.

    Optionally provide `page_index` an integer (0-based) index for the
    page to return. Calculated based on `limit` and `offset`.

    Optionally provide `offset` which will override `page_index` if both
    are passed, will be used to calculate the integer offset of items.

    Optionally provide `limit` integer describing how many items pages
    ought to have.

    Returns a tuple containing a list of `cls` instances and response
    options (the response dict with the entity list removed, i.e. the
    pagination metadata such as `more`/`limit`/`offset`/`total`).
    """
    # if offset is provided have it overwrite the page_index provided
    if offset is not None:
        page_index = int(offset / limit)
    # limit can be maximum MAX_LIMIT_VALUE for most PD queries
    limit = max(1, min(cls.MAX_LIMIT_VALUE, limit))
    # make an tmp instance to do query work
    inst = cls(api_key=api_key)
    kwargs['offset'] = int(page_index * limit)
    maximum = kwargs.pop('maximum', None)
    # if maximum is valid, make the limit <= maximum
    kwargs['limit'] = min(limit, maximum) if maximum is not None else limit
    # the plural endpoint name doubles as the key holding the entity
    # list in the response body
    ep = parse_key = cls.sanitize_ep(cls.get_endpoint(), plural=True)
    # if an override to the endpoint is provided use that instead
    # this is useful for nested value searches ie. for
    # `incident_log_entries` but instead of /log_entries querying with
    # context of /incident/INCIDENTID/log_entries.
    # XXX: could be cleaner
    if endpoint is not None:
        ep = endpoint
    response = inst.request('GET', endpoint=ep, query_params=kwargs)
    # XXX: this is a little gross right now. Seems like the best way
    # to do the parsing out of something and then return everything else
    datas = cls._parse(response, key=parse_key)
    # strip the entity list so only pagination metadata remains
    response.pop(parse_key, None)
    entities = [cls(api_key=api_key, _data=d) for d in datas]
    # return a tuple: (entity instances, pagination metadata)
    return entities, response
limit=25, **kwargs):
"""
Fetch a single page of `limit` number of results.
Optionally provide `page_index` an integer (0-based) index for the
page to return. Calculated based on `limit` and `offset`.
Optionally provide `offset` which will override `page_index` if both
are passed, will be used to calculate the integer offset of items.
Optionally provide `limit` integer describing how many items pages
ought to have.
Returns a tuple containing a list of `cls` instances and response
options.
"""
# if offset is provided have it overwrite the page_index provided
if offset is not None:
page_index = int(offset / limit)
# limit can be maximum MAX_LIMIT_VALUE for most PD queries
limit = max(1, min(cls.MAX_LIMIT_VALUE, limit))
# make an tmp instance to do query work
inst = cls(api_key=api_key)
kwargs['offset'] = int(page_index * limit)
maximum = kwargs.pop('maximum', None)
# if maximum is valid, make the limit <= maximum
kwargs['limit'] = min(limit, maximum) if maximum is not None else limit
ep = parse_key = cls.sanitize_ep(cls.get_endpoint(), plural=True)
# if an override to the endpoint is provided use that instead
# this is useful for nested value searches ie. for
# `incident_log_entries` but instead of /log_entries querying with
# context of /incident/INCIDENTID/log_entries.
# XXX: could be cleaner
if endpoint is not None:
ep = endpoint
response = inst.request('GET', endpoint=ep, query_params=kwargs)
# XXX: this is a little gross right now. Seems like the best way
# to do the parsing out of something and then return everything else
datas = cls._parse(response, key=parse_key)
response.pop(parse_key, None)
entities = [cls(api_key=api_key, _data=d) for d in datas]
# return a tuple
return entities, response | [
"def",
"_fetch_page",
"(",
"cls",
",",
"api_key",
",",
"endpoint",
"=",
"None",
",",
"page_index",
"=",
"0",
",",
"offset",
"=",
"None",
",",
"limit",
"=",
"25",
",",
"*",
"*",
"kwargs",
")",
":",
"# if offset is provided have it overwrite the page_index provi... | Fetch a single page of `limit` number of results.
Optionally provide `page_index` an integer (0-based) index for the
page to return. Calculated based on `limit` and `offset`.
Optionally provide `offset` which will override `page_index` if both
are passed, will be used to calculate the integer offset of items.
Optionally provide `limit` integer describing how many items pages
ought to have.
Returns a tuple containing a list of `cls` instances and response
options. | [
"Fetch",
"a",
"single",
"page",
"of",
"limit",
"number",
"of",
"results",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/entity.py#L199-L248 | train | 211,885 |
def fetch(cls, id, api_key=None, endpoint=None, add_headers=None,
          **kwargs):
    """Fetch a single entity by its exact ID.

    Issues a GET against ``<endpoint>/<id>`` and returns a populated
    instance of this class. Used when you already know the exact ID
    that must be queried.
    """
    base = cls.get_endpoint() if endpoint is None else endpoint
    instance = cls(api_key=api_key)
    # the response payload is keyed by the last path segment of the
    # (sanitized) endpoint, e.g. "services" for /services
    parse_key = cls.sanitize_ep(base).split("/")[-1]
    response = instance.request(
        'GET',
        endpoint='/'.join((base, id)),
        add_headers=add_headers,
        query_params=kwargs,
    )
    instance._set(cls._parse(response, key=parse_key))
    return instance
def translate_query_params(cls, **kwargs):
    """
    Translate an arbitrary keyword argument to the expected ``query`` param.

    In the v2 API, many endpoints expect a particular query argument in
    the form ``query=xxx``, where ``xxx`` might be a name, an ID, or
    similar. This method takes a more aptly named parameter listed in
    ``TRANSLATE_QUERY_PARAM`` and substitutes its value into the
    ``query`` keyword argument, so some models can (optionally) expose
    nicer-named keyword arguments than ``query``.

    If a ``query`` argument is given, its value is used as-is. If a
    substitute keyword listed in ``TRANSLATE_QUERY_PARAM`` is given
    (and ``query`` is not), its value becomes the ``query`` argument.

    Eg. No query param:
        TRANSLATE_QUERY_PARAM = ('name',)
        kwargs = {'name': 'PagerDuty'}
        ...
        output = {'query': 'PagerDuty'}

    or, query param explicitly:
        TRANSLATE_QUERY_PARAM = ('name',)
        kwargs = {'name': 'PagerDuty', 'query': 'XXXXPlopperDuty'}
        ...
        output = {'query': 'XXXXPlopperDuty'}

    or, TRANSLATE_QUERY_PARAM is None:
        TRANSLATE_QUERY_PARAM = None
        kwargs = {'name': 'PagerDuty', 'query': 'XXXXPlopperDuty'}
        ...
        output = {'query': 'XXXXPlopperDuty', 'name': 'PagerDuty'}
    """
    values = []
    output = kwargs.copy()
    query = kwargs.pop('query', None)
    # drop any of the TRANSLATE_QUERY_PARAM keys from the output,
    # remembering their values as candidate query strings
    for param in (cls.TRANSLATE_QUERY_PARAM or []):
        popped = output.pop(param, None)
        if popped is not None:
            values.append(popped)
    # an explicitly provided query always wins
    if query is not None:
        output['query'] = query
        return output
    # otherwise fall back to the first translated candidate, if any
    if values:
        output['query'] = values[0]
    return output
def find(cls, api_key=None, fetch_all=True, endpoint=None, maximum=None,
         **kwargs):
    """Find entities from the API endpoint.

    If no ``api_key`` is provided, the global API key will be used.
    If ``fetch_all`` is True, page through all the data and find every
    record that exists; otherwise fetch a single page. Remaining
    keyword arguments are passed as query parameters to the request.
    """
    exclude = kwargs.pop('exclude', None)
    # a lone string exclusion becomes a one-element list
    if isinstance(exclude, six.string_types):
        exclude = [exclude]
    query_params = cls.translate_query_params(**kwargs)
    # unless otherwise specified, use the class-level endpoint
    target = cls.get_endpoint() if endpoint is None else endpoint
    fetcher = cls._fetch_all if fetch_all else cls._fetch_page
    result = fetcher(api_key=api_key, endpoint=target, maximum=maximum,
                     **query_params)
    # run every result through the exclusion filter
    return [item for item in result
            if not cls._find_exclude_filter(exclude, item)]
def create(cls, data=None, api_key=None, endpoint=None, add_headers=None,
           data_key=None, response_data_key=None, method='POST', **kwargs):
    """Create an instance of the Entity model via the API endpoint.

    This ensures the server knows about the creation before the class
    instance is returned.

    NOTE: the server must respond with the full entity payload; a bare
    True/False acknowledgement is not enough to build the instance.
    """
    instance = cls(api_key=api_key)
    if data_key is None:
        data_key = cls.sanitize_ep(cls.get_endpoint())
    if response_data_key is None:
        response_data_key = cls.sanitize_ep(cls.get_endpoint())
    if endpoint is None:
        endpoint = cls.get_endpoint()
    response = instance.request(
        method,
        endpoint=endpoint,
        data={data_key: data},
        query_params=kwargs,
        add_headers=add_headers,
    )
    instance._set(cls._parse(response, key=response_data_key))
    return instance
PagerDuty/pagerduty-api-python-client | pypd/models/entity.py | Entity._parse | def _parse(cls, data, key=None):
"""
Parse a set of data to extract entity-only data.
Use classmethod `parse` if available, otherwise use the `endpoint`
class variable to extract data from a data blob.
"""
parse = cls.parse if cls.parse is not None else cls.get_endpoint()
if callable(parse):
data = parse(data)
elif isinstance(parse, str):
data = data[key]
else:
raise Exception('"parse" should be a callable or string got, {0}'
.format(parse))
return data | python | def _parse(cls, data, key=None):
"""
Parse a set of data to extract entity-only data.
Use classmethod `parse` if available, otherwise use the `endpoint`
class variable to extract data from a data blob.
"""
parse = cls.parse if cls.parse is not None else cls.get_endpoint()
if callable(parse):
data = parse(data)
elif isinstance(parse, str):
data = data[key]
else:
raise Exception('"parse" should be a callable or string got, {0}'
.format(parse))
return data | [
"def",
"_parse",
"(",
"cls",
",",
"data",
",",
"key",
"=",
"None",
")",
":",
"parse",
"=",
"cls",
".",
"parse",
"if",
"cls",
".",
"parse",
"is",
"not",
"None",
"else",
"cls",
".",
"get_endpoint",
"(",
")",
"if",
"callable",
"(",
"parse",
")",
":"... | Parse a set of data to extract entity-only data.
Use classmethod `parse` if available, otherwise use the `endpoint`
class variable to extract data from a data blob. | [
"Parse",
"a",
"set",
"of",
"data",
"to",
"extract",
"entity",
"-",
"only",
"data",
"."
] | f420b34ca9b29689cc2ecc9adca6dc5d56ae7161 | https://github.com/PagerDuty/pagerduty-api-python-client/blob/f420b34ca9b29689cc2ecc9adca6dc5d56ae7161/pypd/models/entity.py#L497-L513 | train | 211,890 |
def log(*args, **kwargs):
    """Log a message through the module-level logger.

    The optional ``level`` keyword argument (default ``logging.INFO``)
    selects the severity; everything else is forwarded to
    ``Logger.log`` unchanged.
    """
    severity = kwargs.pop('level', logging.INFO)
    logger.log(severity, *args, **kwargs)
def create(cls, data=None, api_key=None, endpoint=None, add_headers=None,
           **kwargs):
    """Create an event on your PagerDuty account.

    NOTE(review): the ``endpoint`` argument appears to be intentionally
    ignored — events are always POSTed to the API root.
    """
    cls.validate(data)
    instance = cls(api_key=api_key)
    # events always go to the root of the events API
    return instance.request(
        'POST',
        endpoint='',
        data=data,
        query_params=kwargs,
        add_headers=add_headers,
    )
def remove_escalation_policy(self, escalation_policy, **kwargs):
    """Remove an escalation policy from this team.

    Accepts either an Entity instance or a policy id string.
    """
    policy_id = (escalation_policy['id']
                 if isinstance(escalation_policy, Entity)
                 else escalation_policy)
    assert isinstance(policy_id, six.string_types)
    endpoint = '{0}/{1}/escalation_policies/{2}'.format(
        self.endpoint,
        self['id'],
        policy_id,
    )
    return self.request('DELETE', endpoint=endpoint, query_params=kwargs)
def remove_user(self, user, **kwargs):
    """Remove a user from this team.

    Accepts either an Entity instance or a user id string.
    """
    user_id = user['id'] if isinstance(user, Entity) else user
    assert isinstance(user_id, six.string_types)
    endpoint = '{0}/{1}/users/{2}'.format(
        self.endpoint,
        self['id'],
        user_id,
    )
    return self.request('DELETE', endpoint=endpoint, query_params=kwargs)
def add_user(self, user, **kwargs):
    """Add a user to this team.

    Accepts either a User instance or a user id string.
    """
    user_id = user['id'] if isinstance(user, User) else user
    assert isinstance(user_id, six.string_types)
    endpoint = '{0}/{1}/users/{2}'.format(
        self.endpoint,
        self['id'],
        user_id,
    )
    return self.request('PUT', endpoint=endpoint, query_params=kwargs)
def create_integration(self, integration_info, **kwargs):
    """Create an integration for this service.

    See: https://v2.developer.pagerduty.com/v2/page/api-reference#!/
    Services/post_services_id_integrations
    """
    service_info = integration_info.get('service')
    vendor_info = integration_info.get('vendor')
    # validate whichever sub-payloads are present before sending
    if service_info is not None:
        self.__class__.validate(service_info)
    if vendor_info is not None:
        self.vendorFactory.validate(vendor_info)
    endpoint = '{0}/{1}/integrations'.format(self.endpoint, self['id'])
    return self.integrationFactory.create(
        endpoint=endpoint,
        api_key=self.api_key,
        data=integration_info,
        query_params=kwargs,
    )
def integrations(self, **kwargs):
    """Retrieve all of this service's integrations."""
    # fetch each integration referenced by this service record;
    # avoids shadowing the builtin ``id`` in the process
    return [Integration.fetch(ref['id'], service=self, query_params=kwargs)
            for ref in self['integrations']]
def get_integration(self, id, **kwargs):
    """Fetch one integration of this service by its id."""
    return Integration.fetch(id, service=self, query_params=kwargs)
def contact_methods(self, **kwargs):
    """Return the list of contact methods configured for this user."""
    endpoint = '{0}/{1}/contact_methods'.format(self.endpoint, self['id'])
    response = self.request('GET', endpoint=endpoint, query_params=kwargs)
    return response['contact_methods']
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.