repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
vhf/confusable_homoglyphs | confusable_homoglyphs/cli.py | generate_categories | def generate_categories():
"""Generates the categories JSON data file from the unicode specification.
:return: True for success, raises otherwise.
:rtype: bool
"""
# inspired by https://gist.github.com/anonymous/2204527
code_points_ranges = []
iso_15924_aliases = []
categories = []
match = re.compile(r'([0-9A-F]+)(?:\.\.([0-9A-F]+))?\W+(\w+)\s*#\s*(\w+)',
re.UNICODE)
url = 'ftp://ftp.unicode.org/Public/UNIDATA/Scripts.txt'
file = get(url)
for line in file:
p = re.findall(match, line)
if p:
code_point_range_from, code_point_range_to, alias, category = p[0]
alias = u(alias.upper())
category = u(category)
if alias not in iso_15924_aliases:
iso_15924_aliases.append(alias)
if category not in categories:
categories.append(category)
code_points_ranges.append((
int(code_point_range_from, 16),
int(code_point_range_to or code_point_range_from, 16),
iso_15924_aliases.index(alias), categories.index(category))
)
code_points_ranges.sort()
categories_data = {
'iso_15924_aliases': iso_15924_aliases,
'categories': categories,
'code_points_ranges': code_points_ranges,
}
dump('categories.json', categories_data) | python | def generate_categories():
"""Generates the categories JSON data file from the unicode specification.
:return: True for success, raises otherwise.
:rtype: bool
"""
# inspired by https://gist.github.com/anonymous/2204527
code_points_ranges = []
iso_15924_aliases = []
categories = []
match = re.compile(r'([0-9A-F]+)(?:\.\.([0-9A-F]+))?\W+(\w+)\s*#\s*(\w+)',
re.UNICODE)
url = 'ftp://ftp.unicode.org/Public/UNIDATA/Scripts.txt'
file = get(url)
for line in file:
p = re.findall(match, line)
if p:
code_point_range_from, code_point_range_to, alias, category = p[0]
alias = u(alias.upper())
category = u(category)
if alias not in iso_15924_aliases:
iso_15924_aliases.append(alias)
if category not in categories:
categories.append(category)
code_points_ranges.append((
int(code_point_range_from, 16),
int(code_point_range_to or code_point_range_from, 16),
iso_15924_aliases.index(alias), categories.index(category))
)
code_points_ranges.sort()
categories_data = {
'iso_15924_aliases': iso_15924_aliases,
'categories': categories,
'code_points_ranges': code_points_ranges,
}
dump('categories.json', categories_data) | [
"def",
"generate_categories",
"(",
")",
":",
"# inspired by https://gist.github.com/anonymous/2204527",
"code_points_ranges",
"=",
"[",
"]",
"iso_15924_aliases",
"=",
"[",
"]",
"categories",
"=",
"[",
"]",
"match",
"=",
"re",
".",
"compile",
"(",
"r'([0-9A-F]+)(?:\\.\... | Generates the categories JSON data file from the unicode specification.
:return: True for success, raises otherwise.
:rtype: bool | [
"Generates",
"the",
"categories",
"JSON",
"data",
"file",
"from",
"the",
"unicode",
"specification",
"."
] | 14f43ddd74099520ddcda29fac557c27a28190e6 | https://github.com/vhf/confusable_homoglyphs/blob/14f43ddd74099520ddcda29fac557c27a28190e6/confusable_homoglyphs/cli.py#L28-L67 | train | 29,800 |
rasbt/biopandas | docs/make_api.py | docstring_to_markdown | def docstring_to_markdown(docstring):
"""Convert a Python object's docstring to markdown
Parameters
----------
docstring : str
The docstring body.
Returns
----------
clean_lst : list
The markdown formatted docstring as lines (str) in a Python list.
"""
new_docstring_lst = []
for idx, line in enumerate(docstring.split('\n')):
line = line.strip()
if set(line) in ({'-'}, {'='}):
new_docstring_lst[idx-1] = '**%s**' % new_docstring_lst[idx-1]
elif line.startswith('>>>'):
line = ' %s' % line
new_docstring_lst.append(line)
for idx, line in enumerate(new_docstring_lst[1:]):
if line:
if line.startswith('Description : '):
new_docstring_lst[idx+1] = (new_docstring_lst[idx+1]
.replace('Description : ', ''))
elif ' : ' in line:
line = line.replace(' : ', '` : ')
new_docstring_lst[idx+1] = '\n- `%s\n' % line
elif '**' in new_docstring_lst[idx-1] and '**' not in line:
new_docstring_lst[idx+1] = '\n%s' % line.lstrip()
elif '**' not in line:
new_docstring_lst[idx+1] = ' %s' % line.lstrip()
clean_lst = []
for line in new_docstring_lst:
if set(line.strip()) not in ({'-'}, {'='}):
clean_lst.append(line)
return clean_lst | python | def docstring_to_markdown(docstring):
"""Convert a Python object's docstring to markdown
Parameters
----------
docstring : str
The docstring body.
Returns
----------
clean_lst : list
The markdown formatted docstring as lines (str) in a Python list.
"""
new_docstring_lst = []
for idx, line in enumerate(docstring.split('\n')):
line = line.strip()
if set(line) in ({'-'}, {'='}):
new_docstring_lst[idx-1] = '**%s**' % new_docstring_lst[idx-1]
elif line.startswith('>>>'):
line = ' %s' % line
new_docstring_lst.append(line)
for idx, line in enumerate(new_docstring_lst[1:]):
if line:
if line.startswith('Description : '):
new_docstring_lst[idx+1] = (new_docstring_lst[idx+1]
.replace('Description : ', ''))
elif ' : ' in line:
line = line.replace(' : ', '` : ')
new_docstring_lst[idx+1] = '\n- `%s\n' % line
elif '**' in new_docstring_lst[idx-1] and '**' not in line:
new_docstring_lst[idx+1] = '\n%s' % line.lstrip()
elif '**' not in line:
new_docstring_lst[idx+1] = ' %s' % line.lstrip()
clean_lst = []
for line in new_docstring_lst:
if set(line.strip()) not in ({'-'}, {'='}):
clean_lst.append(line)
return clean_lst | [
"def",
"docstring_to_markdown",
"(",
"docstring",
")",
":",
"new_docstring_lst",
"=",
"[",
"]",
"for",
"idx",
",",
"line",
"in",
"enumerate",
"(",
"docstring",
".",
"split",
"(",
"'\\n'",
")",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if"... | Convert a Python object's docstring to markdown
Parameters
----------
docstring : str
The docstring body.
Returns
----------
clean_lst : list
The markdown formatted docstring as lines (str) in a Python list. | [
"Convert",
"a",
"Python",
"object",
"s",
"docstring",
"to",
"markdown"
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/docs/make_api.py#L24-L65 | train | 29,801 |
rasbt/biopandas | docs/make_api.py | object_to_markdownpage | def object_to_markdownpage(obj_name, obj, s=''):
"""Generate the markdown documentation of a Python object.
Parameters
----------
obj_name : str
Name of the Python object.
obj : object
Python object (class, method, function, ...)
s : str (default: '')
A string to which the documentation will be appended to.
Returns
---------
s : str
The markdown page.
"""
# header
s += '## %s\n' % obj_name
# function/class/method signature
sig = str(inspect.signature(obj)).replace('(self, ', '(')
s += '\n*%s%s*\n\n' % (obj_name, sig)
# docstring body
doc = str(inspect.getdoc(obj))
ds = docstring_to_markdown(doc)
s += '\n'.join(ds)
# document methods
if inspect.isclass(obj):
methods, properties = '\n\n### Methods', '\n\n### Properties'
members = inspect.getmembers(obj)
for m in members:
if not m[0].startswith('_') and len(m) >= 2:
if isinstance(m[1], property):
properties += '\n\n<hr>\n\n*%s*\n\n' % m[0]
m_doc = docstring_to_markdown(str(inspect.getdoc(m[1])))
properties += '\n'.join(m_doc)
else:
sig = str(inspect.signature(m[1]))
sig = sig.replace('(self, ', '(').replace('(self)', '()')
sig = sig.replace('(self)', '()')
methods += '\n\n<hr>\n\n*%s%s*\n\n' % (m[0], sig)
m_doc = docstring_to_markdown(str(inspect.getdoc(m[1])))
methods += '\n'.join(m_doc)
s += methods
s += properties
return s + '\n\n' | python | def object_to_markdownpage(obj_name, obj, s=''):
"""Generate the markdown documentation of a Python object.
Parameters
----------
obj_name : str
Name of the Python object.
obj : object
Python object (class, method, function, ...)
s : str (default: '')
A string to which the documentation will be appended to.
Returns
---------
s : str
The markdown page.
"""
# header
s += '## %s\n' % obj_name
# function/class/method signature
sig = str(inspect.signature(obj)).replace('(self, ', '(')
s += '\n*%s%s*\n\n' % (obj_name, sig)
# docstring body
doc = str(inspect.getdoc(obj))
ds = docstring_to_markdown(doc)
s += '\n'.join(ds)
# document methods
if inspect.isclass(obj):
methods, properties = '\n\n### Methods', '\n\n### Properties'
members = inspect.getmembers(obj)
for m in members:
if not m[0].startswith('_') and len(m) >= 2:
if isinstance(m[1], property):
properties += '\n\n<hr>\n\n*%s*\n\n' % m[0]
m_doc = docstring_to_markdown(str(inspect.getdoc(m[1])))
properties += '\n'.join(m_doc)
else:
sig = str(inspect.signature(m[1]))
sig = sig.replace('(self, ', '(').replace('(self)', '()')
sig = sig.replace('(self)', '()')
methods += '\n\n<hr>\n\n*%s%s*\n\n' % (m[0], sig)
m_doc = docstring_to_markdown(str(inspect.getdoc(m[1])))
methods += '\n'.join(m_doc)
s += methods
s += properties
return s + '\n\n' | [
"def",
"object_to_markdownpage",
"(",
"obj_name",
",",
"obj",
",",
"s",
"=",
"''",
")",
":",
"# header",
"s",
"+=",
"'## %s\\n'",
"%",
"obj_name",
"# function/class/method signature",
"sig",
"=",
"str",
"(",
"inspect",
".",
"signature",
"(",
"obj",
")",
")",... | Generate the markdown documentation of a Python object.
Parameters
----------
obj_name : str
Name of the Python object.
obj : object
Python object (class, method, function, ...)
s : str (default: '')
A string to which the documentation will be appended to.
Returns
---------
s : str
The markdown page. | [
"Generate",
"the",
"markdown",
"documentation",
"of",
"a",
"Python",
"object",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/docs/make_api.py#L68-L117 | train | 29,802 |
rasbt/biopandas | docs/make_api.py | import_package | def import_package(rel_path_to_package, package_name):
"""Imports a python package into the current namespace.
Parameters
----------
rel_path_to_package : str
Path to the package containing director relative from this script's
directory.
package_name : str
The name of the package to be imported.
Returns
---------
package : The imported package object.
"""
try:
curr_dir = os.path.dirname(os.path.realpath(__file__))
except NameError:
curr_dir = os.path.dirname(os.path.realpath(os.getcwd()))
package_path = os.path.join(curr_dir, rel_path_to_package)
if package_path not in sys.path:
sys.path = [package_path] + sys.path
package = __import__(package_name)
return package | python | def import_package(rel_path_to_package, package_name):
"""Imports a python package into the current namespace.
Parameters
----------
rel_path_to_package : str
Path to the package containing director relative from this script's
directory.
package_name : str
The name of the package to be imported.
Returns
---------
package : The imported package object.
"""
try:
curr_dir = os.path.dirname(os.path.realpath(__file__))
except NameError:
curr_dir = os.path.dirname(os.path.realpath(os.getcwd()))
package_path = os.path.join(curr_dir, rel_path_to_package)
if package_path not in sys.path:
sys.path = [package_path] + sys.path
package = __import__(package_name)
return package | [
"def",
"import_package",
"(",
"rel_path_to_package",
",",
"package_name",
")",
":",
"try",
":",
"curr_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"except",
"NameError",
":",
"curr_dir",
... | Imports a python package into the current namespace.
Parameters
----------
rel_path_to_package : str
Path to the package containing director relative from this script's
directory.
package_name : str
The name of the package to be imported.
Returns
---------
package : The imported package object. | [
"Imports",
"a",
"python",
"package",
"into",
"the",
"current",
"namespace",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/docs/make_api.py#L120-L144 | train | 29,803 |
rasbt/biopandas | docs/make_api.py | get_functions_and_classes | def get_functions_and_classes(package):
"""Retun lists of functions and classes from a package.
Parameters
----------
package : python package object
Returns
--------
list, list : list of classes and functions
Each sublist consists of [name, member] sublists.
"""
classes, functions = [], []
for name, member in inspect.getmembers(package):
if not name.startswith('_'):
if inspect.isclass(member):
classes.append([name, member])
elif inspect.isfunction(member):
functions.append([name, member])
return classes, functions | python | def get_functions_and_classes(package):
"""Retun lists of functions and classes from a package.
Parameters
----------
package : python package object
Returns
--------
list, list : list of classes and functions
Each sublist consists of [name, member] sublists.
"""
classes, functions = [], []
for name, member in inspect.getmembers(package):
if not name.startswith('_'):
if inspect.isclass(member):
classes.append([name, member])
elif inspect.isfunction(member):
functions.append([name, member])
return classes, functions | [
"def",
"get_functions_and_classes",
"(",
"package",
")",
":",
"classes",
",",
"functions",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"name",
",",
"member",
"in",
"inspect",
".",
"getmembers",
"(",
"package",
")",
":",
"if",
"not",
"name",
".",
"startswith",
... | Retun lists of functions and classes from a package.
Parameters
----------
package : python package object
Returns
--------
list, list : list of classes and functions
Each sublist consists of [name, member] sublists. | [
"Retun",
"lists",
"of",
"functions",
"and",
"classes",
"from",
"a",
"package",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/docs/make_api.py#L177-L197 | train | 29,804 |
rasbt/biopandas | docs/make_api.py | generate_api_docs | def generate_api_docs(package, api_dir, clean=False, printlog=True):
"""Generate a module level API documentation of a python package.
Description
-----------
Generates markdown API files for each module in a Python package whereas
the structure is as follows:
`package/package.subpackage/package.subpackage.module.md`
Parameters
-----------
package : Python package object
api_dir : str
Output directory path for the top-level package directory
clean : bool (default: False)
Removes previously existing API directory if True.
printlog : bool (default: True)
Prints a progress log to the standard output screen if True.
"""
if printlog:
print('\n\nGenerating Module Files\n%s\n' % (50 * '='))
prefix = package.__name__ + "."
# clear the previous version
if clean:
if os.path.isdir(api_dir):
shutil.rmtree(api_dir)
# get subpackages
api_docs = {}
for importer, pkg_name, is_pkg in pkgutil.iter_modules(
package.__path__,
prefix):
if is_pkg:
subpackage = __import__(pkg_name, fromlist="dummy")
prefix = subpackage.__name__ + "."
# get functions and classes
classes, functions = get_functions_and_classes(subpackage)
target_dir = os.path.join(api_dir, subpackage.__name__)
# create the subdirs
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
if printlog:
print('created %s' % target_dir)
# create markdown documents in memory
for obj in classes + functions:
md_path = os.path.join(target_dir, obj[0]) + '.md'
if md_path not in api_docs:
api_docs[md_path] = object_to_markdownpage(obj_name=obj[0],
obj=obj[1],
s='')
else:
api_docs[md_path] += object_to_markdownpage(obj_name=(
obj[0]),
obj=obj[1],
s='')
# write to files
for d in sorted(api_docs):
prev = ''
if os.path.isfile(d):
with open(d, 'r') as f:
prev = f.read()
if prev == api_docs[d]:
msg = 'skipped'
else:
msg = 'updated'
else:
msg = 'created'
if msg != 'skipped':
with open(d, 'w') as f:
f.write(api_docs[d])
if printlog:
print('%s %s' % (msg, d)) | python | def generate_api_docs(package, api_dir, clean=False, printlog=True):
"""Generate a module level API documentation of a python package.
Description
-----------
Generates markdown API files for each module in a Python package whereas
the structure is as follows:
`package/package.subpackage/package.subpackage.module.md`
Parameters
-----------
package : Python package object
api_dir : str
Output directory path for the top-level package directory
clean : bool (default: False)
Removes previously existing API directory if True.
printlog : bool (default: True)
Prints a progress log to the standard output screen if True.
"""
if printlog:
print('\n\nGenerating Module Files\n%s\n' % (50 * '='))
prefix = package.__name__ + "."
# clear the previous version
if clean:
if os.path.isdir(api_dir):
shutil.rmtree(api_dir)
# get subpackages
api_docs = {}
for importer, pkg_name, is_pkg in pkgutil.iter_modules(
package.__path__,
prefix):
if is_pkg:
subpackage = __import__(pkg_name, fromlist="dummy")
prefix = subpackage.__name__ + "."
# get functions and classes
classes, functions = get_functions_and_classes(subpackage)
target_dir = os.path.join(api_dir, subpackage.__name__)
# create the subdirs
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
if printlog:
print('created %s' % target_dir)
# create markdown documents in memory
for obj in classes + functions:
md_path = os.path.join(target_dir, obj[0]) + '.md'
if md_path not in api_docs:
api_docs[md_path] = object_to_markdownpage(obj_name=obj[0],
obj=obj[1],
s='')
else:
api_docs[md_path] += object_to_markdownpage(obj_name=(
obj[0]),
obj=obj[1],
s='')
# write to files
for d in sorted(api_docs):
prev = ''
if os.path.isfile(d):
with open(d, 'r') as f:
prev = f.read()
if prev == api_docs[d]:
msg = 'skipped'
else:
msg = 'updated'
else:
msg = 'created'
if msg != 'skipped':
with open(d, 'w') as f:
f.write(api_docs[d])
if printlog:
print('%s %s' % (msg, d)) | [
"def",
"generate_api_docs",
"(",
"package",
",",
"api_dir",
",",
"clean",
"=",
"False",
",",
"printlog",
"=",
"True",
")",
":",
"if",
"printlog",
":",
"print",
"(",
"'\\n\\nGenerating Module Files\\n%s\\n'",
"%",
"(",
"50",
"*",
"'='",
")",
")",
"prefix",
... | Generate a module level API documentation of a python package.
Description
-----------
Generates markdown API files for each module in a Python package whereas
the structure is as follows:
`package/package.subpackage/package.subpackage.module.md`
Parameters
-----------
package : Python package object
api_dir : str
Output directory path for the top-level package directory
clean : bool (default: False)
Removes previously existing API directory if True.
printlog : bool (default: True)
Prints a progress log to the standard output screen if True. | [
"Generate",
"a",
"module",
"level",
"API",
"documentation",
"of",
"a",
"python",
"package",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/docs/make_api.py#L200-L281 | train | 29,805 |
rasbt/biopandas | docs/make_api.py | summarize_methdods_and_functions | def summarize_methdods_and_functions(api_modules, out_dir,
printlog=False, clean=True,
str_above_header=''):
"""Generates subpacke-level summary files.
Description
-----------
A function to generate subpacke-level summary markdown API files from
a module-level API documentation previously created via the
`generate_api_docs` function.
The output structure is:
package/package.subpackage.md
Parameters
----------
api_modules : str
Path to the API documentation crated via `generate_api_docs`
out_dir : str
Path to the desired output directory for the new markdown files.
clean : bool (default: False)
Removes previously existing API directory if True.
printlog : bool (default: True)
Prints a progress log to the standard output screen if True.
str_above_header : str (default: '')
Places a string just above the header.
"""
if printlog:
print('\n\nGenerating Subpackage Files\n%s\n' % (50 * '='))
if clean:
if os.path.isdir(out_dir):
shutil.rmtree(out_dir)
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
if printlog:
print('created %s' % out_dir)
subdir_paths = [os.path.join(api_modules, d)
for d in os.listdir(api_modules)
if not d.startswith('.')]
out_files = [os.path.join(out_dir, os.path.basename(d)) + '.md'
for d in subdir_paths]
for sub_p, out_f in zip(subdir_paths, out_files):
module_paths = (os.path.join(sub_p, m)
for m in os.listdir(sub_p)
if not m.startswith('.'))
new_output = []
if str_above_header:
new_output.append(str_above_header)
for p in module_paths:
with open(p, 'r') as r:
new_output.extend(r.readlines())
msg = ''
if not os.path.isfile(out_f):
msg = 'created'
if msg != 'created':
with open(out_f, 'r') as f:
prev = f.readlines()
if prev != new_output:
msg = 'updated'
else:
msg = 'skipped'
if msg != 'skipped':
with open(out_f, 'w') as f:
f.write(''.join(new_output))
if printlog:
print('%s %s' % (msg, out_f)) | python | def summarize_methdods_and_functions(api_modules, out_dir,
printlog=False, clean=True,
str_above_header=''):
"""Generates subpacke-level summary files.
Description
-----------
A function to generate subpacke-level summary markdown API files from
a module-level API documentation previously created via the
`generate_api_docs` function.
The output structure is:
package/package.subpackage.md
Parameters
----------
api_modules : str
Path to the API documentation crated via `generate_api_docs`
out_dir : str
Path to the desired output directory for the new markdown files.
clean : bool (default: False)
Removes previously existing API directory if True.
printlog : bool (default: True)
Prints a progress log to the standard output screen if True.
str_above_header : str (default: '')
Places a string just above the header.
"""
if printlog:
print('\n\nGenerating Subpackage Files\n%s\n' % (50 * '='))
if clean:
if os.path.isdir(out_dir):
shutil.rmtree(out_dir)
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
if printlog:
print('created %s' % out_dir)
subdir_paths = [os.path.join(api_modules, d)
for d in os.listdir(api_modules)
if not d.startswith('.')]
out_files = [os.path.join(out_dir, os.path.basename(d)) + '.md'
for d in subdir_paths]
for sub_p, out_f in zip(subdir_paths, out_files):
module_paths = (os.path.join(sub_p, m)
for m in os.listdir(sub_p)
if not m.startswith('.'))
new_output = []
if str_above_header:
new_output.append(str_above_header)
for p in module_paths:
with open(p, 'r') as r:
new_output.extend(r.readlines())
msg = ''
if not os.path.isfile(out_f):
msg = 'created'
if msg != 'created':
with open(out_f, 'r') as f:
prev = f.readlines()
if prev != new_output:
msg = 'updated'
else:
msg = 'skipped'
if msg != 'skipped':
with open(out_f, 'w') as f:
f.write(''.join(new_output))
if printlog:
print('%s %s' % (msg, out_f)) | [
"def",
"summarize_methdods_and_functions",
"(",
"api_modules",
",",
"out_dir",
",",
"printlog",
"=",
"False",
",",
"clean",
"=",
"True",
",",
"str_above_header",
"=",
"''",
")",
":",
"if",
"printlog",
":",
"print",
"(",
"'\\n\\nGenerating Subpackage Files\\n%s\\n'",... | Generates subpacke-level summary files.
Description
-----------
A function to generate subpacke-level summary markdown API files from
a module-level API documentation previously created via the
`generate_api_docs` function.
The output structure is:
package/package.subpackage.md
Parameters
----------
api_modules : str
Path to the API documentation crated via `generate_api_docs`
out_dir : str
Path to the desired output directory for the new markdown files.
clean : bool (default: False)
Removes previously existing API directory if True.
printlog : bool (default: True)
Prints a progress log to the standard output screen if True.
str_above_header : str (default: '')
Places a string just above the header. | [
"Generates",
"subpacke",
"-",
"level",
"summary",
"files",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/docs/make_api.py#L284-L359 | train | 29,806 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb.fetch_pdb | def fetch_pdb(self, pdb_code):
"""Fetches PDB file contents from the Protein Databank at rcsb.org.
Parameters
----------
pdb_code : str
A 4-letter PDB code, e.g., "3eiy".
Returns
---------
self
"""
self.pdb_path, self.pdb_text = self._fetch_pdb(pdb_code)
self._df = self._construct_df(pdb_lines=self.pdb_text.splitlines(True))
return self | python | def fetch_pdb(self, pdb_code):
"""Fetches PDB file contents from the Protein Databank at rcsb.org.
Parameters
----------
pdb_code : str
A 4-letter PDB code, e.g., "3eiy".
Returns
---------
self
"""
self.pdb_path, self.pdb_text = self._fetch_pdb(pdb_code)
self._df = self._construct_df(pdb_lines=self.pdb_text.splitlines(True))
return self | [
"def",
"fetch_pdb",
"(",
"self",
",",
"pdb_code",
")",
":",
"self",
".",
"pdb_path",
",",
"self",
".",
"pdb_text",
"=",
"self",
".",
"_fetch_pdb",
"(",
"pdb_code",
")",
"self",
".",
"_df",
"=",
"self",
".",
"_construct_df",
"(",
"pdb_lines",
"=",
"self... | Fetches PDB file contents from the Protein Databank at rcsb.org.
Parameters
----------
pdb_code : str
A 4-letter PDB code, e.g., "3eiy".
Returns
---------
self | [
"Fetches",
"PDB",
"file",
"contents",
"from",
"the",
"Protein",
"Databank",
"at",
"rcsb",
".",
"org",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L95-L110 | train | 29,807 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb.get | def get(self, s, df=None, invert=False, records=('ATOM', 'HETATM')):
"""Filter PDB DataFrames by properties
Parameters
----------
s : str in {'main chain', 'hydrogen', 'c-alpha', 'heavy'}
String to specify which entries to return.
df : pandas.DataFrame, default: None
Optional DataFrame to perform the filter operation on.
If df=None, filters on self.df['ATOM'].
invert : bool, default: True
Inverts the search query. For example if s='hydrogen' and
invert=True, all but hydrogen entries are returned.
records : iterable, default: ('ATOM', 'HETATM')
Specify which record sections to consider. For example, to consider
both protein and ligand atoms, set `records=('ATOM', 'HETATM')`.
This setting is ignored if `df` is not set to None.
For downward compatibility, a string argument is still supported
but deprecated and will be removed in future versions.
Returns
--------
df : pandas.DataFrame
Returns a DataFrame view on the filtered entries.
"""
if isinstance(records, str):
warnings.warn('Using a string as `records` argument is '
'deprecated and will not be supported in future'
' versions. Please use a tuple or'
' other iterable instead', DeprecationWarning)
records = (records,)
if not self._get_dict:
self._get_dict = self._init_get_dict()
if s not in self._get_dict.keys():
raise AttributeError('s must be in %s' % self._get_dict.keys())
if not df:
df = pd.concat(objs=[self.df[i] for i in records])
return self._get_dict[s](df, invert=invert) | python | def get(self, s, df=None, invert=False, records=('ATOM', 'HETATM')):
"""Filter PDB DataFrames by properties
Parameters
----------
s : str in {'main chain', 'hydrogen', 'c-alpha', 'heavy'}
String to specify which entries to return.
df : pandas.DataFrame, default: None
Optional DataFrame to perform the filter operation on.
If df=None, filters on self.df['ATOM'].
invert : bool, default: True
Inverts the search query. For example if s='hydrogen' and
invert=True, all but hydrogen entries are returned.
records : iterable, default: ('ATOM', 'HETATM')
Specify which record sections to consider. For example, to consider
both protein and ligand atoms, set `records=('ATOM', 'HETATM')`.
This setting is ignored if `df` is not set to None.
For downward compatibility, a string argument is still supported
but deprecated and will be removed in future versions.
Returns
--------
df : pandas.DataFrame
Returns a DataFrame view on the filtered entries.
"""
if isinstance(records, str):
warnings.warn('Using a string as `records` argument is '
'deprecated and will not be supported in future'
' versions. Please use a tuple or'
' other iterable instead', DeprecationWarning)
records = (records,)
if not self._get_dict:
self._get_dict = self._init_get_dict()
if s not in self._get_dict.keys():
raise AttributeError('s must be in %s' % self._get_dict.keys())
if not df:
df = pd.concat(objs=[self.df[i] for i in records])
return self._get_dict[s](df, invert=invert) | [
"def",
"get",
"(",
"self",
",",
"s",
",",
"df",
"=",
"None",
",",
"invert",
"=",
"False",
",",
"records",
"=",
"(",
"'ATOM'",
",",
"'HETATM'",
")",
")",
":",
"if",
"isinstance",
"(",
"records",
",",
"str",
")",
":",
"warnings",
".",
"warn",
"(",
... | Filter PDB DataFrames by properties
Parameters
----------
s : str in {'main chain', 'hydrogen', 'c-alpha', 'heavy'}
String to specify which entries to return.
df : pandas.DataFrame, default: None
Optional DataFrame to perform the filter operation on.
If df=None, filters on self.df['ATOM'].
invert : bool, default: True
Inverts the search query. For example if s='hydrogen' and
invert=True, all but hydrogen entries are returned.
records : iterable, default: ('ATOM', 'HETATM')
Specify which record sections to consider. For example, to consider
both protein and ligand atoms, set `records=('ATOM', 'HETATM')`.
This setting is ignored if `df` is not set to None.
For downward compatibility, a string argument is still supported
but deprecated and will be removed in future versions.
Returns
--------
df : pandas.DataFrame
Returns a DataFrame view on the filtered entries. | [
"Filter",
"PDB",
"DataFrames",
"by",
"properties"
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L112-L154 | train | 29,808 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb.impute_element | def impute_element(self, records=('ATOM', 'HETATM'), inplace=False):
"""Impute element_symbol from atom_name section.
Parameters
----------
records : iterable, default: ('ATOM', 'HETATM')
Coordinate sections for which the element symbols should be
imputed.
inplace : bool, (default: False
Performs the operation in-place if True and returns a copy of the
PDB DataFrame otherwise.
Returns
---------
DataFrame
"""
if inplace:
t = self.df
else:
t = self.df.copy()
for d in self.df:
t[d] = self.df[d].copy()
for sec in records:
t[sec]['element_symbol'] = \
t[sec][['atom_name', 'element_symbol']].\
apply(lambda x: x[0][1]
if len(x[1]) == 3
else x[0][0], axis=1)
return t | python | def impute_element(self, records=('ATOM', 'HETATM'), inplace=False):
"""Impute element_symbol from atom_name section.
Parameters
----------
records : iterable, default: ('ATOM', 'HETATM')
Coordinate sections for which the element symbols should be
imputed.
inplace : bool, (default: False
Performs the operation in-place if True and returns a copy of the
PDB DataFrame otherwise.
Returns
---------
DataFrame
"""
if inplace:
t = self.df
else:
t = self.df.copy()
for d in self.df:
t[d] = self.df[d].copy()
for sec in records:
t[sec]['element_symbol'] = \
t[sec][['atom_name', 'element_symbol']].\
apply(lambda x: x[0][1]
if len(x[1]) == 3
else x[0][0], axis=1)
return t | [
"def",
"impute_element",
"(",
"self",
",",
"records",
"=",
"(",
"'ATOM'",
",",
"'HETATM'",
")",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"inplace",
":",
"t",
"=",
"self",
".",
"df",
"else",
":",
"t",
"=",
"self",
".",
"df",
".",
"copy",
"(",... | Impute element_symbol from atom_name section.
Parameters
----------
records : iterable, default: ('ATOM', 'HETATM')
Coordinate sections for which the element symbols should be
imputed.
inplace : bool, (default: False
Performs the operation in-place if True and returns a copy of the
PDB DataFrame otherwise.
Returns
---------
DataFrame | [
"Impute",
"element_symbol",
"from",
"atom_name",
"section",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L156-L187 | train | 29,809 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb.rmsd | def rmsd(df1, df2, s=None, invert=False):
"""Compute the Root Mean Square Deviation between molecules.
Parameters
----------
df1 : pandas.DataFrame
DataFrame with HETATM, ATOM, and/or ANISOU entries.
df2 : pandas.DataFrame
Second DataFrame for RMSD computation against df1. Must have the
same number of entries as df1.
s : {'main chain', 'hydrogen', 'c-alpha', 'heavy', 'carbon'} or None,
default: None
String to specify which entries to consider. If None, considers
all atoms for comparison.
invert : bool, default: False
Inverts the string query if true. For example, the setting
`s='hydrogen', invert=True` computes the RMSD based on all
but hydrogen atoms.
Returns
---------
rmsd : float
Root Mean Square Deviation between df1 and df2
"""
if df1.shape[0] != df2.shape[0]:
raise AttributeError('DataFrames have unequal lengths')
get_dict = PandasPdb._init_get_dict()
if s:
if s not in get_dict.keys():
raise AttributeError('s must be in '
'%s or None' % get_dict.keys())
df1 = get_dict[s](df1, invert=invert)
df2 = get_dict[s](df2, invert=invert)
total = ((df1['x_coord'].values - df2['x_coord'].values)**2 +
(df1['y_coord'].values - df2['y_coord'].values)**2 +
(df1['z_coord'].values - df2['z_coord'].values)**2)
rmsd = round((total.sum() / df1.shape[0])**0.5, 4)
return rmsd | python | def rmsd(df1, df2, s=None, invert=False):
"""Compute the Root Mean Square Deviation between molecules.
Parameters
----------
df1 : pandas.DataFrame
DataFrame with HETATM, ATOM, and/or ANISOU entries.
df2 : pandas.DataFrame
Second DataFrame for RMSD computation against df1. Must have the
same number of entries as df1.
s : {'main chain', 'hydrogen', 'c-alpha', 'heavy', 'carbon'} or None,
default: None
String to specify which entries to consider. If None, considers
all atoms for comparison.
invert : bool, default: False
Inverts the string query if true. For example, the setting
`s='hydrogen', invert=True` computes the RMSD based on all
but hydrogen atoms.
Returns
---------
rmsd : float
Root Mean Square Deviation between df1 and df2
"""
if df1.shape[0] != df2.shape[0]:
raise AttributeError('DataFrames have unequal lengths')
get_dict = PandasPdb._init_get_dict()
if s:
if s not in get_dict.keys():
raise AttributeError('s must be in '
'%s or None' % get_dict.keys())
df1 = get_dict[s](df1, invert=invert)
df2 = get_dict[s](df2, invert=invert)
total = ((df1['x_coord'].values - df2['x_coord'].values)**2 +
(df1['y_coord'].values - df2['y_coord'].values)**2 +
(df1['z_coord'].values - df2['z_coord'].values)**2)
rmsd = round((total.sum() / df1.shape[0])**0.5, 4)
return rmsd | [
"def",
"rmsd",
"(",
"df1",
",",
"df2",
",",
"s",
"=",
"None",
",",
"invert",
"=",
"False",
")",
":",
"if",
"df1",
".",
"shape",
"[",
"0",
"]",
"!=",
"df2",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"AttributeError",
"(",
"'DataFrames have unequal ... | Compute the Root Mean Square Deviation between molecules.
Parameters
----------
df1 : pandas.DataFrame
DataFrame with HETATM, ATOM, and/or ANISOU entries.
df2 : pandas.DataFrame
Second DataFrame for RMSD computation against df1. Must have the
same number of entries as df1.
s : {'main chain', 'hydrogen', 'c-alpha', 'heavy', 'carbon'} or None,
default: None
String to specify which entries to consider. If None, considers
all atoms for comparison.
invert : bool, default: False
Inverts the string query if true. For example, the setting
`s='hydrogen', invert=True` computes the RMSD based on all
but hydrogen atoms.
Returns
---------
rmsd : float
Root Mean Square Deviation between df1 and df2 | [
"Compute",
"the",
"Root",
"Mean",
"Square",
"Deviation",
"between",
"molecules",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L190-L232 | train | 29,810 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb._init_get_dict | def _init_get_dict():
"""Initialize dictionary for filter operations."""
get_dict = {'main chain': PandasPdb._get_mainchain,
'hydrogen': PandasPdb._get_hydrogen,
'c-alpha': PandasPdb._get_calpha,
'carbon': PandasPdb._get_carbon,
'heavy': PandasPdb._get_heavy}
return get_dict | python | def _init_get_dict():
"""Initialize dictionary for filter operations."""
get_dict = {'main chain': PandasPdb._get_mainchain,
'hydrogen': PandasPdb._get_hydrogen,
'c-alpha': PandasPdb._get_calpha,
'carbon': PandasPdb._get_carbon,
'heavy': PandasPdb._get_heavy}
return get_dict | [
"def",
"_init_get_dict",
"(",
")",
":",
"get_dict",
"=",
"{",
"'main chain'",
":",
"PandasPdb",
".",
"_get_mainchain",
",",
"'hydrogen'",
":",
"PandasPdb",
".",
"_get_hydrogen",
",",
"'c-alpha'",
":",
"PandasPdb",
".",
"_get_calpha",
",",
"'carbon'",
":",
"Pan... | Initialize dictionary for filter operations. | [
"Initialize",
"dictionary",
"for",
"filter",
"operations",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L235-L242 | train | 29,811 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb._read_pdb | def _read_pdb(path):
"""Read PDB file from local drive."""
r_mode = 'r'
openf = open
if path.endswith('.gz'):
r_mode = 'rb'
openf = gzip.open
with openf(path, r_mode) as f:
txt = f.read()
if path.endswith('.gz'):
if sys.version_info[0] >= 3:
txt = txt.decode('utf-8')
else:
txt = txt.encode('ascii')
return path, txt | python | def _read_pdb(path):
"""Read PDB file from local drive."""
r_mode = 'r'
openf = open
if path.endswith('.gz'):
r_mode = 'rb'
openf = gzip.open
with openf(path, r_mode) as f:
txt = f.read()
if path.endswith('.gz'):
if sys.version_info[0] >= 3:
txt = txt.decode('utf-8')
else:
txt = txt.encode('ascii')
return path, txt | [
"def",
"_read_pdb",
"(",
"path",
")",
":",
"r_mode",
"=",
"'r'",
"openf",
"=",
"open",
"if",
"path",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"r_mode",
"=",
"'rb'",
"openf",
"=",
"gzip",
".",
"open",
"with",
"openf",
"(",
"path",
",",
"r_mode",
")"... | Read PDB file from local drive. | [
"Read",
"PDB",
"file",
"from",
"local",
"drive",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L245-L259 | train | 29,812 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb._fetch_pdb | def _fetch_pdb(pdb_code):
"""Load PDB file from rcsb.org."""
txt = None
url = 'http://www.rcsb.org/pdb/files/%s.pdb' % pdb_code.lower()
try:
response = urlopen(url)
txt = response.read()
if sys.version_info[0] >= 3:
txt = txt.decode('utf-8')
else:
txt = txt.encode('ascii')
except HTTPError as e:
print('HTTP Error %s' % e.code)
except URLError as e:
print('URL Error %s' % e.args)
return url, txt | python | def _fetch_pdb(pdb_code):
"""Load PDB file from rcsb.org."""
txt = None
url = 'http://www.rcsb.org/pdb/files/%s.pdb' % pdb_code.lower()
try:
response = urlopen(url)
txt = response.read()
if sys.version_info[0] >= 3:
txt = txt.decode('utf-8')
else:
txt = txt.encode('ascii')
except HTTPError as e:
print('HTTP Error %s' % e.code)
except URLError as e:
print('URL Error %s' % e.args)
return url, txt | [
"def",
"_fetch_pdb",
"(",
"pdb_code",
")",
":",
"txt",
"=",
"None",
"url",
"=",
"'http://www.rcsb.org/pdb/files/%s.pdb'",
"%",
"pdb_code",
".",
"lower",
"(",
")",
"try",
":",
"response",
"=",
"urlopen",
"(",
"url",
")",
"txt",
"=",
"response",
".",
"read",... | Load PDB file from rcsb.org. | [
"Load",
"PDB",
"file",
"from",
"rcsb",
".",
"org",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L262-L277 | train | 29,813 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb._parse_header_code | def _parse_header_code(self):
"""Extract header information and PDB code."""
code, header = '', ''
if 'OTHERS' in self.df:
header = (self.df['OTHERS'][self.df['OTHERS']['record_name'] ==
'HEADER'])
if not header.empty:
header = header['entry'].values[0]
s = header.split()
if s:
code = s[-1].lower()
return header, code | python | def _parse_header_code(self):
"""Extract header information and PDB code."""
code, header = '', ''
if 'OTHERS' in self.df:
header = (self.df['OTHERS'][self.df['OTHERS']['record_name'] ==
'HEADER'])
if not header.empty:
header = header['entry'].values[0]
s = header.split()
if s:
code = s[-1].lower()
return header, code | [
"def",
"_parse_header_code",
"(",
"self",
")",
":",
"code",
",",
"header",
"=",
"''",
",",
"''",
"if",
"'OTHERS'",
"in",
"self",
".",
"df",
":",
"header",
"=",
"(",
"self",
".",
"df",
"[",
"'OTHERS'",
"]",
"[",
"self",
".",
"df",
"[",
"'OTHERS'",
... | Extract header information and PDB code. | [
"Extract",
"header",
"information",
"and",
"PDB",
"code",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L279-L291 | train | 29,814 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb._get_mainchain | def _get_mainchain(df, invert):
"""Return only main chain atom entries from a DataFrame"""
if invert:
mc = df[(df['atom_name'] != 'C') &
(df['atom_name'] != 'O') &
(df['atom_name'] != 'N') &
(df['atom_name'] != 'CA')]
else:
mc = df[(df['atom_name'] == 'C') |
(df['atom_name'] == 'O') |
(df['atom_name'] == 'N') |
(df['atom_name'] == 'CA')]
return mc | python | def _get_mainchain(df, invert):
"""Return only main chain atom entries from a DataFrame"""
if invert:
mc = df[(df['atom_name'] != 'C') &
(df['atom_name'] != 'O') &
(df['atom_name'] != 'N') &
(df['atom_name'] != 'CA')]
else:
mc = df[(df['atom_name'] == 'C') |
(df['atom_name'] == 'O') |
(df['atom_name'] == 'N') |
(df['atom_name'] == 'CA')]
return mc | [
"def",
"_get_mainchain",
"(",
"df",
",",
"invert",
")",
":",
"if",
"invert",
":",
"mc",
"=",
"df",
"[",
"(",
"df",
"[",
"'atom_name'",
"]",
"!=",
"'C'",
")",
"&",
"(",
"df",
"[",
"'atom_name'",
"]",
"!=",
"'O'",
")",
"&",
"(",
"df",
"[",
"'atom... | Return only main chain atom entries from a DataFrame | [
"Return",
"only",
"main",
"chain",
"atom",
"entries",
"from",
"a",
"DataFrame"
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L294-L306 | train | 29,815 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb.amino3to1 | def amino3to1(self, record='ATOM',
residue_col='residue_name', fillna='?'):
"""Creates 1-letter amino acid codes from DataFrame
Non-canonical amino-acids are converted as follows:
ASH (protonated ASP) => D
CYX (disulfide-bonded CYS) => C
GLH (protonated GLU) => E
HID/HIE/HIP (different protonation states of HIS) = H
HYP (hydroxyproline) => P
MSE (selenomethionine) => M
Parameters
----------
record : str, default: 'ATOM'
Specfies the record DataFrame.
residue_col : str, default: 'residue_name'
Column in `record` DataFrame to look for 3-letter amino acid
codes for the conversion.
fillna : str, default: '?'
Placeholder string to use for unknown amino acids.
Returns
---------
pandas.DataFrame : Pandas DataFrame object consisting of two columns,
`'chain_id'` and `'residue_name'`, where the former contains
the chain ID of the amino acid and the latter
contains the 1-letter amino acid code, respectively.
"""
tmp = self.df[record]
cmp = 'placeholder'
indices = []
for num, ind in zip(tmp['residue_number'], np.arange(tmp.shape[0])):
if num != cmp:
indices.append(ind)
cmp = num
transl = tmp.iloc[indices][residue_col].map(
amino3to1dict).fillna(fillna)
return pd.concat((tmp.iloc[indices]['chain_id'], transl), axis=1) | python | def amino3to1(self, record='ATOM',
residue_col='residue_name', fillna='?'):
"""Creates 1-letter amino acid codes from DataFrame
Non-canonical amino-acids are converted as follows:
ASH (protonated ASP) => D
CYX (disulfide-bonded CYS) => C
GLH (protonated GLU) => E
HID/HIE/HIP (different protonation states of HIS) = H
HYP (hydroxyproline) => P
MSE (selenomethionine) => M
Parameters
----------
record : str, default: 'ATOM'
Specfies the record DataFrame.
residue_col : str, default: 'residue_name'
Column in `record` DataFrame to look for 3-letter amino acid
codes for the conversion.
fillna : str, default: '?'
Placeholder string to use for unknown amino acids.
Returns
---------
pandas.DataFrame : Pandas DataFrame object consisting of two columns,
`'chain_id'` and `'residue_name'`, where the former contains
the chain ID of the amino acid and the latter
contains the 1-letter amino acid code, respectively.
"""
tmp = self.df[record]
cmp = 'placeholder'
indices = []
for num, ind in zip(tmp['residue_number'], np.arange(tmp.shape[0])):
if num != cmp:
indices.append(ind)
cmp = num
transl = tmp.iloc[indices][residue_col].map(
amino3to1dict).fillna(fillna)
return pd.concat((tmp.iloc[indices]['chain_id'], transl), axis=1) | [
"def",
"amino3to1",
"(",
"self",
",",
"record",
"=",
"'ATOM'",
",",
"residue_col",
"=",
"'residue_name'",
",",
"fillna",
"=",
"'?'",
")",
":",
"tmp",
"=",
"self",
".",
"df",
"[",
"record",
"]",
"cmp",
"=",
"'placeholder'",
"indices",
"=",
"[",
"]",
"... | Creates 1-letter amino acid codes from DataFrame
Non-canonical amino-acids are converted as follows:
ASH (protonated ASP) => D
CYX (disulfide-bonded CYS) => C
GLH (protonated GLU) => E
HID/HIE/HIP (different protonation states of HIS) = H
HYP (hydroxyproline) => P
MSE (selenomethionine) => M
Parameters
----------
record : str, default: 'ATOM'
Specfies the record DataFrame.
residue_col : str, default: 'residue_name'
Column in `record` DataFrame to look for 3-letter amino acid
codes for the conversion.
fillna : str, default: '?'
Placeholder string to use for unknown amino acids.
Returns
---------
pandas.DataFrame : Pandas DataFrame object consisting of two columns,
`'chain_id'` and `'residue_name'`, where the former contains
the chain ID of the amino acid and the latter
contains the 1-letter amino acid code, respectively. | [
"Creates",
"1",
"-",
"letter",
"amino",
"acid",
"codes",
"from",
"DataFrame"
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L375-L417 | train | 29,816 |
rasbt/biopandas | biopandas/pdb/pandas_pdb.py | PandasPdb.to_pdb | def to_pdb(self, path, records=None, gz=False, append_newline=True):
"""Write record DataFrames to a PDB file or gzipped PDB file.
Parameters
----------
path : str
A valid output path for the pdb file
records : iterable, default: None
A list of PDB record sections in
{'ATOM', 'HETATM', 'ANISOU', 'OTHERS'} that are to be written.
Writes all lines to PDB if `records=None`.
gz : bool, default: False
Writes a gzipped PDB file if True.
append_newline : bool, default: True
Appends a new line at the end of the PDB file if True
"""
if gz:
openf = gzip.open
w_mode = 'wt'
else:
openf = open
w_mode = 'w'
if not records:
records = self.df.keys()
dfs = {r: self.df[r].copy() for r in records if not self.df[r].empty}
for r in dfs.keys():
for col in pdb_records[r]:
dfs[r][col['id']] = dfs[r][col['id']].apply(col['strf'])
dfs[r]['OUT'] = pd.Series('', index=dfs[r].index)
for c in dfs[r].columns:
if c in {'line_idx', 'OUT'}:
pass
elif r in {'ATOM', 'HETATM'} and c not in pdb_df_columns:
warn('Column %s is not an expected column and'
' will be skipped.' % c)
else:
dfs[r]['OUT'] = dfs[r]['OUT'] + dfs[r][c]
if pd_version < LooseVersion('0.17.0'):
warn("You are using an old pandas version (< 0.17)"
" that relies on the old sorting syntax."
" Please consider updating your pandas"
" installation to a more recent version.",
DeprecationWarning)
df.sort(columns='line_idx', inplace=True)
elif pd_version < LooseVersion('0.23.0'):
df = pd.concat(dfs)
else:
df = pd.concat(dfs, sort=False)
df.sort_values(by='line_idx', inplace=True)
with openf(path, w_mode) as f:
s = df['OUT'].tolist()
for idx in range(len(s)):
if len(s[idx]) < 80:
s[idx] = '%s%s' % (s[idx], ' ' * (80 - len(s[idx])))
to_write = '\n'.join(s)
f.write(to_write)
if append_newline:
if gz:
f.write('\n')
else:
f.write('\n') | python | def to_pdb(self, path, records=None, gz=False, append_newline=True):
"""Write record DataFrames to a PDB file or gzipped PDB file.
Parameters
----------
path : str
A valid output path for the pdb file
records : iterable, default: None
A list of PDB record sections in
{'ATOM', 'HETATM', 'ANISOU', 'OTHERS'} that are to be written.
Writes all lines to PDB if `records=None`.
gz : bool, default: False
Writes a gzipped PDB file if True.
append_newline : bool, default: True
Appends a new line at the end of the PDB file if True
"""
if gz:
openf = gzip.open
w_mode = 'wt'
else:
openf = open
w_mode = 'w'
if not records:
records = self.df.keys()
dfs = {r: self.df[r].copy() for r in records if not self.df[r].empty}
for r in dfs.keys():
for col in pdb_records[r]:
dfs[r][col['id']] = dfs[r][col['id']].apply(col['strf'])
dfs[r]['OUT'] = pd.Series('', index=dfs[r].index)
for c in dfs[r].columns:
if c in {'line_idx', 'OUT'}:
pass
elif r in {'ATOM', 'HETATM'} and c not in pdb_df_columns:
warn('Column %s is not an expected column and'
' will be skipped.' % c)
else:
dfs[r]['OUT'] = dfs[r]['OUT'] + dfs[r][c]
if pd_version < LooseVersion('0.17.0'):
warn("You are using an old pandas version (< 0.17)"
" that relies on the old sorting syntax."
" Please consider updating your pandas"
" installation to a more recent version.",
DeprecationWarning)
df.sort(columns='line_idx', inplace=True)
elif pd_version < LooseVersion('0.23.0'):
df = pd.concat(dfs)
else:
df = pd.concat(dfs, sort=False)
df.sort_values(by='line_idx', inplace=True)
with openf(path, w_mode) as f:
s = df['OUT'].tolist()
for idx in range(len(s)):
if len(s[idx]) < 80:
s[idx] = '%s%s' % (s[idx], ' ' * (80 - len(s[idx])))
to_write = '\n'.join(s)
f.write(to_write)
if append_newline:
if gz:
f.write('\n')
else:
f.write('\n') | [
"def",
"to_pdb",
"(",
"self",
",",
"path",
",",
"records",
"=",
"None",
",",
"gz",
"=",
"False",
",",
"append_newline",
"=",
"True",
")",
":",
"if",
"gz",
":",
"openf",
"=",
"gzip",
".",
"open",
"w_mode",
"=",
"'wt'",
"else",
":",
"openf",
"=",
"... | Write record DataFrames to a PDB file or gzipped PDB file.
Parameters
----------
path : str
A valid output path for the pdb file
records : iterable, default: None
A list of PDB record sections in
{'ATOM', 'HETATM', 'ANISOU', 'OTHERS'} that are to be written.
Writes all lines to PDB if `records=None`.
gz : bool, default: False
Writes a gzipped PDB file if True.
append_newline : bool, default: True
Appends a new line at the end of the PDB file if True | [
"Write",
"record",
"DataFrames",
"to",
"a",
"PDB",
"file",
"or",
"gzipped",
"PDB",
"file",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L478-L551 | train | 29,817 |
rasbt/biopandas | biopandas/mol2/mol2_io.py | split_multimol2 | def split_multimol2(mol2_path):
r"""
Splits a multi-mol2 file into individual Mol2 file contents.
Parameters
-----------
mol2_path : str
Path to the multi-mol2 file. Parses gzip files if the filepath
ends on .gz.
Returns
-----------
A generator object for lists for every extracted mol2-file. Lists contain
the molecule ID and the mol2 file contents.
e.g., ['ID1234', ['@<TRIPOS>MOLECULE\n', '...']]. Note that bytestrings
are returned (for reasons of efficieny) if the Mol2 content is read
from a gzip (.gz) file.
"""
if mol2_path.endswith('.gz'):
open_file = gzip.open
read_mode = 'rb'
else:
open_file = open
read_mode = 'r'
check = {'rb': b'@<TRIPOS>MOLECULE', 'r': '@<TRIPOS>MOLECULE'}
with open_file(mol2_path, read_mode) as f:
mol2 = ['', []]
while True:
try:
line = next(f)
if line.startswith(check[read_mode]):
if mol2[0]:
yield(mol2)
mol2 = ['', []]
mol2_id = next(f)
mol2[0] = mol2_id.rstrip()
mol2[1].append(line)
mol2[1].append(mol2_id)
else:
mol2[1].append(line)
except StopIteration:
yield(mol2)
return | python | def split_multimol2(mol2_path):
r"""
Splits a multi-mol2 file into individual Mol2 file contents.
Parameters
-----------
mol2_path : str
Path to the multi-mol2 file. Parses gzip files if the filepath
ends on .gz.
Returns
-----------
A generator object for lists for every extracted mol2-file. Lists contain
the molecule ID and the mol2 file contents.
e.g., ['ID1234', ['@<TRIPOS>MOLECULE\n', '...']]. Note that bytestrings
are returned (for reasons of efficieny) if the Mol2 content is read
from a gzip (.gz) file.
"""
if mol2_path.endswith('.gz'):
open_file = gzip.open
read_mode = 'rb'
else:
open_file = open
read_mode = 'r'
check = {'rb': b'@<TRIPOS>MOLECULE', 'r': '@<TRIPOS>MOLECULE'}
with open_file(mol2_path, read_mode) as f:
mol2 = ['', []]
while True:
try:
line = next(f)
if line.startswith(check[read_mode]):
if mol2[0]:
yield(mol2)
mol2 = ['', []]
mol2_id = next(f)
mol2[0] = mol2_id.rstrip()
mol2[1].append(line)
mol2[1].append(mol2_id)
else:
mol2[1].append(line)
except StopIteration:
yield(mol2)
return | [
"def",
"split_multimol2",
"(",
"mol2_path",
")",
":",
"if",
"mol2_path",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"open_file",
"=",
"gzip",
".",
"open",
"read_mode",
"=",
"'rb'",
"else",
":",
"open_file",
"=",
"open",
"read_mode",
"=",
"'r'",
"check",
"=... | r"""
Splits a multi-mol2 file into individual Mol2 file contents.
Parameters
-----------
mol2_path : str
Path to the multi-mol2 file. Parses gzip files if the filepath
ends on .gz.
Returns
-----------
A generator object for lists for every extracted mol2-file. Lists contain
the molecule ID and the mol2 file contents.
e.g., ['ID1234', ['@<TRIPOS>MOLECULE\n', '...']]. Note that bytestrings
are returned (for reasons of efficieny) if the Mol2 content is read
from a gzip (.gz) file. | [
"r",
"Splits",
"a",
"multi",
"-",
"mol2",
"file",
"into",
"individual",
"Mol2",
"file",
"contents",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/mol2/mol2_io.py#L10-L54 | train | 29,818 |
rasbt/biopandas | biopandas/mol2/pandas_mol2.py | PandasMol2._load_mol2 | def _load_mol2(self, mol2_lines, mol2_code, columns):
"""Load mol2 contents into assert_raise_message instance"""
if columns is None:
col_names = COLUMN_NAMES
col_types = COLUMN_TYPES
else:
col_names, col_types = [], []
for i in range(len(columns)):
col_names.append(columns[i][0])
col_types.append(columns[i][1])
try:
self.mol2_text = ''.join(mol2_lines)
self.code = mol2_code
except TypeError:
mol2_lines = [m.decode() for m in mol2_lines]
self.mol2_text = ''.join(mol2_lines)
self.code = mol2_code.decode()
self._df = self._construct_df(mol2_lines, col_names, col_types) | python | def _load_mol2(self, mol2_lines, mol2_code, columns):
"""Load mol2 contents into assert_raise_message instance"""
if columns is None:
col_names = COLUMN_NAMES
col_types = COLUMN_TYPES
else:
col_names, col_types = [], []
for i in range(len(columns)):
col_names.append(columns[i][0])
col_types.append(columns[i][1])
try:
self.mol2_text = ''.join(mol2_lines)
self.code = mol2_code
except TypeError:
mol2_lines = [m.decode() for m in mol2_lines]
self.mol2_text = ''.join(mol2_lines)
self.code = mol2_code.decode()
self._df = self._construct_df(mol2_lines, col_names, col_types) | [
"def",
"_load_mol2",
"(",
"self",
",",
"mol2_lines",
",",
"mol2_code",
",",
"columns",
")",
":",
"if",
"columns",
"is",
"None",
":",
"col_names",
"=",
"COLUMN_NAMES",
"col_types",
"=",
"COLUMN_TYPES",
"else",
":",
"col_names",
",",
"col_types",
"=",
"[",
"... | Load mol2 contents into assert_raise_message instance | [
"Load",
"mol2",
"contents",
"into",
"assert_raise_message",
"instance"
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/mol2/pandas_mol2.py#L67-L86 | train | 29,819 |
rasbt/biopandas | biopandas/mol2/pandas_mol2.py | PandasMol2.read_mol2_from_list | def read_mol2_from_list(self, mol2_lines, mol2_code, columns=None):
r"""Reads Mol2 file from a list into DataFrames
Attributes
----------
mol2_lines : list
A list of lines containing the mol2 file contents. For example,
['@<TRIPOS>MOLECULE\n',
'ZINC38611810\n',
' 65 68 0 0 0\n',
'SMALL\n',
'NO_CHARGES\n',
'\n',
'@<TRIPOS>ATOM\n',
' 1 C1 -1.1786 2.7011 -4.0323 C.3 1 <0> -0.1537\n',
' 2 C2 -1.2950 1.2442 -3.5798 C.3 1 <0> -0.1156\n',
...]
mol2_code : str or None
Name or ID of the molecule.
columns : dict or None (default: None)
If None, this methods expects a 9-column ATOM section that contains
the following columns:
{0:('atom_id', int), 1:('atom_name', str),
2:('x', float), 3:('y', float), 4:('z', float),
5:('atom_type', str), 6:('subst_id', int),
7:('subst_name', str), 8:('charge', float)}
If your Mol2 files are formatted differently, you can provide your
own column_mapping dictionary in a format similar to the one above.
However, note that not all assert_raise_message methods may be
supported then.
Returns
---------
self
"""
self._load_mol2(mol2_lines, mol2_code, columns)
return self | python | def read_mol2_from_list(self, mol2_lines, mol2_code, columns=None):
r"""Reads Mol2 file from a list into DataFrames
Attributes
----------
mol2_lines : list
A list of lines containing the mol2 file contents. For example,
['@<TRIPOS>MOLECULE\n',
'ZINC38611810\n',
' 65 68 0 0 0\n',
'SMALL\n',
'NO_CHARGES\n',
'\n',
'@<TRIPOS>ATOM\n',
' 1 C1 -1.1786 2.7011 -4.0323 C.3 1 <0> -0.1537\n',
' 2 C2 -1.2950 1.2442 -3.5798 C.3 1 <0> -0.1156\n',
...]
mol2_code : str or None
Name or ID of the molecule.
columns : dict or None (default: None)
If None, this methods expects a 9-column ATOM section that contains
the following columns:
{0:('atom_id', int), 1:('atom_name', str),
2:('x', float), 3:('y', float), 4:('z', float),
5:('atom_type', str), 6:('subst_id', int),
7:('subst_name', str), 8:('charge', float)}
If your Mol2 files are formatted differently, you can provide your
own column_mapping dictionary in a format similar to the one above.
However, note that not all assert_raise_message methods may be
supported then.
Returns
---------
self
"""
self._load_mol2(mol2_lines, mol2_code, columns)
return self | [
"def",
"read_mol2_from_list",
"(",
"self",
",",
"mol2_lines",
",",
"mol2_code",
",",
"columns",
"=",
"None",
")",
":",
"self",
".",
"_load_mol2",
"(",
"mol2_lines",
",",
"mol2_code",
",",
"columns",
")",
"return",
"self"
] | r"""Reads Mol2 file from a list into DataFrames
Attributes
----------
mol2_lines : list
A list of lines containing the mol2 file contents. For example,
['@<TRIPOS>MOLECULE\n',
'ZINC38611810\n',
' 65 68 0 0 0\n',
'SMALL\n',
'NO_CHARGES\n',
'\n',
'@<TRIPOS>ATOM\n',
' 1 C1 -1.1786 2.7011 -4.0323 C.3 1 <0> -0.1537\n',
' 2 C2 -1.2950 1.2442 -3.5798 C.3 1 <0> -0.1156\n',
...]
mol2_code : str or None
Name or ID of the molecule.
columns : dict or None (default: None)
If None, this methods expects a 9-column ATOM section that contains
the following columns:
{0:('atom_id', int), 1:('atom_name', str),
2:('x', float), 3:('y', float), 4:('z', float),
5:('atom_type', str), 6:('subst_id', int),
7:('subst_name', str), 8:('charge', float)}
If your Mol2 files are formatted differently, you can provide your
own column_mapping dictionary in a format similar to the one above.
However, note that not all assert_raise_message methods may be
supported then.
Returns
---------
self | [
"r",
"Reads",
"Mol2",
"file",
"from",
"a",
"list",
"into",
"DataFrames"
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/mol2/pandas_mol2.py#L123-L162 | train | 29,820 |
rasbt/biopandas | biopandas/mol2/pandas_mol2.py | PandasMol2._get_atomsection | def _get_atomsection(mol2_lst):
"""Returns atom section from mol2 provided as list of strings"""
started = False
for idx, s in enumerate(mol2_lst):
if s.startswith('@<TRIPOS>ATOM'):
first_idx = idx + 1
started = True
elif started and s.startswith('@<TRIPOS>'):
last_idx_plus1 = idx
break
return mol2_lst[first_idx:last_idx_plus1] | python | def _get_atomsection(mol2_lst):
"""Returns atom section from mol2 provided as list of strings"""
started = False
for idx, s in enumerate(mol2_lst):
if s.startswith('@<TRIPOS>ATOM'):
first_idx = idx + 1
started = True
elif started and s.startswith('@<TRIPOS>'):
last_idx_plus1 = idx
break
return mol2_lst[first_idx:last_idx_plus1] | [
"def",
"_get_atomsection",
"(",
"mol2_lst",
")",
":",
"started",
"=",
"False",
"for",
"idx",
",",
"s",
"in",
"enumerate",
"(",
"mol2_lst",
")",
":",
"if",
"s",
".",
"startswith",
"(",
"'@<TRIPOS>ATOM'",
")",
":",
"first_idx",
"=",
"idx",
"+",
"1",
"sta... | Returns atom section from mol2 provided as list of strings | [
"Returns",
"atom",
"section",
"from",
"mol2",
"provided",
"as",
"list",
"of",
"strings"
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/mol2/pandas_mol2.py#L171-L181 | train | 29,821 |
rasbt/biopandas | biopandas/mol2/pandas_mol2.py | PandasMol2.rmsd | def rmsd(df1, df2, heavy_only=True):
"""Compute the Root Mean Square Deviation between molecules
Parameters
----------
df1 : pandas.DataFrame
DataFrame with HETATM, ATOM, and/or ANISOU entries
df2 : pandas.DataFrame
Second DataFrame for RMSD computation against df1. Must have the
same number of entries as df1
heavy_only : bool (default: True)
Which atoms to compare to compute the RMSD. If `True` (default),
computes the RMSD between non-hydrogen atoms only.
Returns
---------
rmsd : float
Root Mean Square Deviation between df1 and df2
"""
if df1.shape[0] != df2.shape[0]:
raise AttributeError('DataFrames have unequal lengths')
if heavy_only:
d1 = df1[df1['atom_type'] != 'H']
d2 = df2[df2['atom_type'] != 'H']
else:
d1, d2 = df1, df2
total = ((d1['x'].values - d2['x'].values)**2 +
(d1['y'].values - d2['y'].values)**2 +
(d1['z'].values - d2['z'].values)**2)
rmsd = round((total.sum() / df1.shape[0])**0.5, 4)
return rmsd | python | def rmsd(df1, df2, heavy_only=True):
"""Compute the Root Mean Square Deviation between molecules
Parameters
----------
df1 : pandas.DataFrame
DataFrame with HETATM, ATOM, and/or ANISOU entries
df2 : pandas.DataFrame
Second DataFrame for RMSD computation against df1. Must have the
same number of entries as df1
heavy_only : bool (default: True)
Which atoms to compare to compute the RMSD. If `True` (default),
computes the RMSD between non-hydrogen atoms only.
Returns
---------
rmsd : float
Root Mean Square Deviation between df1 and df2
"""
if df1.shape[0] != df2.shape[0]:
raise AttributeError('DataFrames have unequal lengths')
if heavy_only:
d1 = df1[df1['atom_type'] != 'H']
d2 = df2[df2['atom_type'] != 'H']
else:
d1, d2 = df1, df2
total = ((d1['x'].values - d2['x'].values)**2 +
(d1['y'].values - d2['y'].values)**2 +
(d1['z'].values - d2['z'].values)**2)
rmsd = round((total.sum() / df1.shape[0])**0.5, 4)
return rmsd | [
"def",
"rmsd",
"(",
"df1",
",",
"df2",
",",
"heavy_only",
"=",
"True",
")",
":",
"if",
"df1",
".",
"shape",
"[",
"0",
"]",
"!=",
"df2",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"AttributeError",
"(",
"'DataFrames have unequal lengths'",
")",
"if",
... | Compute the Root Mean Square Deviation between molecules
Parameters
----------
df1 : pandas.DataFrame
DataFrame with HETATM, ATOM, and/or ANISOU entries
df2 : pandas.DataFrame
Second DataFrame for RMSD computation against df1. Must have the
same number of entries as df1
heavy_only : bool (default: True)
Which atoms to compare to compute the RMSD. If `True` (default),
computes the RMSD between non-hydrogen atoms only.
Returns
---------
rmsd : float
Root Mean Square Deviation between df1 and df2 | [
"Compute",
"the",
"Root",
"Mean",
"Square",
"Deviation",
"between",
"molecules"
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/mol2/pandas_mol2.py#L195-L228 | train | 29,822 |
rasbt/biopandas | biopandas/mol2/pandas_mol2.py | PandasMol2.distance | def distance(self, xyz=(0.00, 0.00, 0.00)):
"""Computes Euclidean distance between atoms in
self.df and a 3D point.
Parameters
----------
xyz : tuple (0.00, 0.00, 0.00)
X, Y, and Z coordinate of the reference center for the distance
computation
Returns
---------
pandas.Series : Pandas Series object containing the Euclidean
distance between the atoms in the atom section and `xyz`.
"""
return np.sqrt(np.sum(self.df[['x', 'y', 'z']]
.subtract(xyz, axis=1)**2, axis=1)) | python | def distance(self, xyz=(0.00, 0.00, 0.00)):
"""Computes Euclidean distance between atoms in
self.df and a 3D point.
Parameters
----------
xyz : tuple (0.00, 0.00, 0.00)
X, Y, and Z coordinate of the reference center for the distance
computation
Returns
---------
pandas.Series : Pandas Series object containing the Euclidean
distance between the atoms in the atom section and `xyz`.
"""
return np.sqrt(np.sum(self.df[['x', 'y', 'z']]
.subtract(xyz, axis=1)**2, axis=1)) | [
"def",
"distance",
"(",
"self",
",",
"xyz",
"=",
"(",
"0.00",
",",
"0.00",
",",
"0.00",
")",
")",
":",
"return",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"self",
".",
"df",
"[",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"]",
".",
"subt... | Computes Euclidean distance between atoms in
self.df and a 3D point.
Parameters
----------
xyz : tuple (0.00, 0.00, 0.00)
X, Y, and Z coordinate of the reference center for the distance
computation
Returns
---------
pandas.Series : Pandas Series object containing the Euclidean
distance between the atoms in the atom section and `xyz`. | [
"Computes",
"Euclidean",
"distance",
"between",
"atoms",
"in",
"self",
".",
"df",
"and",
"a",
"3D",
"point",
"."
] | 615a7cf272692c12bbcfd9d1f217eab440120235 | https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/mol2/pandas_mol2.py#L230-L247 | train | 29,823 |
willkg/everett | everett/ext/yamlfile.py | ConfigYamlEnv.parse_yaml_file | def parse_yaml_file(self, path):
"""Parse yaml file at ``path`` and return a dict."""
with open(path, 'r') as fp:
data = yaml.safe_load(fp)
if not data:
return {}
def traverse(namespace, d):
cfg = {}
for key, val in d.items():
if isinstance(d[key], dict):
cfg.update(traverse(namespace + [key], d[key]))
else:
if not isinstance(val, str):
# All values should be double-quoted strings so they
# parse as strings; anything else is a configuration
# error at parse-time
raise ConfigurationError(
'Invalid value %r in file %s: values must be double-quoted strings' % (
val, path
)
)
cfg['_'.join(namespace + [key]).upper()] = val
return cfg
return traverse([], data) | python | def parse_yaml_file(self, path):
"""Parse yaml file at ``path`` and return a dict."""
with open(path, 'r') as fp:
data = yaml.safe_load(fp)
if not data:
return {}
def traverse(namespace, d):
cfg = {}
for key, val in d.items():
if isinstance(d[key], dict):
cfg.update(traverse(namespace + [key], d[key]))
else:
if not isinstance(val, str):
# All values should be double-quoted strings so they
# parse as strings; anything else is a configuration
# error at parse-time
raise ConfigurationError(
'Invalid value %r in file %s: values must be double-quoted strings' % (
val, path
)
)
cfg['_'.join(namespace + [key]).upper()] = val
return cfg
return traverse([], data) | [
"def",
"parse_yaml_file",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"fp",
":",
"data",
"=",
"yaml",
".",
"safe_load",
"(",
"fp",
")",
"if",
"not",
"data",
":",
"return",
"{",
"}",
"def",
"traverse",
"... | Parse yaml file at ``path`` and return a dict. | [
"Parse",
"yaml",
"file",
"at",
"path",
"and",
"return",
"a",
"dict",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/ext/yamlfile.py#L129-L157 | train | 29,824 |
willkg/everett | everett/sphinxext.py | import_class | def import_class(clspath):
"""Given a clspath, returns the class.
Note: This is a really simplistic implementation.
"""
modpath, clsname = split_clspath(clspath)
__import__(modpath)
module = sys.modules[modpath]
return getattr(module, clsname) | python | def import_class(clspath):
"""Given a clspath, returns the class.
Note: This is a really simplistic implementation.
"""
modpath, clsname = split_clspath(clspath)
__import__(modpath)
module = sys.modules[modpath]
return getattr(module, clsname) | [
"def",
"import_class",
"(",
"clspath",
")",
":",
"modpath",
",",
"clsname",
"=",
"split_clspath",
"(",
"clspath",
")",
"__import__",
"(",
"modpath",
")",
"module",
"=",
"sys",
".",
"modules",
"[",
"modpath",
"]",
"return",
"getattr",
"(",
"module",
",",
... | Given a clspath, returns the class.
Note: This is a really simplistic implementation. | [
"Given",
"a",
"clspath",
"returns",
"the",
"class",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/sphinxext.py#L155-L164 | train | 29,825 |
willkg/everett | everett/sphinxext.py | upper_lower_none | def upper_lower_none(arg):
"""Validate arg value as "upper", "lower", or None."""
if not arg:
return arg
arg = arg.strip().lower()
if arg in ['upper', 'lower']:
return arg
raise ValueError('argument must be "upper", "lower" or None') | python | def upper_lower_none(arg):
"""Validate arg value as "upper", "lower", or None."""
if not arg:
return arg
arg = arg.strip().lower()
if arg in ['upper', 'lower']:
return arg
raise ValueError('argument must be "upper", "lower" or None') | [
"def",
"upper_lower_none",
"(",
"arg",
")",
":",
"if",
"not",
"arg",
":",
"return",
"arg",
"arg",
"=",
"arg",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"arg",
"in",
"[",
"'upper'",
",",
"'lower'",
"]",
":",
"return",
"arg",
"raise",
"... | Validate arg value as "upper", "lower", or None. | [
"Validate",
"arg",
"value",
"as",
"upper",
"lower",
"or",
"None",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/sphinxext.py#L167-L176 | train | 29,826 |
willkg/everett | everett/sphinxext.py | setup | def setup(app):
"""Register domain and directive in Sphinx."""
app.add_domain(EverettDomain)
app.add_directive('autocomponent', AutoComponentDirective)
return {
'version': __version__,
'parallel_read_safe': True,
'parallel_write_safe': True
} | python | def setup(app):
"""Register domain and directive in Sphinx."""
app.add_domain(EverettDomain)
app.add_directive('autocomponent', AutoComponentDirective)
return {
'version': __version__,
'parallel_read_safe': True,
'parallel_write_safe': True
} | [
"def",
"setup",
"(",
"app",
")",
":",
"app",
".",
"add_domain",
"(",
"EverettDomain",
")",
"app",
".",
"add_directive",
"(",
"'autocomponent'",
",",
"AutoComponentDirective",
")",
"return",
"{",
"'version'",
":",
"__version__",
",",
"'parallel_read_safe'",
":",
... | Register domain and directive in Sphinx. | [
"Register",
"domain",
"and",
"directive",
"in",
"Sphinx",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/sphinxext.py#L429-L438 | train | 29,827 |
willkg/everett | everett/sphinxext.py | EverettComponent.handle_signature | def handle_signature(self, sig, signode):
"""Create a signature for this thing."""
if sig != 'Configuration':
signode.clear()
# Add "component" which is the type of this thing
signode += addnodes.desc_annotation('component ', 'component ')
if '.' in sig:
modname, clsname = sig.rsplit('.', 1)
else:
modname, clsname = '', sig
# If there's a module name, then we add the module
if modname:
signode += addnodes.desc_addname(modname + '.', modname + '.')
# Add the class name
signode += addnodes.desc_name(clsname, clsname)
else:
# Add just "Configuration"
signode += addnodes.desc_name(sig, sig)
return sig | python | def handle_signature(self, sig, signode):
"""Create a signature for this thing."""
if sig != 'Configuration':
signode.clear()
# Add "component" which is the type of this thing
signode += addnodes.desc_annotation('component ', 'component ')
if '.' in sig:
modname, clsname = sig.rsplit('.', 1)
else:
modname, clsname = '', sig
# If there's a module name, then we add the module
if modname:
signode += addnodes.desc_addname(modname + '.', modname + '.')
# Add the class name
signode += addnodes.desc_name(clsname, clsname)
else:
# Add just "Configuration"
signode += addnodes.desc_name(sig, sig)
return sig | [
"def",
"handle_signature",
"(",
"self",
",",
"sig",
",",
"signode",
")",
":",
"if",
"sig",
"!=",
"'Configuration'",
":",
"signode",
".",
"clear",
"(",
")",
"# Add \"component\" which is the type of this thing",
"signode",
"+=",
"addnodes",
".",
"desc_annotation",
... | Create a signature for this thing. | [
"Create",
"a",
"signature",
"for",
"this",
"thing",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/sphinxext.py#L191-L214 | train | 29,828 |
willkg/everett | everett/sphinxext.py | EverettComponent.add_target_and_index | def add_target_and_index(self, name, sig, signode):
"""Add a target and index for this thing."""
targetname = '%s-%s' % (self.objtype, name)
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
signode['ids'].append(targetname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['everett']['objects']
key = (self.objtype, name)
if key in objects:
self.state_machine.reporter.warning(
'duplicate description of %s %s, ' % (self.objtype, name) +
'other instance in ' + self.env.doc2path(objects[key]),
line=self.lineno
)
objects[key] = self.env.docname
indextext = _('%s (component)') % name
self.indexnode['entries'].append(('single', indextext, targetname, '', None)) | python | def add_target_and_index(self, name, sig, signode):
"""Add a target and index for this thing."""
targetname = '%s-%s' % (self.objtype, name)
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
signode['ids'].append(targetname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['everett']['objects']
key = (self.objtype, name)
if key in objects:
self.state_machine.reporter.warning(
'duplicate description of %s %s, ' % (self.objtype, name) +
'other instance in ' + self.env.doc2path(objects[key]),
line=self.lineno
)
objects[key] = self.env.docname
indextext = _('%s (component)') % name
self.indexnode['entries'].append(('single', indextext, targetname, '', None)) | [
"def",
"add_target_and_index",
"(",
"self",
",",
"name",
",",
"sig",
",",
"signode",
")",
":",
"targetname",
"=",
"'%s-%s'",
"%",
"(",
"self",
".",
"objtype",
",",
"name",
")",
"if",
"targetname",
"not",
"in",
"self",
".",
"state",
".",
"document",
"."... | Add a target and index for this thing. | [
"Add",
"a",
"target",
"and",
"index",
"for",
"this",
"thing",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/sphinxext.py#L216-L237 | train | 29,829 |
willkg/everett | everett/sphinxext.py | AutoComponentDirective.add_line | def add_line(self, line, source, *lineno):
"""Add a line to the result"""
self.result.append(line, source, *lineno) | python | def add_line(self, line, source, *lineno):
"""Add a line to the result"""
self.result.append(line, source, *lineno) | [
"def",
"add_line",
"(",
"self",
",",
"line",
",",
"source",
",",
"*",
"lineno",
")",
":",
"self",
".",
"result",
".",
"append",
"(",
"line",
",",
"source",
",",
"*",
"lineno",
")"
] | Add a line to the result | [
"Add",
"a",
"line",
"to",
"the",
"result"
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/sphinxext.py#L325-L327 | train | 29,830 |
willkg/everett | everett/sphinxext.py | AutoComponentDirective.generate_docs | def generate_docs(self, clspath, more_content):
"""Generate documentation for this configman class"""
obj = import_class(clspath)
sourcename = 'docstring of %s' % clspath
all_options = []
indent = ' '
config = obj.get_required_config()
if config.options:
# Go through options and figure out relevant information
for option in config:
if 'namespace' in self.options:
namespaced_key = self.options['namespace'] + '_' + option.key
else:
namespaced_key = option.key
if 'case' in self.options:
if self.options['case'] == 'upper':
namespaced_key = namespaced_key.upper()
elif self.options['case'] == 'lower':
namespaced_key = namespaced_key.lower()
all_options.append({
'key': namespaced_key,
'parser': qualname(option.parser),
'doc': option.doc,
'default': option.default,
})
if 'hide-classname' not in self.options:
modname, clsname = split_clspath(clspath)
component_name = clspath
component_index = clsname
else:
component_name = 'Configuration'
component_index = 'Configuration'
if all_options:
# Add index entries for options first so they link to the right
# place; we do it this way so that we don't have to make options a
# real object type and then we don't get to use TypedField
# formatting
self.add_line('.. index::', sourcename)
for option in all_options:
self.add_line(' single: %s; (%s)' % (option['key'], component_index), sourcename)
self.add_line('', '')
# Add the classname or 'Configuration'
self.add_line('.. everett:component:: %s' % component_name, sourcename)
self.add_line('', sourcename)
# Add the docstring if there is one and if show-docstring
if 'show-docstring' in self.options:
docstring_attr = self.options['show-docstring'] or '__doc__'
docstring = getattr(obj, docstring_attr, None)
if docstring:
docstringlines = prepare_docstring(docstring, ignore=1)
for i, line in enumerate(docstringlines):
self.add_line(indent + line, sourcename, i)
self.add_line('', '')
# Add content from the directive if there was any
if more_content:
for line, src in zip(more_content.data, more_content.items):
self.add_line(indent + line, src[0], src[1])
self.add_line('', '')
if all_options:
# Now list the options
sourcename = 'class definition'
for option in all_options:
self.add_line(
'%s:option %s %s:' % (indent, option['parser'], option['key']),
sourcename
)
self.add_line('%s %s' % (indent, option['doc']), sourcename)
if option['default'] is not NO_VALUE:
self.add_line('', '')
self.add_line(
'%s Defaults to ``%r``.' % (indent, option['default']),
sourcename
)
self.add_line('', '') | python | def generate_docs(self, clspath, more_content):
"""Generate documentation for this configman class"""
obj = import_class(clspath)
sourcename = 'docstring of %s' % clspath
all_options = []
indent = ' '
config = obj.get_required_config()
if config.options:
# Go through options and figure out relevant information
for option in config:
if 'namespace' in self.options:
namespaced_key = self.options['namespace'] + '_' + option.key
else:
namespaced_key = option.key
if 'case' in self.options:
if self.options['case'] == 'upper':
namespaced_key = namespaced_key.upper()
elif self.options['case'] == 'lower':
namespaced_key = namespaced_key.lower()
all_options.append({
'key': namespaced_key,
'parser': qualname(option.parser),
'doc': option.doc,
'default': option.default,
})
if 'hide-classname' not in self.options:
modname, clsname = split_clspath(clspath)
component_name = clspath
component_index = clsname
else:
component_name = 'Configuration'
component_index = 'Configuration'
if all_options:
# Add index entries for options first so they link to the right
# place; we do it this way so that we don't have to make options a
# real object type and then we don't get to use TypedField
# formatting
self.add_line('.. index::', sourcename)
for option in all_options:
self.add_line(' single: %s; (%s)' % (option['key'], component_index), sourcename)
self.add_line('', '')
# Add the classname or 'Configuration'
self.add_line('.. everett:component:: %s' % component_name, sourcename)
self.add_line('', sourcename)
# Add the docstring if there is one and if show-docstring
if 'show-docstring' in self.options:
docstring_attr = self.options['show-docstring'] or '__doc__'
docstring = getattr(obj, docstring_attr, None)
if docstring:
docstringlines = prepare_docstring(docstring, ignore=1)
for i, line in enumerate(docstringlines):
self.add_line(indent + line, sourcename, i)
self.add_line('', '')
# Add content from the directive if there was any
if more_content:
for line, src in zip(more_content.data, more_content.items):
self.add_line(indent + line, src[0], src[1])
self.add_line('', '')
if all_options:
# Now list the options
sourcename = 'class definition'
for option in all_options:
self.add_line(
'%s:option %s %s:' % (indent, option['parser'], option['key']),
sourcename
)
self.add_line('%s %s' % (indent, option['doc']), sourcename)
if option['default'] is not NO_VALUE:
self.add_line('', '')
self.add_line(
'%s Defaults to ``%r``.' % (indent, option['default']),
sourcename
)
self.add_line('', '') | [
"def",
"generate_docs",
"(",
"self",
",",
"clspath",
",",
"more_content",
")",
":",
"obj",
"=",
"import_class",
"(",
"clspath",
")",
"sourcename",
"=",
"'docstring of %s'",
"%",
"clspath",
"all_options",
"=",
"[",
"]",
"indent",
"=",
"' '",
"config",
"=",... | Generate documentation for this configman class | [
"Generate",
"documentation",
"for",
"this",
"configman",
"class"
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/sphinxext.py#L329-L412 | train | 29,831 |
willkg/everett | everett/manager.py | qualname | def qualname(thing):
"""Return the dot name for a given thing.
>>> import everett.manager
>>> qualname(str)
'str'
>>> qualname(everett.manager.parse_class)
'everett.manager.parse_class'
>>> qualname(everett.manager)
'everett.manager'
"""
parts = []
# Add the module, unless it's a builtin
mod = inspect.getmodule(thing)
if mod and mod.__name__ not in ('__main__', '__builtin__', 'builtins'):
parts.append(mod.__name__)
# If there's a __qualname__, use that
if hasattr(thing, '__qualname__'):
parts.append(thing.__qualname__)
return '.'.join(parts)
# If it's a module
if inspect.ismodule(thing):
return '.'.join(parts)
# If it's a class
if inspect.isclass(thing):
parts.append(thing.__name__)
return '.'.join(parts)
# If it's a function
if isinstance(thing, (types.FunctionType, types.MethodType)):
# If it's a method or function
if inspect.ismethod(thing):
if thing.im_class is type:
# This is a class method
parts.append(thing.im_self.__name__)
else:
# This is an bound/instance method
parts.append(thing.im_class.__name__)
parts.append(thing.__name__)
elif inspect.isfunction(thing):
parts.append(thing.__name__)
return '.'.join(parts)
# It's an instance, so ... let's call repr on it
return repr(thing) | python | def qualname(thing):
"""Return the dot name for a given thing.
>>> import everett.manager
>>> qualname(str)
'str'
>>> qualname(everett.manager.parse_class)
'everett.manager.parse_class'
>>> qualname(everett.manager)
'everett.manager'
"""
parts = []
# Add the module, unless it's a builtin
mod = inspect.getmodule(thing)
if mod and mod.__name__ not in ('__main__', '__builtin__', 'builtins'):
parts.append(mod.__name__)
# If there's a __qualname__, use that
if hasattr(thing, '__qualname__'):
parts.append(thing.__qualname__)
return '.'.join(parts)
# If it's a module
if inspect.ismodule(thing):
return '.'.join(parts)
# If it's a class
if inspect.isclass(thing):
parts.append(thing.__name__)
return '.'.join(parts)
# If it's a function
if isinstance(thing, (types.FunctionType, types.MethodType)):
# If it's a method or function
if inspect.ismethod(thing):
if thing.im_class is type:
# This is a class method
parts.append(thing.im_self.__name__)
else:
# This is an bound/instance method
parts.append(thing.im_class.__name__)
parts.append(thing.__name__)
elif inspect.isfunction(thing):
parts.append(thing.__name__)
return '.'.join(parts)
# It's an instance, so ... let's call repr on it
return repr(thing) | [
"def",
"qualname",
"(",
"thing",
")",
":",
"parts",
"=",
"[",
"]",
"# Add the module, unless it's a builtin",
"mod",
"=",
"inspect",
".",
"getmodule",
"(",
"thing",
")",
"if",
"mod",
"and",
"mod",
".",
"__name__",
"not",
"in",
"(",
"'__main__'",
",",
"'__b... | Return the dot name for a given thing.
>>> import everett.manager
>>> qualname(str)
'str'
>>> qualname(everett.manager.parse_class)
'everett.manager.parse_class'
>>> qualname(everett.manager)
'everett.manager' | [
"Return",
"the",
"dot",
"name",
"for",
"a",
"given",
"thing",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/manager.py#L39-L90 | train | 29,832 |
willkg/everett | everett/manager.py | parse_bool | def parse_bool(val):
"""Parse a bool value.
Handles a series of values, but you should probably standardize on
"true" and "false".
>>> parse_bool('y')
True
>>> parse_bool('FALSE')
False
"""
true_vals = ('t', 'true', 'yes', 'y', '1', 'on')
false_vals = ('f', 'false', 'no', 'n', '0', 'off')
val = val.lower()
if val in true_vals:
return True
if val in false_vals:
return False
raise ValueError('"%s" is not a valid bool value' % val) | python | def parse_bool(val):
"""Parse a bool value.
Handles a series of values, but you should probably standardize on
"true" and "false".
>>> parse_bool('y')
True
>>> parse_bool('FALSE')
False
"""
true_vals = ('t', 'true', 'yes', 'y', '1', 'on')
false_vals = ('f', 'false', 'no', 'n', '0', 'off')
val = val.lower()
if val in true_vals:
return True
if val in false_vals:
return False
raise ValueError('"%s" is not a valid bool value' % val) | [
"def",
"parse_bool",
"(",
"val",
")",
":",
"true_vals",
"=",
"(",
"'t'",
",",
"'true'",
",",
"'yes'",
",",
"'y'",
",",
"'1'",
",",
"'on'",
")",
"false_vals",
"=",
"(",
"'f'",
",",
"'false'",
",",
"'no'",
",",
"'n'",
",",
"'0'",
",",
"'off'",
")",... | Parse a bool value.
Handles a series of values, but you should probably standardize on
"true" and "false".
>>> parse_bool('y')
True
>>> parse_bool('FALSE')
False | [
"Parse",
"a",
"bool",
"value",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/manager.py#L93-L114 | train | 29,833 |
willkg/everett | everett/manager.py | parse_env_file | def parse_env_file(envfile):
"""Parse the content of an iterable of lines as ``.env``.
Return a dict of config variables.
>>> parse_env_file(['DUDE=Abides'])
{'DUDE': 'Abides'}
"""
data = {}
for line_no, line in enumerate(envfile):
line = line.strip()
if not line or line.startswith('#'):
continue
if '=' not in line:
raise ConfigurationError('Env file line missing = operator (line %s)' % (line_no + 1))
k, v = line.split('=', 1)
k = k.strip()
if not ENV_KEY_RE.match(k):
raise ConfigurationError(
'Invalid variable name "%s" in env file (line %s)' % (k, (line_no + 1))
)
v = v.strip().strip('\'"')
data[k] = v
return data | python | def parse_env_file(envfile):
"""Parse the content of an iterable of lines as ``.env``.
Return a dict of config variables.
>>> parse_env_file(['DUDE=Abides'])
{'DUDE': 'Abides'}
"""
data = {}
for line_no, line in enumerate(envfile):
line = line.strip()
if not line or line.startswith('#'):
continue
if '=' not in line:
raise ConfigurationError('Env file line missing = operator (line %s)' % (line_no + 1))
k, v = line.split('=', 1)
k = k.strip()
if not ENV_KEY_RE.match(k):
raise ConfigurationError(
'Invalid variable name "%s" in env file (line %s)' % (k, (line_no + 1))
)
v = v.strip().strip('\'"')
data[k] = v
return data | [
"def",
"parse_env_file",
"(",
"envfile",
")",
":",
"data",
"=",
"{",
"}",
"for",
"line_no",
",",
"line",
"in",
"enumerate",
"(",
"envfile",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"not",
"line",
"or",
"line",
".",
"startswith",
... | Parse the content of an iterable of lines as ``.env``.
Return a dict of config variables.
>>> parse_env_file(['DUDE=Abides'])
{'DUDE': 'Abides'} | [
"Parse",
"the",
"content",
"of",
"an",
"iterable",
"of",
"lines",
"as",
".",
"env",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/manager.py#L117-L142 | train | 29,834 |
willkg/everett | everett/manager.py | parse_class | def parse_class(val):
"""Parse a string, imports the module and returns the class.
>>> parse_class('hashlib.md5')
<built-in function openssl_md5>
"""
module, class_name = val.rsplit('.', 1)
module = importlib.import_module(module)
try:
return getattr(module, class_name)
except AttributeError:
raise ValueError('"%s" is not a valid member of %s' % (
class_name, qualname(module))
) | python | def parse_class(val):
"""Parse a string, imports the module and returns the class.
>>> parse_class('hashlib.md5')
<built-in function openssl_md5>
"""
module, class_name = val.rsplit('.', 1)
module = importlib.import_module(module)
try:
return getattr(module, class_name)
except AttributeError:
raise ValueError('"%s" is not a valid member of %s' % (
class_name, qualname(module))
) | [
"def",
"parse_class",
"(",
"val",
")",
":",
"module",
",",
"class_name",
"=",
"val",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"module",
")",
"try",
":",
"return",
"getattr",
"(",
"module",
",",
... | Parse a string, imports the module and returns the class.
>>> parse_class('hashlib.md5')
<built-in function openssl_md5> | [
"Parse",
"a",
"string",
"imports",
"the",
"module",
"and",
"returns",
"the",
"class",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/manager.py#L145-L159 | train | 29,835 |
willkg/everett | everett/manager.py | generate_uppercase_key | def generate_uppercase_key(key, namespace=None):
"""Given a key and a namespace, generates a final uppercase key."""
if namespace:
namespace = [part for part in listify(namespace) if part]
key = '_'.join(namespace + [key])
key = key.upper()
return key | python | def generate_uppercase_key(key, namespace=None):
"""Given a key and a namespace, generates a final uppercase key."""
if namespace:
namespace = [part for part in listify(namespace) if part]
key = '_'.join(namespace + [key])
key = key.upper()
return key | [
"def",
"generate_uppercase_key",
"(",
"key",
",",
"namespace",
"=",
"None",
")",
":",
"if",
"namespace",
":",
"namespace",
"=",
"[",
"part",
"for",
"part",
"in",
"listify",
"(",
"namespace",
")",
"if",
"part",
"]",
"key",
"=",
"'_'",
".",
"join",
"(",
... | Given a key and a namespace, generates a final uppercase key. | [
"Given",
"a",
"key",
"and",
"a",
"namespace",
"generates",
"a",
"final",
"uppercase",
"key",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/manager.py#L189-L196 | train | 29,836 |
willkg/everett | everett/manager.py | get_key_from_envs | def get_key_from_envs(envs, key):
"""Return the value of a key from the given dict respecting namespaces.
Data can also be a list of data dicts.
"""
# if it barks like a dict, make it a list have to use `get` since dicts and
# lists both have __getitem__
if hasattr(envs, 'get'):
envs = [envs]
for env in envs:
if key in env:
return env[key]
return NO_VALUE | python | def get_key_from_envs(envs, key):
"""Return the value of a key from the given dict respecting namespaces.
Data can also be a list of data dicts.
"""
# if it barks like a dict, make it a list have to use `get` since dicts and
# lists both have __getitem__
if hasattr(envs, 'get'):
envs = [envs]
for env in envs:
if key in env:
return env[key]
return NO_VALUE | [
"def",
"get_key_from_envs",
"(",
"envs",
",",
"key",
")",
":",
"# if it barks like a dict, make it a list have to use `get` since dicts and",
"# lists both have __getitem__",
"if",
"hasattr",
"(",
"envs",
",",
"'get'",
")",
":",
"envs",
"=",
"[",
"envs",
"]",
"for",
"... | Return the value of a key from the given dict respecting namespaces.
Data can also be a list of data dicts. | [
"Return",
"the",
"value",
"of",
"a",
"key",
"from",
"the",
"given",
"dict",
"respecting",
"namespaces",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/manager.py#L199-L214 | train | 29,837 |
willkg/everett | everett/manager.py | ConfigManagerBase.with_options | def with_options(self, component):
"""Apply options component options to this configuration."""
options = component.get_required_config()
component_name = _get_component_name(component)
return BoundConfig(self._get_base_config(), component_name, options) | python | def with_options(self, component):
"""Apply options component options to this configuration."""
options = component.get_required_config()
component_name = _get_component_name(component)
return BoundConfig(self._get_base_config(), component_name, options) | [
"def",
"with_options",
"(",
"self",
",",
"component",
")",
":",
"options",
"=",
"component",
".",
"get_required_config",
"(",
")",
"component_name",
"=",
"_get_component_name",
"(",
"component",
")",
"return",
"BoundConfig",
"(",
"self",
".",
"_get_base_config",
... | Apply options component options to this configuration. | [
"Apply",
"options",
"component",
"options",
"to",
"this",
"configuration",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/manager.py#L560-L564 | train | 29,838 |
willkg/everett | everett/manager.py | ConfigOverride.decorate | def decorate(self, fun):
"""Decorate a function for overriding configuration."""
@wraps(fun)
def _decorated(*args, **kwargs):
# Push the config, run the function and pop it afterwards.
self.push_config()
try:
return fun(*args, **kwargs)
finally:
self.pop_config()
return _decorated | python | def decorate(self, fun):
"""Decorate a function for overriding configuration."""
@wraps(fun)
def _decorated(*args, **kwargs):
# Push the config, run the function and pop it afterwards.
self.push_config()
try:
return fun(*args, **kwargs)
finally:
self.pop_config()
return _decorated | [
"def",
"decorate",
"(",
"self",
",",
"fun",
")",
":",
"@",
"wraps",
"(",
"fun",
")",
"def",
"_decorated",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Push the config, run the function and pop it afterwards.",
"self",
".",
"push_config",
"(",
")",... | Decorate a function for overriding configuration. | [
"Decorate",
"a",
"function",
"for",
"overriding",
"configuration",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/manager.py#L1023-L1033 | train | 29,839 |
willkg/everett | everett/component.py | ConfigOptions.add_option | def add_option(self, key, default=NO_VALUE, alternate_keys=NO_VALUE,
doc='', parser=str, **meta):
"""Add an option to the group.
:arg key: the key to look up
:arg default: the default value (if any); must be a string that is
parseable by the specified parser
:arg alternate_keys: the list of alternate keys to look up;
supports a ``root:`` key prefix which will cause this to look at
the configuration root rather than the current namespace
:arg doc: documentation for this config option
:arg parser: the parser for converting this value to a Python object
:arg meta: catch-all for other key/value pairs you want to association
with this option
"""
option = Option(key, default, alternate_keys, doc, parser, meta)
self.options[key] = option | python | def add_option(self, key, default=NO_VALUE, alternate_keys=NO_VALUE,
doc='', parser=str, **meta):
"""Add an option to the group.
:arg key: the key to look up
:arg default: the default value (if any); must be a string that is
parseable by the specified parser
:arg alternate_keys: the list of alternate keys to look up;
supports a ``root:`` key prefix which will cause this to look at
the configuration root rather than the current namespace
:arg doc: documentation for this config option
:arg parser: the parser for converting this value to a Python object
:arg meta: catch-all for other key/value pairs you want to association
with this option
"""
option = Option(key, default, alternate_keys, doc, parser, meta)
self.options[key] = option | [
"def",
"add_option",
"(",
"self",
",",
"key",
",",
"default",
"=",
"NO_VALUE",
",",
"alternate_keys",
"=",
"NO_VALUE",
",",
"doc",
"=",
"''",
",",
"parser",
"=",
"str",
",",
"*",
"*",
"meta",
")",
":",
"option",
"=",
"Option",
"(",
"key",
",",
"def... | Add an option to the group.
:arg key: the key to look up
:arg default: the default value (if any); must be a string that is
parseable by the specified parser
:arg alternate_keys: the list of alternate keys to look up;
supports a ``root:`` key prefix which will cause this to look at
the configuration root rather than the current namespace
:arg doc: documentation for this config option
:arg parser: the parser for converting this value to a Python object
:arg meta: catch-all for other key/value pairs you want to association
with this option | [
"Add",
"an",
"option",
"to",
"the",
"group",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/component.py#L43-L65 | train | 29,840 |
willkg/everett | everett/component.py | ConfigOptions.update | def update(self, new_options):
"""Update this ConfigOptions using data from another."""
for option in new_options:
if option.key in self.options:
del self.options[option.key]
self.options[option.key] = option | python | def update(self, new_options):
"""Update this ConfigOptions using data from another."""
for option in new_options:
if option.key in self.options:
del self.options[option.key]
self.options[option.key] = option | [
"def",
"update",
"(",
"self",
",",
"new_options",
")",
":",
"for",
"option",
"in",
"new_options",
":",
"if",
"option",
".",
"key",
"in",
"self",
".",
"options",
":",
"del",
"self",
".",
"options",
"[",
"option",
".",
"key",
"]",
"self",
".",
"options... | Update this ConfigOptions using data from another. | [
"Update",
"this",
"ConfigOptions",
"using",
"data",
"from",
"another",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/component.py#L67-L72 | train | 29,841 |
willkg/everett | everett/component.py | RequiredConfigMixin.get_required_config | def get_required_config(cls):
"""Roll up configuration options for this class and parent classes.
This handles subclasses overriding options in parent classes.
:returns: final ``ConfigOptions`` representing all configuration for
this class
"""
options = ConfigOptions()
for cls in reversed(cls.__mro__):
try:
options.update(cls.required_config)
except AttributeError:
pass
return options | python | def get_required_config(cls):
"""Roll up configuration options for this class and parent classes.
This handles subclasses overriding options in parent classes.
:returns: final ``ConfigOptions`` representing all configuration for
this class
"""
options = ConfigOptions()
for cls in reversed(cls.__mro__):
try:
options.update(cls.required_config)
except AttributeError:
pass
return options | [
"def",
"get_required_config",
"(",
"cls",
")",
":",
"options",
"=",
"ConfigOptions",
"(",
")",
"for",
"cls",
"in",
"reversed",
"(",
"cls",
".",
"__mro__",
")",
":",
"try",
":",
"options",
".",
"update",
"(",
"cls",
".",
"required_config",
")",
"except",
... | Roll up configuration options for this class and parent classes.
This handles subclasses overriding options in parent classes.
:returns: final ``ConfigOptions`` representing all configuration for
this class | [
"Roll",
"up",
"configuration",
"options",
"for",
"this",
"class",
"and",
"parent",
"classes",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/component.py#L100-L115 | train | 29,842 |
def get_runtime_config(self, namespace=None):
    """Roll up the runtime config for this class and all children.

    Override/extend this to also call ``.get_runtime_config()`` on child
    components, e.g.::

        def get_runtime_config(self, namespace=None):
            for item in super(MyComponent, self).get_runtime_config(namespace):
                yield item
            for item in self.child.get_runtime_config(['source']):
                yield item

    Calling this function can give you the complete runtime configuration
    for a component tree. This is helpful for doing things like printing
    the configuration being used including default values.

    .. Note::

       If this instance has a ``.config`` attribute and it is a
       :py:class:`everett.component.BoundConfig`, then this will try to
       compute the runtime config. Otherwise, it'll yield nothing.

    :arg list namespace: list of namespace parts or None
    :returns: generator of ``(namespace, key, value, option)`` tuples

    """
    parts = namespace if namespace else []
    bound = getattr(self, 'config', None)
    # isinstance(None, BoundConfig) is False, so this also covers the
    # "no config attribute at all" case.
    if not isinstance(bound, BoundConfig):
        return
    required = self.get_required_config()
    for key, option in required.options.items():
        value = self.config(key, raise_error=False, raw_value=True)
        yield (parts, key, value, option)
"""Roll up the runtime config for this class and all children.
Implement this to call ``.get_runtime_config()`` on child components or
to adjust how it works.
For example, if you created a component that has a child component, you
could do something like this::
class MyComponent(RequiredConfigMixin):
....
def __init__(self, config):
self.config = config.with_options(self)
self.child = OtherComponent(config.with_namespace('source'))
def get_runtime_config(self, namespace=None):
for item in super(MyComponent, self).get_runtime_config(namespace):
yield item
for item in self.child.get_runtime_config(['source']):
yield item
Calling this function can give you the complete runtime configuration
for a component tree. This is helpful for doing things like printing
the configuration being used including default values.
.. Note::
If this instance has a ``.config`` attribute and it is a
:py:class:`everett.component.BoundConfig`, then this will try to
compute the runtime config.
Otherwise, it'll yield nothing.
:arg list namespace: list of namespace parts or None
:returns: list of ``(namespace, key, option)``
"""
namespace = namespace or []
cfg = getattr(self, 'config', None)
if cfg is None or not isinstance(cfg, BoundConfig):
return
for key, opt in self.get_required_config().options.items():
yield (namespace, key, self.config(key, raise_error=False, raw_value=True), opt) | [
"def",
"get_runtime_config",
"(",
"self",
",",
"namespace",
"=",
"None",
")",
":",
"namespace",
"=",
"namespace",
"or",
"[",
"]",
"cfg",
"=",
"getattr",
"(",
"self",
",",
"'config'",
",",
"None",
")",
"if",
"cfg",
"is",
"None",
"or",
"not",
"isinstance... | Roll up the runtime config for this class and all children.
Implement this to call ``.get_runtime_config()`` on child components or
to adjust how it works.
For example, if you created a component that has a child component, you
could do something like this::
class MyComponent(RequiredConfigMixin):
....
def __init__(self, config):
self.config = config.with_options(self)
self.child = OtherComponent(config.with_namespace('source'))
def get_runtime_config(self, namespace=None):
for item in super(MyComponent, self).get_runtime_config(namespace):
yield item
for item in self.child.get_runtime_config(['source']):
yield item
Calling this function can give you the complete runtime configuration
for a component tree. This is helpful for doing things like printing
the configuration being used including default values.
.. Note::
If this instance has a ``.config`` attribute and it is a
:py:class:`everett.component.BoundConfig`, then this will try to
compute the runtime config.
Otherwise, it'll yield nothing.
:arg list namespace: list of namespace parts or None
:returns: list of ``(namespace, key, option)`` | [
"Roll",
"up",
"the",
"runtime",
"config",
"for",
"this",
"class",
"and",
"all",
"children",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/component.py#L117-L164 | train | 29,843 |
def parse_ini_file(self, path):
    """Parse ini file at ``path`` and return a flat dict.

    Nested sections are flattened into underscore-joined, uppercased
    keys; e.g. key ``c`` in section ``[a]`` subsection ``[[b]]`` becomes
    ``A_B_C``.
    """
    parsed = ConfigObj(path, list_values=False)
    flattened = {}
    # Depth-first walk over the (possibly nested) section dicts using an
    # explicit stack instead of recursion.
    stack = [([], parsed.dict())]
    while stack:
        prefix, section = stack.pop()
        for key, value in section.items():
            if isinstance(value, dict):
                stack.append((prefix + [key], value))
            else:
                flattened['_'.join(prefix + [key]).upper()] = value
    return flattened
"""Parse ini file at ``path`` and return dict."""
cfgobj = ConfigObj(path, list_values=False)
def extract_section(namespace, d):
cfg = {}
for key, val in d.items():
if isinstance(d[key], dict):
cfg.update(extract_section(namespace + [key], d[key]))
else:
cfg['_'.join(namespace + [key]).upper()] = val
return cfg
return extract_section([], cfgobj.dict()) | [
"def",
"parse_ini_file",
"(",
"self",
",",
"path",
")",
":",
"cfgobj",
"=",
"ConfigObj",
"(",
"path",
",",
"list_values",
"=",
"False",
")",
"def",
"extract_section",
"(",
"namespace",
",",
"d",
")",
":",
"cfg",
"=",
"{",
"}",
"for",
"key",
",",
"val... | Parse ini file at ``path`` and return dict. | [
"Parse",
"ini",
"file",
"at",
"path",
"and",
"return",
"dict",
"."
] | 5653134af59f439d2b33f3939fab2b8544428f11 | https://github.com/willkg/everett/blob/5653134af59f439d2b33f3939fab2b8544428f11/everett/ext/inifile.py#L142-L156 | train | 29,844 |
def team(self, team, simple=False):
    """
    Get data on a single specified team.

    :param team: Team to get data for.
    :param simple: Get only vital data.
    :return: Team object with data on specified team.
    """
    suffix = '/simple' if simple else ''
    url = 'team/%s%s' % (self.team_key(team), suffix)
    return Team(self._get(url))
"""
Get data on a single specified team.
:param team: Team to get data for.
:param simple: Get only vital data.
:return: Team object with data on specified team.
"""
return Team(self._get('team/%s%s' % (self.team_key(team), '/simple' if simple else ''))) | [
"def",
"team",
"(",
"self",
",",
"team",
",",
"simple",
"=",
"False",
")",
":",
"return",
"Team",
"(",
"self",
".",
"_get",
"(",
"'team/%s%s'",
"%",
"(",
"self",
".",
"team_key",
"(",
"team",
")",
",",
"'/simple'",
"if",
"simple",
"else",
"''",
")"... | Get data on a single specified team.
:param team: Team to get data for.
:param simple: Get only vital data.
:return: Team object with data on specified team. | [
"Get",
"data",
"on",
"a",
"single",
"specified",
"team",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L110-L118 | train | 29,845 |
def team_events(self, team, year=None, simple=False, keys=False):
    """
    Get team events a team has participated in.

    :param team: Team to get events for.
    :param year: Year to get events from.
    :param simple: Get only vital data.
    :param keys: Get just the keys of the events. Set to True if you only need the keys of each event and not their full data.
    :return: List of strings or Teams
    """
    # Build the endpoint incrementally rather than branching four ways.
    url = 'team/%s/events' % self.team_key(team)
    if year:
        url += '/%s' % year
    if keys:
        return self._get(url + '/keys')
    return [Event(raw) for raw in self._get(url + ('/simple' if simple else ''))]
"""
Get team events a team has participated in.
:param team: Team to get events for.
:param year: Year to get events from.
:param simple: Get only vital data.
:param keys: Get just the keys of the events. Set to True if you only need the keys of each event and not their full data.
:return: List of strings or Teams
"""
if year:
if keys:
return self._get('team/%s/events/%s/keys' % (self.team_key(team), year))
else:
return [Event(raw) for raw in self._get('team/%s/events/%s%s' % (self.team_key(team), year, '/simple' if simple else ''))]
else:
if keys:
return self._get('team/%s/events/keys' % self.team_key(team))
else:
return [Event(raw) for raw in self._get('team/%s/events%s' % (self.team_key(team), '/simple' if simple else ''))] | [
"def",
"team_events",
"(",
"self",
",",
"team",
",",
"year",
"=",
"None",
",",
"simple",
"=",
"False",
",",
"keys",
"=",
"False",
")",
":",
"if",
"year",
":",
"if",
"keys",
":",
"return",
"self",
".",
"_get",
"(",
"'team/%s/events/%s/keys'",
"%",
"("... | Get team events a team has participated in.
:param team: Team to get events for.
:param year: Year to get events from.
:param simple: Get only vital data.
:param keys: Get just the keys of the events. Set to True if you only need the keys of each event and not their full data.
:return: List of strings or Teams | [
"Get",
"team",
"events",
"a",
"team",
"has",
"participated",
"in",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L120-L139 | train | 29,846 |
def team_awards(self, team, year=None, event=None):
    """
    Get list of awards team has recieved.

    :param team: Team to get awards of.
    :param year: Year to get awards from.
    :param event: Event to get awards from.
    :return: List of Award objects
    """
    key = self.team_key(team)
    # ``event`` takes precedence over ``year`` when both are supplied.
    if event:
        url = 'team/%s/event/%s/awards' % (key, event)
    elif year:
        url = 'team/%s/awards/%s' % (key, year)
    else:
        url = 'team/%s/awards' % key
    return [Award(raw) for raw in self._get(url)]
"""
Get list of awards team has recieved.
:param team: Team to get awards of.
:param year: Year to get awards from.
:param event: Event to get awards from.
:return: List of Award objects
"""
if event:
return [Award(raw) for raw in self._get('team/%s/event/%s/awards' % (self.team_key(team), event))]
else:
if year:
return [Award(raw) for raw in self._get('team/%s/awards/%s' % (self.team_key(team), year))]
else:
return [Award(raw) for raw in self._get('team/%s/awards' % self.team_key(team))] | [
"def",
"team_awards",
"(",
"self",
",",
"team",
",",
"year",
"=",
"None",
",",
"event",
"=",
"None",
")",
":",
"if",
"event",
":",
"return",
"[",
"Award",
"(",
"raw",
")",
"for",
"raw",
"in",
"self",
".",
"_get",
"(",
"'team/%s/event/%s/awards'",
"%"... | Get list of awards team has recieved.
:param team: Team to get awards of.
:param year: Year to get awards from.
:param event: Event to get awards from.
:return: List of Award objects | [
"Get",
"list",
"of",
"awards",
"team",
"has",
"recieved",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L141-L156 | train | 29,847 |
def team_matches(self, team, event=None, year=None, simple=False, keys=False):
    """
    Get list of matches team has participated in.

    :param team: Team to get matches of.
    :param year: Year to get matches from.
    :param event: Event to get matches from.
    :param simple: Get only vital data.
    :param keys: Only get match keys rather than their full data.
    :return: List of string keys or Match objects. Returns ``None`` when
        neither ``event`` nor ``year`` is given (mirrors the original
        implicit-None behavior).
    """
    key = self.team_key(team)
    if event:
        base = 'team/%s/event/%s/matches' % (key, event)
    elif year:
        base = 'team/%s/matches/%s' % (key, year)
    else:
        return None
    if keys:
        return self._get(base + '/keys')
    return [Match(raw) for raw in self._get(base + ('/simple' if simple else ''))]
"""
Get list of matches team has participated in.
:param team: Team to get matches of.
:param year: Year to get matches from.
:param event: Event to get matches from.
:param simple: Get only vital data.
:param keys: Only get match keys rather than their full data.
:return: List of string keys or Match objects.
"""
if event:
if keys:
return self._get('team/%s/event/%s/matches/keys' % (self.team_key(team), event))
else:
return [Match(raw) for raw in self._get('team/%s/event/%s/matches%s' % (self.team_key(team), event, '/simple' if simple else ''))]
elif year:
if keys:
return self._get('team/%s/matches/%s/keys' % (self.team_key(team), year))
else:
return [Match(raw) for raw in self._get('team/%s/matches/%s%s' % (self.team_key(team), year, '/simple' if simple else ''))] | [
"def",
"team_matches",
"(",
"self",
",",
"team",
",",
"event",
"=",
"None",
",",
"year",
"=",
"None",
",",
"simple",
"=",
"False",
",",
"keys",
"=",
"False",
")",
":",
"if",
"event",
":",
"if",
"keys",
":",
"return",
"self",
".",
"_get",
"(",
"'t... | Get list of matches team has participated in.
:param team: Team to get matches of.
:param year: Year to get matches from.
:param event: Event to get matches from.
:param simple: Get only vital data.
:param keys: Only get match keys rather than their full data.
:return: List of string keys or Match objects. | [
"Get",
"list",
"of",
"matches",
"team",
"has",
"participated",
"in",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L158-L178 | train | 29,848 |
def team_media(self, team, year=None, tag=None):
    """
    Get media for a given team.

    :param team: Team to get media of.
    :param year: Year to get media from.
    :param tag: Get only media with a given tag.
    :return: List of Media objects.
    """
    # URL shape: team/<key>/media[/tag/<tag>][/<year>]
    url = 'team/%s/media' % self.team_key(team)
    if tag:
        url += '/tag/%s' % tag
    if year:
        url += '/%s' % year
    return [Media(raw) for raw in self._get(url)]
"""
Get media for a given team.
:param team: Team to get media of.
:param year: Year to get media from.
:param tag: Get only media with a given tag.
:return: List of Media objects.
"""
return [Media(raw) for raw in self._get('team/%s/media%s%s' % (self.team_key(team), ('/tag/%s' % tag) if tag else '', ('/%s' % year) if year else ''))] | [
"def",
"team_media",
"(",
"self",
",",
"team",
",",
"year",
"=",
"None",
",",
"tag",
"=",
"None",
")",
":",
"return",
"[",
"Media",
"(",
"raw",
")",
"for",
"raw",
"in",
"self",
".",
"_get",
"(",
"'team/%s/media%s%s'",
"%",
"(",
"self",
".",
"team_k... | Get media for a given team.
:param team: Team to get media of.
:param year: Year to get media from.
:param tag: Get only media with a given tag.
:return: List of Media objects. | [
"Get",
"media",
"for",
"a",
"given",
"team",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L189-L198 | train | 29,849 |
def team_robots(self, team):
    """
    Get data about a team's robots.

    :param team: Key for team whose robots you want data on.
    :return: List of Robot objects
    """
    raw_robots = self._get('team/%s/robots' % self.team_key(team))
    return [Robot(item) for item in raw_robots]
"""
Get data about a team's robots.
:param team: Key for team whose robots you want data on.
:return: List of Robot objects
"""
return [Robot(raw) for raw in self._get('team/%s/robots' % self.team_key(team))] | [
"def",
"team_robots",
"(",
"self",
",",
"team",
")",
":",
"return",
"[",
"Robot",
"(",
"raw",
")",
"for",
"raw",
"in",
"self",
".",
"_get",
"(",
"'team/%s/robots'",
"%",
"self",
".",
"team_key",
"(",
"team",
")",
")",
"]"
] | Get data about a team's robots.
:param team: Key for team whose robots you want data on.
:return: List of Robot objects | [
"Get",
"data",
"about",
"a",
"team",
"s",
"robots",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L200-L207 | train | 29,850 |
def team_districts(self, team):
    """
    Get districts a team has competed in.

    :param team: Team to get data on.
    :return: List of District objects.
    """
    raw_districts = self._get('team/%s/districts' % self.team_key(team))
    return [District(item) for item in raw_districts]
"""
Get districts a team has competed in.
:param team: Team to get data on.
:return: List of District objects.
"""
return [District(raw) for raw in self._get('team/%s/districts' % self.team_key(team))] | [
"def",
"team_districts",
"(",
"self",
",",
"team",
")",
":",
"return",
"[",
"District",
"(",
"raw",
")",
"for",
"raw",
"in",
"self",
".",
"_get",
"(",
"'team/%s/districts'",
"%",
"self",
".",
"team_key",
"(",
"team",
")",
")",
"]"
] | Get districts a team has competed in.
:param team: Team to get data on.
:return: List of District objects. | [
"Get",
"districts",
"a",
"team",
"has",
"competed",
"in",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L209-L216 | train | 29,851 |
def team_profiles(self, team):
    """
    Get team's social media profiles linked on their TBA page.

    :param team: Team to get data on.
    :return: List of Profile objects.
    """
    raw_profiles = self._get('team/%s/social_media' % self.team_key(team))
    return [Profile(item) for item in raw_profiles]
"""
Get team's social media profiles linked on their TBA page.
:param team: Team to get data on.
:return: List of Profile objects.
"""
return [Profile(raw) for raw in self._get('team/%s/social_media' % self.team_key(team))] | [
"def",
"team_profiles",
"(",
"self",
",",
"team",
")",
":",
"return",
"[",
"Profile",
"(",
"raw",
")",
"for",
"raw",
"in",
"self",
".",
"_get",
"(",
"'team/%s/social_media'",
"%",
"self",
".",
"team_key",
"(",
"team",
")",
")",
"]"
] | Get team's social media profiles linked on their TBA page.
:param team: Team to get data on.
:return: List of Profile objects. | [
"Get",
"team",
"s",
"social",
"media",
"profiles",
"linked",
"on",
"their",
"TBA",
"page",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L218-L225 | train | 29,852 |
def team_status(self, team, event):
    """
    Get status of a team at an event.

    :param team: Team whose status to get.
    :param event: Event team is at.
    :return: Status object.
    """
    url = 'team/%s/event/%s/status' % (self.team_key(team), event)
    return Status(self._get(url))
"""
Get status of a team at an event.
:param team: Team whose status to get.
:param event: Event team is at.
:return: Status object.
"""
return Status(self._get('team/%s/event/%s/status' % (self.team_key(team), event))) | [
"def",
"team_status",
"(",
"self",
",",
"team",
",",
"event",
")",
":",
"return",
"Status",
"(",
"self",
".",
"_get",
"(",
"'team/%s/event/%s/status'",
"%",
"(",
"self",
".",
"team_key",
"(",
"team",
")",
",",
"event",
")",
")",
")"
] | Get status of a team at an event.
:param team: Team whose status to get.
:param event: Event team is at.
:return: Status object. | [
"Get",
"status",
"of",
"a",
"team",
"at",
"an",
"event",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L227-L235 | train | 29,853 |
def events(self, year, simple=False, keys=False):
    """
    Get a list of events in a given year.

    :param year: Year to get events from.
    :param keys: Get only keys of the events rather than full data.
    :param simple: Get only vital data.
    :return: List of string event keys or Event objects.
    """
    if keys:
        return self._get('events/%s/keys' % year)
    suffix = '/simple' if simple else ''
    return [Event(raw) for raw in self._get('events/%s%s' % (year, suffix))]
"""
Get a list of events in a given year.
:param year: Year to get events from.
:param keys: Get only keys of the events rather than full data.
:param simple: Get only vital data.
:return: List of string event keys or Event objects.
"""
if keys:
return self._get('events/%s/keys' % year)
else:
return [Event(raw) for raw in self._get('events/%s%s' % (year, '/simple' if simple else ''))] | [
"def",
"events",
"(",
"self",
",",
"year",
",",
"simple",
"=",
"False",
",",
"keys",
"=",
"False",
")",
":",
"if",
"keys",
":",
"return",
"self",
".",
"_get",
"(",
"'events/%s/keys'",
"%",
"year",
")",
"else",
":",
"return",
"[",
"Event",
"(",
"raw... | Get a list of events in a given year.
:param year: Year to get events from.
:param keys: Get only keys of the events rather than full data.
:param simple: Get only vital data.
:return: List of string event keys or Event objects. | [
"Get",
"a",
"list",
"of",
"events",
"in",
"a",
"given",
"year",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L237-L249 | train | 29,854 |
def event(self, event, simple=False):
    """
    Get basic information about an event.

    More specific data (typically obtained with the detail_type URL parameter) can be obtained with event_alliances(), event_district_points(), event_insights(), event_oprs(), event_predictions(), and event_rankings().

    :param event: Key of event for which you desire data.
    :param simple: Get only vital data.
    :return: A single Event object.
    """
    suffix = '/simple' if simple else ''
    return Event(self._get('event/%s%s' % (event, suffix)))
"""
Get basic information about an event.
More specific data (typically obtained with the detail_type URL parameter) can be obtained with event_alliances(), event_district_points(), event_insights(), event_oprs(), event_predictions(), and event_rankings().
:param event: Key of event for which you desire data.
:param simple: Get only vital data.
:return: A single Event object.
"""
return Event(self._get('event/%s%s' % (event, '/simple' if simple else ''))) | [
"def",
"event",
"(",
"self",
",",
"event",
",",
"simple",
"=",
"False",
")",
":",
"return",
"Event",
"(",
"self",
".",
"_get",
"(",
"'event/%s%s'",
"%",
"(",
"event",
",",
"'/simple'",
"if",
"simple",
"else",
"''",
")",
")",
")"
] | Get basic information about an event.
More specific data (typically obtained with the detail_type URL parameter) can be obtained with event_alliances(), event_district_points(), event_insights(), event_oprs(), event_predictions(), and event_rankings().
:param event: Key of event for which you desire data.
:param simple: Get only vital data.
:return: A single Event object. | [
"Get",
"basic",
"information",
"about",
"an",
"event",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L251-L261 | train | 29,855 |
def event_teams(self, event, simple=False, keys=False):
    """
    Get list of teams at an event.

    :param event: Event key to get data on.
    :param simple: Get only vital data.
    :param keys: Return list of team keys only rather than full data on every team.
    :return: List of string keys or Team objects.
    """
    if keys:
        return self._get('event/%s/teams/keys' % event)
    suffix = '/simple' if simple else ''
    return [Team(raw) for raw in self._get('event/%s/teams%s' % (event, suffix))]
"""
Get list of teams at an event.
:param event: Event key to get data on.
:param simple: Get only vital data.
:param keys: Return list of team keys only rather than full data on every team.
:return: List of string keys or Team objects.
"""
if keys:
return self._get('event/%s/teams/keys' % event)
else:
return [Team(raw) for raw in self._get('event/%s/teams%s' % (event, '/simple' if simple else ''))] | [
"def",
"event_teams",
"(",
"self",
",",
"event",
",",
"simple",
"=",
"False",
",",
"keys",
"=",
"False",
")",
":",
"if",
"keys",
":",
"return",
"self",
".",
"_get",
"(",
"'event/%s/teams/keys'",
"%",
"event",
")",
"else",
":",
"return",
"[",
"Team",
... | Get list of teams at an event.
:param event: Event key to get data on.
:param simple: Get only vital data.
:param keys: Return list of team keys only rather than full data on every team.
:return: List of string keys or Team objects. | [
"Get",
"list",
"of",
"teams",
"at",
"an",
"event",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L317-L329 | train | 29,856 |
def event_matches(self, event, simple=False, keys=False):
    """
    Get list of matches played at an event.

    :param event: Event key to get data on.
    :param keys: Return list of match keys only rather than full data on every match.
    :param simple: Get only vital data.
    :return: List of string keys or Match objects.
    """
    if keys:
        return self._get('event/%s/matches/keys' % event)
    suffix = '/simple' if simple else ''
    return [Match(raw) for raw in self._get('event/%s/matches%s' % (event, suffix))]
"""
Get list of matches played at an event.
:param event: Event key to get data on.
:param keys: Return list of match keys only rather than full data on every match.
:param simple: Get only vital data.
:return: List of string keys or Match objects.
"""
if keys:
return self._get('event/%s/matches/keys' % event)
else:
return [Match(raw) for raw in self._get('event/%s/matches%s' % (event, '/simple' if simple else ''))] | [
"def",
"event_matches",
"(",
"self",
",",
"event",
",",
"simple",
"=",
"False",
",",
"keys",
"=",
"False",
")",
":",
"if",
"keys",
":",
"return",
"self",
".",
"_get",
"(",
"'event/%s/matches/keys'",
"%",
"event",
")",
"else",
":",
"return",
"[",
"Match... | Get list of matches played at an event.
:param event: Event key to get data on.
:param keys: Return list of match keys only rather than full data on every match.
:param simple: Get only vital data.
:return: List of string keys or Match objects. | [
"Get",
"list",
"of",
"matches",
"played",
"at",
"an",
"event",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L340-L352 | train | 29,857 |
def match(self, key=None, year=None, event=None, type='qm', number=None, round=None, simple=False):
    """
    Get data on a match.

    You may either pass the match's key directly, or pass `year`, `event`, `type`, `match` (the match number), and `round` if applicable (playoffs only). The event year may be specified as part of the event key or specified in the `year` parameter.

    :param key: Key of match to get data on. First option for specifying a match (see above).
    :param year: Year in which match took place. Optional; if excluded then must be included in event key.
    :param event: Key of event in which match took place. Including year is optional; if excluded then must be specified in `year` parameter.
    :param type: One of 'qm' (qualifier match), 'qf' (quarterfinal), 'sf' (semifinal), 'f' (final). If unspecified, 'qm' will be assumed.
    :param number: Match number. For example, for qualifier 32, you'd pass 32. For Semifinal 2 round 3, you'd pass 2.
    :param round: For playoff matches, you will need to specify a round.
    :param simple: Get only vital data.
    :return: A single Match object.
    """
    suffix = '/simple' if simple else ''
    if key:
        return Match(self._get('match/%s%s' % (key, suffix)))
    # Assemble a match key from its parts. The year is prepended only
    # when the event key does not already start with a digit (i.e. the
    # year is not already embedded in it); non-qualifier (playoff)
    # matches additionally carry an 'm<round>' suffix.
    year_part = '' if event[0].isdigit() else year
    round_part = '' if type == 'qm' else 'm%s' % round
    match_key = 'match/{0}{1}_{2}{3}{4}{5}'.format(
        year_part, event, type, number, round_part, suffix)
    return Match(self._get(match_key))
"""
Get data on a match.
You may either pass the match's key directly, or pass `year`, `event`, `type`, `match` (the match number), and `round` if applicable (playoffs only). The event year may be specified as part of the event key or specified in the `year` parameter.
:param key: Key of match to get data on. First option for specifying a match (see above).
:param year: Year in which match took place. Optional; if excluded then must be included in event key.
:param event: Key of event in which match took place. Including year is optional; if excluded then must be specified in `year` parameter.
:param type: One of 'qm' (qualifier match), 'qf' (quarterfinal), 'sf' (semifinal), 'f' (final). If unspecified, 'qm' will be assumed.
:param number: Match number. For example, for qualifier 32, you'd pass 32. For Semifinal 2 round 3, you'd pass 2.
:param round: For playoff matches, you will need to specify a round.
:param simple: Get only vital data.
:return: A single Match object.
"""
if key:
return Match(self._get('match/%s%s' % (key, '/simple' if simple else '')))
else:
return Match(self._get('match/{year}{event}_{type}{number}{round}{simple}'.format(year=year if not event[0].isdigit() else '',
event=event,
type=type,
number=number,
round=('m%s' % round) if not type == 'qm' else '',
simple='/simple' if simple else ''))) | [
"def",
"match",
"(",
"self",
",",
"key",
"=",
"None",
",",
"year",
"=",
"None",
",",
"event",
"=",
"None",
",",
"type",
"=",
"'qm'",
",",
"number",
"=",
"None",
",",
"round",
"=",
"None",
",",
"simple",
"=",
"False",
")",
":",
"if",
"key",
":",... | Get data on a match.
You may either pass the match's key directly, or pass `year`, `event`, `type`, `match` (the match number), and `round` if applicable (playoffs only). The event year may be specified as part of the event key or specified in the `year` parameter.
:param key: Key of match to get data on. First option for specifying a match (see above).
:param year: Year in which match took place. Optional; if excluded then must be included in event key.
:param event: Key of event in which match took place. Including year is optional; if excluded then must be specified in `year` parameter.
:param type: One of 'qm' (qualifier match), 'qf' (quarterfinal), 'sf' (semifinal), 'f' (final). If unspecified, 'qm' will be assumed.
:param number: Match number. For example, for qualifier 32, you'd pass 32. For Semifinal 2 round 3, you'd pass 2.
:param round: For playoff matches, you will need to specify a round.
:param simple: Get only vital data.
:return: A single Match object. | [
"Get",
"data",
"on",
"a",
"match",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L354-L377 | train | 29,858 |
def district_events(self, district, simple=False, keys=False):
    """
    Return list of events in a given district.

    :param district: Key of district whose events you want.
    :param simple: Get only vital data.
    :param keys: Return list of event keys only rather than full data on every event.
    :return: List of string keys or Event objects.
    """
    if keys:
        return self._get('district/%s/events/keys' % district)
    suffix = '/simple' if simple else ''
    return [Event(raw) for raw in self._get('district/%s/events%s' % (district, suffix))]
"""
Return list of events in a given district.
:param district: Key of district whose events you want.
:param simple: Get only vital data.
:param keys: Return list of event keys only rather than full data on every event.
:return: List of string keys or Event objects.
"""
if keys:
return self._get('district/%s/events/keys' % district)
else:
return [Event(raw) for raw in self._get('district/%s/events%s' % (district, '/simple' if simple else ''))] | [
"def",
"district_events",
"(",
"self",
",",
"district",
",",
"simple",
"=",
"False",
",",
"keys",
"=",
"False",
")",
":",
"if",
"keys",
":",
"return",
"self",
".",
"_get",
"(",
"'district/%s/events/keys'",
"%",
"district",
")",
"else",
":",
"return",
"["... | Return list of events in a given district.
:param district: Key of district whose events you want.
:param simple: Get only vital data.
:param keys: Return list of event keys only rather than full data on every event.
:return: List of string keys or Event objects. | [
"Return",
"list",
"of",
"events",
"in",
"a",
"given",
"district",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L388-L400 | train | 29,859 |
frc1418/tbapy | tbapy/main.py | TBA.district_teams | def district_teams(self, district, simple=False, keys=False):
"""
Get list of teams in the given district.
:param district: Key for the district to get teams in.
:param simple: Get only vital data.
:param keys: Return list of team keys only rather than full data on every team.
:return: List of string keys or Team objects.
"""
if keys:
return self._get('district/%s/teams/keys' % district)
else:
return [Team(raw) for raw in self._get('district/%s/teams' % district)] | python | def district_teams(self, district, simple=False, keys=False):
"""
Get list of teams in the given district.
:param district: Key for the district to get teams in.
:param simple: Get only vital data.
:param keys: Return list of team keys only rather than full data on every team.
:return: List of string keys or Team objects.
"""
if keys:
return self._get('district/%s/teams/keys' % district)
else:
return [Team(raw) for raw in self._get('district/%s/teams' % district)] | [
"def",
"district_teams",
"(",
"self",
",",
"district",
",",
"simple",
"=",
"False",
",",
"keys",
"=",
"False",
")",
":",
"if",
"keys",
":",
"return",
"self",
".",
"_get",
"(",
"'district/%s/teams/keys'",
"%",
"district",
")",
"else",
":",
"return",
"[",
... | Get list of teams in the given district.
:param district: Key for the district to get teams in.
:param simple: Get only vital data.
:param keys: Return list of team keys only rather than full data on every team.
:return: List of string keys or Team objects. | [
"Get",
"list",
"of",
"teams",
"in",
"the",
"given",
"district",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L411-L423 | train | 29,860 |
frc1418/tbapy | tbapy/main.py | TBA.update_trusted | def update_trusted(self, auth_id, auth_secret, event_key):
"""
Set Trusted API ID and Secret and the event key they are assigned to.
:param auth_id: Your event authorization ID, obtainable at https://www.thebluealliance.com/request/apiwrite
:param auth_secret: Your event authorization secret, obtainable at https://www.thebluealliance.com/request/apiwrite
:param event_key: The event key that is linked to the ID and secret provided.
"""
self.session.headers.update({'X-TBA-Auth-Id': auth_id})
self.auth_secret = auth_secret
self.event_key = event_key | python | def update_trusted(self, auth_id, auth_secret, event_key):
"""
Set Trusted API ID and Secret and the event key they are assigned to.
:param auth_id: Your event authorization ID, obtainable at https://www.thebluealliance.com/request/apiwrite
:param auth_secret: Your event authorization secret, obtainable at https://www.thebluealliance.com/request/apiwrite
:param event_key: The event key that is linked to the ID and secret provided.
"""
self.session.headers.update({'X-TBA-Auth-Id': auth_id})
self.auth_secret = auth_secret
self.event_key = event_key | [
"def",
"update_trusted",
"(",
"self",
",",
"auth_id",
",",
"auth_secret",
",",
"event_key",
")",
":",
"self",
".",
"session",
".",
"headers",
".",
"update",
"(",
"{",
"'X-TBA-Auth-Id'",
":",
"auth_id",
"}",
")",
"self",
".",
"auth_secret",
"=",
"auth_secre... | Set Trusted API ID and Secret and the event key they are assigned to.
:param auth_id: Your event authorization ID, obtainable at https://www.thebluealliance.com/request/apiwrite
:param auth_secret: Your event authorization secret, obtainable at https://www.thebluealliance.com/request/apiwrite
:param event_key: The event key that is linked to the ID and secret provided. | [
"Set",
"Trusted",
"API",
"ID",
"and",
"Secret",
"and",
"the",
"event",
"key",
"they",
"are",
"assigned",
"to",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L425-L435 | train | 29,861 |
frc1418/tbapy | tbapy/main.py | TBA.delete_event_matches | def delete_event_matches(self, data=None):
"""
Delete an event's matches on The Blue Alliance.
:param data: List of match keys to delete, can be ommited if you would like to delete all matches.
"""
return self._post('event/%s/matches/delete_all' if data is None else 'event/%s/matches/delete', json.dumps(self.event_key) if data is None else json.dumps(data)) | python | def delete_event_matches(self, data=None):
"""
Delete an event's matches on The Blue Alliance.
:param data: List of match keys to delete, can be ommited if you would like to delete all matches.
"""
return self._post('event/%s/matches/delete_all' if data is None else 'event/%s/matches/delete', json.dumps(self.event_key) if data is None else json.dumps(data)) | [
"def",
"delete_event_matches",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"return",
"self",
".",
"_post",
"(",
"'event/%s/matches/delete_all'",
"if",
"data",
"is",
"None",
"else",
"'event/%s/matches/delete'",
",",
"json",
".",
"dumps",
"(",
"self",
".",
... | Delete an event's matches on The Blue Alliance.
:param data: List of match keys to delete, can be ommited if you would like to delete all matches. | [
"Delete",
"an",
"event",
"s",
"matches",
"on",
"The",
"Blue",
"Alliance",
"."
] | 3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4 | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L469-L475 | train | 29,862 |
stevearc/dql | dql/expressions/constraint.py | ConstraintExpression.from_where | def from_where(cls, where):
""" Factory method for creating the top-level expression """
if where.conjunction:
return Conjunction.from_clause(where)
else:
return cls.from_clause(where[0]) | python | def from_where(cls, where):
""" Factory method for creating the top-level expression """
if where.conjunction:
return Conjunction.from_clause(where)
else:
return cls.from_clause(where[0]) | [
"def",
"from_where",
"(",
"cls",
",",
"where",
")",
":",
"if",
"where",
".",
"conjunction",
":",
"return",
"Conjunction",
".",
"from_clause",
"(",
"where",
")",
"else",
":",
"return",
"cls",
".",
"from_clause",
"(",
"where",
"[",
"0",
"]",
")"
] | Factory method for creating the top-level expression | [
"Factory",
"method",
"for",
"creating",
"the",
"top",
"-",
"level",
"expression"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/expressions/constraint.py#L30-L35 | train | 29,863 |
stevearc/dql | dql/expressions/constraint.py | Conjunction._factory | def _factory(cls, constraints, op):
""" Factory for joining constraints with a single conjunction """
pieces = []
for i, constraint in enumerate(constraints):
pieces.append(constraint)
if i != len(constraints) - 1:
pieces.append(op)
return cls(pieces) | python | def _factory(cls, constraints, op):
""" Factory for joining constraints with a single conjunction """
pieces = []
for i, constraint in enumerate(constraints):
pieces.append(constraint)
if i != len(constraints) - 1:
pieces.append(op)
return cls(pieces) | [
"def",
"_factory",
"(",
"cls",
",",
"constraints",
",",
"op",
")",
":",
"pieces",
"=",
"[",
"]",
"for",
"i",
",",
"constraint",
"in",
"enumerate",
"(",
"constraints",
")",
":",
"pieces",
".",
"append",
"(",
"constraint",
")",
"if",
"i",
"!=",
"len",
... | Factory for joining constraints with a single conjunction | [
"Factory",
"for",
"joining",
"constraints",
"with",
"a",
"single",
"conjunction"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/expressions/constraint.py#L131-L138 | train | 29,864 |
stevearc/dql | dql/expressions/constraint.py | Conjunction.remove_index | def remove_index(self, index):
"""
This one takes some explanation. When we do a query with a WHERE
statement, it may end up being a scan and it may end up being a query.
If it is a query, we need to remove the hash and range key constraints
from the expression and return that as the query_constraints. The
remaining constraints, if any, are returned as the filter_constraints.
"""
# We can only be doing this if all of the joining ops are AND (no OR),
# so we don't even need to worry about OR's
query = []
remainder = []
for i in range(0, len(self.pieces), 2):
const = self.pieces[i]
if const.hash_field == index.hash_key:
query.append(const)
elif index.range_key is not None and const.range_field == index.range_key:
query.append(const)
else:
remainder.append(const)
if len(query) == 1:
query_constraints = query[0]
else:
query_constraints = Conjunction.and_(query)
if not remainder:
filter_constraints = None
elif len(remainder) == 1:
filter_constraints = remainder[0]
else:
filter_constraints = Conjunction.and_(remainder)
return (query_constraints, filter_constraints) | python | def remove_index(self, index):
"""
This one takes some explanation. When we do a query with a WHERE
statement, it may end up being a scan and it may end up being a query.
If it is a query, we need to remove the hash and range key constraints
from the expression and return that as the query_constraints. The
remaining constraints, if any, are returned as the filter_constraints.
"""
# We can only be doing this if all of the joining ops are AND (no OR),
# so we don't even need to worry about OR's
query = []
remainder = []
for i in range(0, len(self.pieces), 2):
const = self.pieces[i]
if const.hash_field == index.hash_key:
query.append(const)
elif index.range_key is not None and const.range_field == index.range_key:
query.append(const)
else:
remainder.append(const)
if len(query) == 1:
query_constraints = query[0]
else:
query_constraints = Conjunction.and_(query)
if not remainder:
filter_constraints = None
elif len(remainder) == 1:
filter_constraints = remainder[0]
else:
filter_constraints = Conjunction.and_(remainder)
return (query_constraints, filter_constraints) | [
"def",
"remove_index",
"(",
"self",
",",
"index",
")",
":",
"# We can only be doing this if all of the joining ops are AND (no OR),",
"# so we don't even need to worry about OR's",
"query",
"=",
"[",
"]",
"remainder",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
... | This one takes some explanation. When we do a query with a WHERE
statement, it may end up being a scan and it may end up being a query.
If it is a query, we need to remove the hash and range key constraints
from the expression and return that as the query_constraints. The
remaining constraints, if any, are returned as the filter_constraints. | [
"This",
"one",
"takes",
"some",
"explanation",
".",
"When",
"we",
"do",
"a",
"query",
"with",
"a",
"WHERE",
"statement",
"it",
"may",
"end",
"up",
"being",
"a",
"scan",
"and",
"it",
"may",
"end",
"up",
"being",
"a",
"query",
".",
"If",
"it",
"is",
... | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/expressions/constraint.py#L181-L212 | train | 29,865 |
stevearc/dql | dql/engine.py | default | def default(value):
""" Default encoder for JSON """
if isinstance(value, Decimal):
primative = float(value)
if int(primative) == primative:
return int(primative)
else:
return primative
elif isinstance(value, set):
return list(value)
elif isinstance(value, Binary):
return b64encode(value.value)
raise TypeError("Cannot encode %s value %r" % (type(value), value)) | python | def default(value):
""" Default encoder for JSON """
if isinstance(value, Decimal):
primative = float(value)
if int(primative) == primative:
return int(primative)
else:
return primative
elif isinstance(value, set):
return list(value)
elif isinstance(value, Binary):
return b64encode(value.value)
raise TypeError("Cannot encode %s value %r" % (type(value), value)) | [
"def",
"default",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Decimal",
")",
":",
"primative",
"=",
"float",
"(",
"value",
")",
"if",
"int",
"(",
"primative",
")",
"==",
"primative",
":",
"return",
"int",
"(",
"primative",
")",
"e... | Default encoder for JSON | [
"Default",
"encoder",
"for",
"JSON"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L48-L60 | train | 29,866 |
stevearc/dql | dql/engine.py | add_query_kwargs | def add_query_kwargs(kwargs, visitor, constraints, index):
""" Construct KeyConditionExpression and FilterExpression """
(query_const, filter_const) = constraints.remove_index(index)
kwargs["key_condition_expr"] = query_const.build(visitor)
if filter_const:
kwargs["filter"] = filter_const.build(visitor)
if index.name != "TABLE":
kwargs["index"] = index.name | python | def add_query_kwargs(kwargs, visitor, constraints, index):
""" Construct KeyConditionExpression and FilterExpression """
(query_const, filter_const) = constraints.remove_index(index)
kwargs["key_condition_expr"] = query_const.build(visitor)
if filter_const:
kwargs["filter"] = filter_const.build(visitor)
if index.name != "TABLE":
kwargs["index"] = index.name | [
"def",
"add_query_kwargs",
"(",
"kwargs",
",",
"visitor",
",",
"constraints",
",",
"index",
")",
":",
"(",
"query_const",
",",
"filter_const",
")",
"=",
"constraints",
".",
"remove_index",
"(",
"index",
")",
"kwargs",
"[",
"\"key_condition_expr\"",
"]",
"=",
... | Construct KeyConditionExpression and FilterExpression | [
"Construct",
"KeyConditionExpression",
"and",
"FilterExpression"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L67-L74 | train | 29,867 |
stevearc/dql | dql/engine.py | iter_insert_items | def iter_insert_items(tree):
""" Iterate over the items to insert from an INSERT statement """
if tree.list_values:
keys = tree.attrs
for values in tree.list_values:
if len(keys) != len(values):
raise SyntaxError(
"Values '%s' do not match attributes " "'%s'" % (values, keys)
)
yield dict(zip(keys, map(resolve, values)))
elif tree.map_values:
for item in tree.map_values:
data = {}
for (key, val) in item:
data[key] = resolve(val)
yield data
else:
raise SyntaxError("No insert data found") | python | def iter_insert_items(tree):
""" Iterate over the items to insert from an INSERT statement """
if tree.list_values:
keys = tree.attrs
for values in tree.list_values:
if len(keys) != len(values):
raise SyntaxError(
"Values '%s' do not match attributes " "'%s'" % (values, keys)
)
yield dict(zip(keys, map(resolve, values)))
elif tree.map_values:
for item in tree.map_values:
data = {}
for (key, val) in item:
data[key] = resolve(val)
yield data
else:
raise SyntaxError("No insert data found") | [
"def",
"iter_insert_items",
"(",
"tree",
")",
":",
"if",
"tree",
".",
"list_values",
":",
"keys",
"=",
"tree",
".",
"attrs",
"for",
"values",
"in",
"tree",
".",
"list_values",
":",
"if",
"len",
"(",
"keys",
")",
"!=",
"len",
"(",
"values",
")",
":",
... | Iterate over the items to insert from an INSERT statement | [
"Iterate",
"over",
"the",
"items",
"to",
"insert",
"from",
"an",
"INSERT",
"statement"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L77-L94 | train | 29,868 |
stevearc/dql | dql/engine.py | Engine.connect | def connect(self, *args, **kwargs):
""" Proxy to DynamoDBConnection.connect. """
self.connection = DynamoDBConnection.connect(*args, **kwargs)
self._session = kwargs.get("session")
if self._session is None:
self._session = botocore.session.get_session() | python | def connect(self, *args, **kwargs):
""" Proxy to DynamoDBConnection.connect. """
self.connection = DynamoDBConnection.connect(*args, **kwargs)
self._session = kwargs.get("session")
if self._session is None:
self._session = botocore.session.get_session() | [
"def",
"connect",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"connection",
"=",
"DynamoDBConnection",
".",
"connect",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_session",
"=",
"kwargs",
".",
"... | Proxy to DynamoDBConnection.connect. | [
"Proxy",
"to",
"DynamoDBConnection",
".",
"connect",
"."
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L132-L137 | train | 29,869 |
stevearc/dql | dql/engine.py | Engine.connection | def connection(self, connection):
""" Change the dynamo connection """
if connection is not None:
connection.subscribe("capacity", self._on_capacity_data)
connection.default_return_capacity = True
if self._connection is not None:
connection.unsubscribe("capacity", self._on_capacity_data)
self._connection = connection
self._cloudwatch_connection = None
self.cached_descriptions = {} | python | def connection(self, connection):
""" Change the dynamo connection """
if connection is not None:
connection.subscribe("capacity", self._on_capacity_data)
connection.default_return_capacity = True
if self._connection is not None:
connection.unsubscribe("capacity", self._on_capacity_data)
self._connection = connection
self._cloudwatch_connection = None
self.cached_descriptions = {} | [
"def",
"connection",
"(",
"self",
",",
"connection",
")",
":",
"if",
"connection",
"is",
"not",
"None",
":",
"connection",
".",
"subscribe",
"(",
"\"capacity\"",
",",
"self",
".",
"_on_capacity_data",
")",
"connection",
".",
"default_return_capacity",
"=",
"Tr... | Change the dynamo connection | [
"Change",
"the",
"dynamo",
"connection"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L150-L159 | train | 29,870 |
stevearc/dql | dql/engine.py | Engine.cloudwatch_connection | def cloudwatch_connection(self):
""" Lazy create a connection to cloudwatch """
if self._cloudwatch_connection is None:
conn = self._session.create_client("cloudwatch", self.connection.region)
self._cloudwatch_connection = conn
return self._cloudwatch_connection | python | def cloudwatch_connection(self):
""" Lazy create a connection to cloudwatch """
if self._cloudwatch_connection is None:
conn = self._session.create_client("cloudwatch", self.connection.region)
self._cloudwatch_connection = conn
return self._cloudwatch_connection | [
"def",
"cloudwatch_connection",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cloudwatch_connection",
"is",
"None",
":",
"conn",
"=",
"self",
".",
"_session",
".",
"create_client",
"(",
"\"cloudwatch\"",
",",
"self",
".",
"connection",
".",
"region",
")",
"sel... | Lazy create a connection to cloudwatch | [
"Lazy",
"create",
"a",
"connection",
"to",
"cloudwatch"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L162-L167 | train | 29,871 |
stevearc/dql | dql/engine.py | Engine._format_explain | def _format_explain(self):
""" Format the results of an EXPLAIN """
lines = []
for (command, kwargs) in self._call_list:
lines.append(command + " " + pformat(kwargs))
return "\n".join(lines) | python | def _format_explain(self):
""" Format the results of an EXPLAIN """
lines = []
for (command, kwargs) in self._call_list:
lines.append(command + " " + pformat(kwargs))
return "\n".join(lines) | [
"def",
"_format_explain",
"(",
"self",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"(",
"command",
",",
"kwargs",
")",
"in",
"self",
".",
"_call_list",
":",
"lines",
".",
"append",
"(",
"command",
"+",
"\" \"",
"+",
"pformat",
"(",
"kwargs",
")",
")",
... | Format the results of an EXPLAIN | [
"Format",
"the",
"results",
"of",
"an",
"EXPLAIN"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L169-L174 | train | 29,872 |
stevearc/dql | dql/engine.py | Engine._pretty_format | def _pretty_format(self, statement, result):
""" Format the return value of a query for humans """
if result is None:
return "Success"
ret = result
if statement.action in ("SELECT", "SCAN"):
if statement.save_file:
filename = statement.save_file[0]
if filename[0] in ['"', "'"]:
filename = unwrap(filename)
ret = "Saved %d record%s to %s" % (result, plural(result), filename)
elif isinstance(result, int):
if result == result.scanned_count:
ret = "%d" % result
else:
ret = "%d (scanned count: %d)" % (result, result.scanned_count)
elif statement.action == "UPDATE":
if isinstance(result, int):
ret = "Updated %d item%s" % (result, plural(result))
elif statement.action == "DELETE":
ret = "Deleted %d item%s" % (result, plural(result))
elif statement.action == "CREATE":
if result:
ret = "Created table %r" % statement.table
else:
ret = "Table %r already exists" % statement.table
elif statement.action == "INSERT":
ret = "Inserted %d item%s" % (result, plural(result))
elif statement.action == "DROP":
if result:
ret = "Dropped table %r" % statement.table
else:
ret = "Table %r does not exist" % statement.table
elif statement.action == "ANALYZE":
ret = self._pretty_format(statement[1], result)
elif statement.action == "LOAD":
ret = "Loaded %d item%s" % (result, plural(result))
return ret | python | def _pretty_format(self, statement, result):
""" Format the return value of a query for humans """
if result is None:
return "Success"
ret = result
if statement.action in ("SELECT", "SCAN"):
if statement.save_file:
filename = statement.save_file[0]
if filename[0] in ['"', "'"]:
filename = unwrap(filename)
ret = "Saved %d record%s to %s" % (result, plural(result), filename)
elif isinstance(result, int):
if result == result.scanned_count:
ret = "%d" % result
else:
ret = "%d (scanned count: %d)" % (result, result.scanned_count)
elif statement.action == "UPDATE":
if isinstance(result, int):
ret = "Updated %d item%s" % (result, plural(result))
elif statement.action == "DELETE":
ret = "Deleted %d item%s" % (result, plural(result))
elif statement.action == "CREATE":
if result:
ret = "Created table %r" % statement.table
else:
ret = "Table %r already exists" % statement.table
elif statement.action == "INSERT":
ret = "Inserted %d item%s" % (result, plural(result))
elif statement.action == "DROP":
if result:
ret = "Dropped table %r" % statement.table
else:
ret = "Table %r does not exist" % statement.table
elif statement.action == "ANALYZE":
ret = self._pretty_format(statement[1], result)
elif statement.action == "LOAD":
ret = "Loaded %d item%s" % (result, plural(result))
return ret | [
"def",
"_pretty_format",
"(",
"self",
",",
"statement",
",",
"result",
")",
":",
"if",
"result",
"is",
"None",
":",
"return",
"\"Success\"",
"ret",
"=",
"result",
"if",
"statement",
".",
"action",
"in",
"(",
"\"SELECT\"",
",",
"\"SCAN\"",
")",
":",
"if",... | Format the return value of a query for humans | [
"Format",
"the",
"return",
"value",
"of",
"a",
"query",
"for",
"humans"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L176-L213 | train | 29,873 |
stevearc/dql | dql/engine.py | Engine.describe_all | def describe_all(self, refresh=True):
""" Describe all tables in the connected region """
tables = self.connection.list_tables()
descs = []
for tablename in tables:
descs.append(self.describe(tablename, refresh))
return descs | python | def describe_all(self, refresh=True):
""" Describe all tables in the connected region """
tables = self.connection.list_tables()
descs = []
for tablename in tables:
descs.append(self.describe(tablename, refresh))
return descs | [
"def",
"describe_all",
"(",
"self",
",",
"refresh",
"=",
"True",
")",
":",
"tables",
"=",
"self",
".",
"connection",
".",
"list_tables",
"(",
")",
"descs",
"=",
"[",
"]",
"for",
"tablename",
"in",
"tables",
":",
"descs",
".",
"append",
"(",
"self",
"... | Describe all tables in the connected region | [
"Describe",
"all",
"tables",
"in",
"the",
"connected",
"region"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L215-L221 | train | 29,874 |
stevearc/dql | dql/engine.py | Engine.execute | def execute(self, commands, pretty_format=False):
"""
Parse and run a DQL string
Parameters
----------
commands : str
The DQL command string
pretty_format : bool
Pretty-format the return value. (e.g. 4 -> 'Updated 4 items')
"""
tree = parser.parseString(commands)
self.consumed_capacities = []
self._analyzing = False
self._query_rate_limit = None
for statement in tree:
try:
result = self._run(statement)
except ExplainSignal:
return self._format_explain()
if pretty_format:
return self._pretty_format(tree[-1], result)
return result | python | def execute(self, commands, pretty_format=False):
"""
Parse and run a DQL string
Parameters
----------
commands : str
The DQL command string
pretty_format : bool
Pretty-format the return value. (e.g. 4 -> 'Updated 4 items')
"""
tree = parser.parseString(commands)
self.consumed_capacities = []
self._analyzing = False
self._query_rate_limit = None
for statement in tree:
try:
result = self._run(statement)
except ExplainSignal:
return self._format_explain()
if pretty_format:
return self._pretty_format(tree[-1], result)
return result | [
"def",
"execute",
"(",
"self",
",",
"commands",
",",
"pretty_format",
"=",
"False",
")",
":",
"tree",
"=",
"parser",
".",
"parseString",
"(",
"commands",
")",
"self",
".",
"consumed_capacities",
"=",
"[",
"]",
"self",
".",
"_analyzing",
"=",
"False",
"se... | Parse and run a DQL string
Parameters
----------
commands : str
The DQL command string
pretty_format : bool
Pretty-format the return value. (e.g. 4 -> 'Updated 4 items') | [
"Parse",
"and",
"run",
"a",
"DQL",
"string"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L283-L306 | train | 29,875 |
stevearc/dql | dql/engine.py | Engine._run | def _run(self, tree):
""" Run a query from a parse tree """
if tree.throttle:
limiter = self._parse_throttle(tree.table, tree.throttle)
self._query_rate_limit = limiter
del tree["throttle"]
return self._run(tree)
if tree.action == "SELECT":
return self._select(tree, self.allow_select_scan)
elif tree.action == "SCAN":
return self._scan(tree)
elif tree.action == "DELETE":
return self._delete(tree)
elif tree.action == "UPDATE":
return self._update(tree)
elif tree.action == "CREATE":
return self._create(tree)
elif tree.action == "INSERT":
return self._insert(tree)
elif tree.action == "DROP":
return self._drop(tree)
elif tree.action == "ALTER":
return self._alter(tree)
elif tree.action == "DUMP":
return self._dump(tree)
elif tree.action == "LOAD":
return self._load(tree)
elif tree.action == "EXPLAIN":
return self._explain(tree)
elif tree.action == "ANALYZE":
self._analyzing = True
self.connection.default_return_capacity = True
return self._run(tree[1])
else:
raise SyntaxError("Unrecognized action '%s'" % tree.action) | python | def _run(self, tree):
""" Run a query from a parse tree """
if tree.throttle:
limiter = self._parse_throttle(tree.table, tree.throttle)
self._query_rate_limit = limiter
del tree["throttle"]
return self._run(tree)
if tree.action == "SELECT":
return self._select(tree, self.allow_select_scan)
elif tree.action == "SCAN":
return self._scan(tree)
elif tree.action == "DELETE":
return self._delete(tree)
elif tree.action == "UPDATE":
return self._update(tree)
elif tree.action == "CREATE":
return self._create(tree)
elif tree.action == "INSERT":
return self._insert(tree)
elif tree.action == "DROP":
return self._drop(tree)
elif tree.action == "ALTER":
return self._alter(tree)
elif tree.action == "DUMP":
return self._dump(tree)
elif tree.action == "LOAD":
return self._load(tree)
elif tree.action == "EXPLAIN":
return self._explain(tree)
elif tree.action == "ANALYZE":
self._analyzing = True
self.connection.default_return_capacity = True
return self._run(tree[1])
else:
raise SyntaxError("Unrecognized action '%s'" % tree.action) | [
"def",
"_run",
"(",
"self",
",",
"tree",
")",
":",
"if",
"tree",
".",
"throttle",
":",
"limiter",
"=",
"self",
".",
"_parse_throttle",
"(",
"tree",
".",
"table",
",",
"tree",
".",
"throttle",
")",
"self",
".",
"_query_rate_limit",
"=",
"limiter",
"del"... | Run a query from a parse tree | [
"Run",
"a",
"query",
"from",
"a",
"parse",
"tree"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L308-L342 | train | 29,876 |
stevearc/dql | dql/engine.py | Engine._parse_throttle | def _parse_throttle(self, tablename, throttle):
""" Parse a 'throttle' statement and return a RateLimit """
amount = []
desc = self.describe(tablename)
throughputs = [desc.read_throughput, desc.write_throughput]
for value, throughput in zip(throttle[1:], throughputs):
if value == "*":
amount.append(0)
elif value[-1] == "%":
amount.append(throughput * float(value[:-1]) / 100.0)
else:
amount.append(float(value))
cap = Capacity(*amount) # pylint: disable=E1120
return RateLimit(total=cap, callback=self._on_throttle) | python | def _parse_throttle(self, tablename, throttle):
""" Parse a 'throttle' statement and return a RateLimit """
amount = []
desc = self.describe(tablename)
throughputs = [desc.read_throughput, desc.write_throughput]
for value, throughput in zip(throttle[1:], throughputs):
if value == "*":
amount.append(0)
elif value[-1] == "%":
amount.append(throughput * float(value[:-1]) / 100.0)
else:
amount.append(float(value))
cap = Capacity(*amount) # pylint: disable=E1120
return RateLimit(total=cap, callback=self._on_throttle) | [
"def",
"_parse_throttle",
"(",
"self",
",",
"tablename",
",",
"throttle",
")",
":",
"amount",
"=",
"[",
"]",
"desc",
"=",
"self",
".",
"describe",
"(",
"tablename",
")",
"throughputs",
"=",
"[",
"desc",
".",
"read_throughput",
",",
"desc",
".",
"write_th... | Parse a 'throttle' statement and return a RateLimit | [
"Parse",
"a",
"throttle",
"statement",
"and",
"return",
"a",
"RateLimit"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L344-L357 | train | 29,877 |
stevearc/dql | dql/engine.py | Engine._on_capacity_data | def _on_capacity_data(self, conn, command, kwargs, response, capacity):
""" Log the received consumed capacity data """
if self._analyzing:
self.consumed_capacities.append((command, capacity))
if self._query_rate_limit is not None:
self._query_rate_limit.on_capacity(
conn, command, kwargs, response, capacity
)
elif self.rate_limit is not None:
self.rate_limit.callback = self._on_throttle
self.rate_limit.on_capacity(conn, command, kwargs, response, capacity) | python | def _on_capacity_data(self, conn, command, kwargs, response, capacity):
""" Log the received consumed capacity data """
if self._analyzing:
self.consumed_capacities.append((command, capacity))
if self._query_rate_limit is not None:
self._query_rate_limit.on_capacity(
conn, command, kwargs, response, capacity
)
elif self.rate_limit is not None:
self.rate_limit.callback = self._on_throttle
self.rate_limit.on_capacity(conn, command, kwargs, response, capacity) | [
"def",
"_on_capacity_data",
"(",
"self",
",",
"conn",
",",
"command",
",",
"kwargs",
",",
"response",
",",
"capacity",
")",
":",
"if",
"self",
".",
"_analyzing",
":",
"self",
".",
"consumed_capacities",
".",
"append",
"(",
"(",
"command",
",",
"capacity",
... | Log the received consumed capacity data | [
"Log",
"the",
"received",
"consumed",
"capacity",
"data"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L359-L369 | train | 29,878 |
stevearc/dql | dql/engine.py | Engine._on_throttle | def _on_throttle(self, conn, command, kwargs, response, capacity, seconds):
""" Print out a message when the query is throttled """
LOG.info(
"Throughput limit exceeded during %s. " "Sleeping for %d second%s",
command,
seconds,
plural(seconds),
) | python | def _on_throttle(self, conn, command, kwargs, response, capacity, seconds):
""" Print out a message when the query is throttled """
LOG.info(
"Throughput limit exceeded during %s. " "Sleeping for %d second%s",
command,
seconds,
plural(seconds),
) | [
"def",
"_on_throttle",
"(",
"self",
",",
"conn",
",",
"command",
",",
"kwargs",
",",
"response",
",",
"capacity",
",",
"seconds",
")",
":",
"LOG",
".",
"info",
"(",
"\"Throughput limit exceeded during %s. \"",
"\"Sleeping for %d second%s\"",
",",
"command",
",",
... | Print out a message when the query is throttled | [
"Print",
"out",
"a",
"message",
"when",
"the",
"query",
"is",
"throttled"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L371-L378 | train | 29,879 |
stevearc/dql | dql/engine.py | Engine._explain | def _explain(self, tree):
""" Set up the engine to do a dry run of a query """
self._explaining = True
self._call_list = []
old_call = self.connection.call
def fake_call(command, **kwargs):
""" Replacement for connection.call that logs args """
if command == "describe_table":
return old_call(command, **kwargs)
self._call_list.append((command, kwargs))
raise ExplainSignal
self.connection.call = fake_call
try:
ret = self._run(tree[1])
try:
list(ret)
except TypeError:
pass
finally:
self.connection.call = old_call
self._explaining = False | python | def _explain(self, tree):
""" Set up the engine to do a dry run of a query """
self._explaining = True
self._call_list = []
old_call = self.connection.call
def fake_call(command, **kwargs):
""" Replacement for connection.call that logs args """
if command == "describe_table":
return old_call(command, **kwargs)
self._call_list.append((command, kwargs))
raise ExplainSignal
self.connection.call = fake_call
try:
ret = self._run(tree[1])
try:
list(ret)
except TypeError:
pass
finally:
self.connection.call = old_call
self._explaining = False | [
"def",
"_explain",
"(",
"self",
",",
"tree",
")",
":",
"self",
".",
"_explaining",
"=",
"True",
"self",
".",
"_call_list",
"=",
"[",
"]",
"old_call",
"=",
"self",
".",
"connection",
".",
"call",
"def",
"fake_call",
"(",
"command",
",",
"*",
"*",
"kwa... | Set up the engine to do a dry run of a query | [
"Set",
"up",
"the",
"engine",
"to",
"do",
"a",
"dry",
"run",
"of",
"a",
"query"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L380-L402 | train | 29,880 |
stevearc/dql | dql/engine.py | Engine._iter_where_in | def _iter_where_in(self, tree):
""" Iterate over the KEYS IN and generate primary keys """
desc = self.describe(tree.table, require=True)
for keypair in tree.keys_in:
yield desc.primary_key(*map(resolve, keypair)) | python | def _iter_where_in(self, tree):
""" Iterate over the KEYS IN and generate primary keys """
desc = self.describe(tree.table, require=True)
for keypair in tree.keys_in:
yield desc.primary_key(*map(resolve, keypair)) | [
"def",
"_iter_where_in",
"(",
"self",
",",
"tree",
")",
":",
"desc",
"=",
"self",
".",
"describe",
"(",
"tree",
".",
"table",
",",
"require",
"=",
"True",
")",
"for",
"keypair",
"in",
"tree",
".",
"keys_in",
":",
"yield",
"desc",
".",
"primary_key",
... | Iterate over the KEYS IN and generate primary keys | [
"Iterate",
"over",
"the",
"KEYS",
"IN",
"and",
"generate",
"primary",
"keys"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L449-L453 | train | 29,881 |
stevearc/dql | dql/engine.py | Engine._query_and_op | def _query_and_op(self, tree, table, method_name, method_kwargs):
""" Query the table and perform an operation on each item """
result = []
if tree.keys_in:
if tree.using:
raise SyntaxError("Cannot use USING with KEYS IN")
keys = self._iter_where_in(tree)
else:
visitor = Visitor(self.reserved_words)
(action, kwargs, _) = self._build_query(table, tree, visitor)
attrs = [visitor.get_field(table.hash_key.name)]
if table.range_key is not None:
attrs.append(visitor.get_field(table.range_key.name))
kwargs["attributes"] = attrs
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
# If there is no 'where' on this update/delete, check with the
# caution_callback before proceeding.
if (
visitor.expression_values is None
and callable(self.caution_callback)
and not self.caution_callback(method_name) # pylint: disable=E1102
):
return False
method = getattr(self.connection, action + "2")
keys = method(table.name, **kwargs)
if self._explaining:
try:
list(keys)
except ExplainSignal:
keys = [{}]
method = getattr(self.connection, method_name + "2")
count = 0
for key in keys:
try:
ret = method(table.name, key, **method_kwargs)
except CheckFailed:
continue
count += 1
if ret:
result.append(ret)
if result:
return result
else:
return count | python | def _query_and_op(self, tree, table, method_name, method_kwargs):
""" Query the table and perform an operation on each item """
result = []
if tree.keys_in:
if tree.using:
raise SyntaxError("Cannot use USING with KEYS IN")
keys = self._iter_where_in(tree)
else:
visitor = Visitor(self.reserved_words)
(action, kwargs, _) = self._build_query(table, tree, visitor)
attrs = [visitor.get_field(table.hash_key.name)]
if table.range_key is not None:
attrs.append(visitor.get_field(table.range_key.name))
kwargs["attributes"] = attrs
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
# If there is no 'where' on this update/delete, check with the
# caution_callback before proceeding.
if (
visitor.expression_values is None
and callable(self.caution_callback)
and not self.caution_callback(method_name) # pylint: disable=E1102
):
return False
method = getattr(self.connection, action + "2")
keys = method(table.name, **kwargs)
if self._explaining:
try:
list(keys)
except ExplainSignal:
keys = [{}]
method = getattr(self.connection, method_name + "2")
count = 0
for key in keys:
try:
ret = method(table.name, key, **method_kwargs)
except CheckFailed:
continue
count += 1
if ret:
result.append(ret)
if result:
return result
else:
return count | [
"def",
"_query_and_op",
"(",
"self",
",",
"tree",
",",
"table",
",",
"method_name",
",",
"method_kwargs",
")",
":",
"result",
"=",
"[",
"]",
"if",
"tree",
".",
"keys_in",
":",
"if",
"tree",
".",
"using",
":",
"raise",
"SyntaxError",
"(",
"\"Cannot use US... | Query the table and perform an operation on each item | [
"Query",
"the",
"table",
"and",
"perform",
"an",
"operation",
"on",
"each",
"item"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L620-L665 | train | 29,882 |
stevearc/dql | dql/engine.py | Engine._delete | def _delete(self, tree):
""" Run a DELETE statement """
tablename = tree.table
table = self.describe(tablename, require=True)
kwargs = {}
visitor = Visitor(self.reserved_words)
if tree.where:
constraints = ConstraintExpression.from_where(tree.where)
kwargs["condition"] = constraints.build(visitor)
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
return self._query_and_op(tree, table, "delete_item", kwargs) | python | def _delete(self, tree):
""" Run a DELETE statement """
tablename = tree.table
table = self.describe(tablename, require=True)
kwargs = {}
visitor = Visitor(self.reserved_words)
if tree.where:
constraints = ConstraintExpression.from_where(tree.where)
kwargs["condition"] = constraints.build(visitor)
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
return self._query_and_op(tree, table, "delete_item", kwargs) | [
"def",
"_delete",
"(",
"self",
",",
"tree",
")",
":",
"tablename",
"=",
"tree",
".",
"table",
"table",
"=",
"self",
".",
"describe",
"(",
"tablename",
",",
"require",
"=",
"True",
")",
"kwargs",
"=",
"{",
"}",
"visitor",
"=",
"Visitor",
"(",
"self",
... | Run a DELETE statement | [
"Run",
"a",
"DELETE",
"statement"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L667-L678 | train | 29,883 |
stevearc/dql | dql/engine.py | Engine._update | def _update(self, tree):
""" Run an UPDATE statement """
tablename = tree.table
table = self.describe(tablename, require=True)
kwargs = {}
if tree.returns:
kwargs["returns"] = "_".join(tree.returns)
else:
kwargs["returns"] = "NONE"
visitor = Visitor(self.reserved_words)
updates = UpdateExpression.from_update(tree.update)
kwargs["expression"] = updates.build(visitor)
if tree.where:
constraints = ConstraintExpression.from_where(tree.where)
kwargs["condition"] = constraints.build(visitor)
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
return self._query_and_op(tree, table, "update_item", kwargs) | python | def _update(self, tree):
""" Run an UPDATE statement """
tablename = tree.table
table = self.describe(tablename, require=True)
kwargs = {}
if tree.returns:
kwargs["returns"] = "_".join(tree.returns)
else:
kwargs["returns"] = "NONE"
visitor = Visitor(self.reserved_words)
updates = UpdateExpression.from_update(tree.update)
kwargs["expression"] = updates.build(visitor)
if tree.where:
constraints = ConstraintExpression.from_where(tree.where)
kwargs["condition"] = constraints.build(visitor)
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
return self._query_and_op(tree, table, "update_item", kwargs) | [
"def",
"_update",
"(",
"self",
",",
"tree",
")",
":",
"tablename",
"=",
"tree",
".",
"table",
"table",
"=",
"self",
".",
"describe",
"(",
"tablename",
",",
"require",
"=",
"True",
")",
"kwargs",
"=",
"{",
"}",
"if",
"tree",
".",
"returns",
":",
"kw... | Run an UPDATE statement | [
"Run",
"an",
"UPDATE",
"statement"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L680-L699 | train | 29,884 |
stevearc/dql | dql/engine.py | Engine._parse_global_index | def _parse_global_index(self, clause, attrs):
""" Parse a global index clause and return a GlobalIndex """
index_type, name = clause[:2]
name = resolve(name)
def get_key(field, data_type=None):
""" Get or set the DynamoKey from the field name """
if field in attrs:
key = attrs[field]
if data_type is not None:
if TYPES[data_type] != key.data_type:
raise SyntaxError(
"Key %r %s already declared with type %s" % field,
data_type,
key.data_type,
)
else:
if data_type is None:
raise SyntaxError("Missing data type for %r" % field)
key = DynamoKey(field, data_type=TYPES[data_type])
attrs[field] = key
return key
g_hash_key = get_key(*clause.hash_key)
g_range_key = None
# For some reason I can't get the throughput section to have a name
# Use an index instead
tp_index = 3
if clause.range_key:
tp_index += 1
g_range_key = get_key(*clause.range_key)
if clause.include_vars:
tp_index += 1
kwargs = {}
if tp_index < len(clause):
throughput = clause[tp_index]
kwargs["throughput"] = Throughput(*map(resolve, throughput))
index_type = clause.index_type[0]
if index_type in ("ALL", "INDEX"):
factory = GlobalIndex.all
elif index_type == "KEYS":
factory = GlobalIndex.keys
elif index_type == "INCLUDE":
factory = GlobalIndex.include
if not clause.include_vars:
raise SyntaxError("Include index %r missing include fields" % name)
kwargs["includes"] = [resolve(v) for v in clause.include_vars]
return factory(name, g_hash_key, g_range_key, **kwargs) | python | def _parse_global_index(self, clause, attrs):
""" Parse a global index clause and return a GlobalIndex """
index_type, name = clause[:2]
name = resolve(name)
def get_key(field, data_type=None):
""" Get or set the DynamoKey from the field name """
if field in attrs:
key = attrs[field]
if data_type is not None:
if TYPES[data_type] != key.data_type:
raise SyntaxError(
"Key %r %s already declared with type %s" % field,
data_type,
key.data_type,
)
else:
if data_type is None:
raise SyntaxError("Missing data type for %r" % field)
key = DynamoKey(field, data_type=TYPES[data_type])
attrs[field] = key
return key
g_hash_key = get_key(*clause.hash_key)
g_range_key = None
# For some reason I can't get the throughput section to have a name
# Use an index instead
tp_index = 3
if clause.range_key:
tp_index += 1
g_range_key = get_key(*clause.range_key)
if clause.include_vars:
tp_index += 1
kwargs = {}
if tp_index < len(clause):
throughput = clause[tp_index]
kwargs["throughput"] = Throughput(*map(resolve, throughput))
index_type = clause.index_type[0]
if index_type in ("ALL", "INDEX"):
factory = GlobalIndex.all
elif index_type == "KEYS":
factory = GlobalIndex.keys
elif index_type == "INCLUDE":
factory = GlobalIndex.include
if not clause.include_vars:
raise SyntaxError("Include index %r missing include fields" % name)
kwargs["includes"] = [resolve(v) for v in clause.include_vars]
return factory(name, g_hash_key, g_range_key, **kwargs) | [
"def",
"_parse_global_index",
"(",
"self",
",",
"clause",
",",
"attrs",
")",
":",
"index_type",
",",
"name",
"=",
"clause",
"[",
":",
"2",
"]",
"name",
"=",
"resolve",
"(",
"name",
")",
"def",
"get_key",
"(",
"field",
",",
"data_type",
"=",
"None",
"... | Parse a global index clause and return a GlobalIndex | [
"Parse",
"a",
"global",
"index",
"clause",
"and",
"return",
"a",
"GlobalIndex"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L760-L807 | train | 29,885 |
stevearc/dql | dql/engine.py | Engine._insert | def _insert(self, tree):
""" Run an INSERT statement """
tablename = tree.table
count = 0
kwargs = {}
batch = self.connection.batch_write(tablename, **kwargs)
with batch:
for item in iter_insert_items(tree):
batch.put(item)
count += 1
return count | python | def _insert(self, tree):
""" Run an INSERT statement """
tablename = tree.table
count = 0
kwargs = {}
batch = self.connection.batch_write(tablename, **kwargs)
with batch:
for item in iter_insert_items(tree):
batch.put(item)
count += 1
return count | [
"def",
"_insert",
"(",
"self",
",",
"tree",
")",
":",
"tablename",
"=",
"tree",
".",
"table",
"count",
"=",
"0",
"kwargs",
"=",
"{",
"}",
"batch",
"=",
"self",
".",
"connection",
".",
"batch_write",
"(",
"tablename",
",",
"*",
"*",
"kwargs",
")",
"... | Run an INSERT statement | [
"Run",
"an",
"INSERT",
"statement"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L809-L819 | train | 29,886 |
stevearc/dql | dql/engine.py | Engine._drop | def _drop(self, tree):
""" Run a DROP statement """
tablename = tree.table
kwargs = {}
try:
ret = self.connection.delete_table(tablename, **kwargs)
except DynamoDBError as e:
if e.kwargs["Code"] == "ResourceNotFoundException" and tree.exists:
return False
raise
return True | python | def _drop(self, tree):
""" Run a DROP statement """
tablename = tree.table
kwargs = {}
try:
ret = self.connection.delete_table(tablename, **kwargs)
except DynamoDBError as e:
if e.kwargs["Code"] == "ResourceNotFoundException" and tree.exists:
return False
raise
return True | [
"def",
"_drop",
"(",
"self",
",",
"tree",
")",
":",
"tablename",
"=",
"tree",
".",
"table",
"kwargs",
"=",
"{",
"}",
"try",
":",
"ret",
"=",
"self",
".",
"connection",
".",
"delete_table",
"(",
"tablename",
",",
"*",
"*",
"kwargs",
")",
"except",
"... | Run a DROP statement | [
"Run",
"a",
"DROP",
"statement"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L821-L831 | train | 29,887 |
stevearc/dql | dql/engine.py | Engine._update_throughput | def _update_throughput(self, tablename, read, write, index):
""" Update the throughput on a table or index """
def get_desc():
""" Get the table or global index description """
desc = self.describe(tablename, refresh=True, require=True)
if index is not None:
return desc.global_indexes[index]
return desc
desc = get_desc()
def num_or_star(value):
""" Convert * to 0, otherwise resolve a number """
return 0 if value == "*" else resolve(value)
read = num_or_star(read)
write = num_or_star(write)
if read <= 0:
read = desc.read_throughput
if write <= 0:
write = desc.write_throughput
throughput = Throughput(read, write)
kwargs = {}
if index:
kwargs["global_indexes"] = {index: throughput}
else:
kwargs["throughput"] = throughput
self.connection.update_table(tablename, **kwargs)
desc = get_desc()
while desc.status == "UPDATING": # pragma: no cover
time.sleep(5)
desc = get_desc() | python | def _update_throughput(self, tablename, read, write, index):
""" Update the throughput on a table or index """
def get_desc():
""" Get the table or global index description """
desc = self.describe(tablename, refresh=True, require=True)
if index is not None:
return desc.global_indexes[index]
return desc
desc = get_desc()
def num_or_star(value):
""" Convert * to 0, otherwise resolve a number """
return 0 if value == "*" else resolve(value)
read = num_or_star(read)
write = num_or_star(write)
if read <= 0:
read = desc.read_throughput
if write <= 0:
write = desc.write_throughput
throughput = Throughput(read, write)
kwargs = {}
if index:
kwargs["global_indexes"] = {index: throughput}
else:
kwargs["throughput"] = throughput
self.connection.update_table(tablename, **kwargs)
desc = get_desc()
while desc.status == "UPDATING": # pragma: no cover
time.sleep(5)
desc = get_desc() | [
"def",
"_update_throughput",
"(",
"self",
",",
"tablename",
",",
"read",
",",
"write",
",",
"index",
")",
":",
"def",
"get_desc",
"(",
")",
":",
"\"\"\" Get the table or global index description \"\"\"",
"desc",
"=",
"self",
".",
"describe",
"(",
"tablename",
",... | Update the throughput on a table or index | [
"Update",
"the",
"throughput",
"on",
"a",
"table",
"or",
"index"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L833-L866 | train | 29,888 |
stevearc/dql | dql/engine.py | Engine._alter | def _alter(self, tree):
""" Run an ALTER statement """
if tree.throughput:
[read, write] = tree.throughput
index = None
if tree.index:
index = tree.index
self._update_throughput(tree.table, read, write, index)
elif tree.drop_index:
updates = [IndexUpdate.delete(tree.drop_index[0])]
try:
self.connection.update_table(tree.table, index_updates=updates)
except DynamoDBError as e:
if tree.exists and e.kwargs["Code"] == "ResourceNotFoundException":
pass
else:
raise
elif tree.create_index:
# GlobalIndex
attrs = {}
index = self._parse_global_index(tree.create_index, attrs)
updates = [IndexUpdate.create(index)]
try:
self.connection.update_table(tree.table, index_updates=updates)
except DynamoDBError as e:
if (
tree.not_exists
and e.kwargs["Code"] == "ValidationException"
and "already exists" in e.kwargs["Message"]
):
pass
else:
raise
else:
raise SyntaxError("No alter command found") | python | def _alter(self, tree):
""" Run an ALTER statement """
if tree.throughput:
[read, write] = tree.throughput
index = None
if tree.index:
index = tree.index
self._update_throughput(tree.table, read, write, index)
elif tree.drop_index:
updates = [IndexUpdate.delete(tree.drop_index[0])]
try:
self.connection.update_table(tree.table, index_updates=updates)
except DynamoDBError as e:
if tree.exists and e.kwargs["Code"] == "ResourceNotFoundException":
pass
else:
raise
elif tree.create_index:
# GlobalIndex
attrs = {}
index = self._parse_global_index(tree.create_index, attrs)
updates = [IndexUpdate.create(index)]
try:
self.connection.update_table(tree.table, index_updates=updates)
except DynamoDBError as e:
if (
tree.not_exists
and e.kwargs["Code"] == "ValidationException"
and "already exists" in e.kwargs["Message"]
):
pass
else:
raise
else:
raise SyntaxError("No alter command found") | [
"def",
"_alter",
"(",
"self",
",",
"tree",
")",
":",
"if",
"tree",
".",
"throughput",
":",
"[",
"read",
",",
"write",
"]",
"=",
"tree",
".",
"throughput",
"index",
"=",
"None",
"if",
"tree",
".",
"index",
":",
"index",
"=",
"tree",
".",
"index",
... | Run an ALTER statement | [
"Run",
"an",
"ALTER",
"statement"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L868-L902 | train | 29,889 |
stevearc/dql | dql/engine.py | Engine._dump | def _dump(self, tree):
""" Run a DUMP statement """
schema = []
if tree.tables:
for table in tree.tables:
desc = self.describe(table, refresh=True, require=True)
schema.append(desc.schema)
else:
for table in self.describe_all():
schema.append(table.schema)
return "\n\n".join(schema) | python | def _dump(self, tree):
""" Run a DUMP statement """
schema = []
if tree.tables:
for table in tree.tables:
desc = self.describe(table, refresh=True, require=True)
schema.append(desc.schema)
else:
for table in self.describe_all():
schema.append(table.schema)
return "\n\n".join(schema) | [
"def",
"_dump",
"(",
"self",
",",
"tree",
")",
":",
"schema",
"=",
"[",
"]",
"if",
"tree",
".",
"tables",
":",
"for",
"table",
"in",
"tree",
".",
"tables",
":",
"desc",
"=",
"self",
".",
"describe",
"(",
"table",
",",
"refresh",
"=",
"True",
",",... | Run a DUMP statement | [
"Run",
"a",
"DUMP",
"statement"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L904-L915 | train | 29,890 |
stevearc/dql | dql/engine.py | Engine._load | def _load(self, tree):
""" Run a LOAD statement """
filename = tree.load_file[0]
if filename[0] in ['"', "'"]:
filename = unwrap(filename)
if not os.path.exists(filename):
raise Exception("No such file %r" % filename)
batch = self.connection.batch_write(tree.table)
count = 0
with batch:
remainder, ext = os.path.splitext(filename)
if ext.lower() in [".gz", ".gzip"]:
ext = os.path.splitext(remainder)[1]
opened = gzip.open(filename, "rb")
else:
opened = open(filename, "r")
with opened as ifile:
if ext.lower() == ".csv":
reader = csv.DictReader(ifile)
for row in reader:
batch.put(row)
count += 1
elif ext.lower() == ".json":
for row in ifile:
batch.put(json.loads(row))
count += 1
else:
try:
while True:
batch.put(pickle.load(ifile))
count += 1
except EOFError:
pass
return count | python | def _load(self, tree):
""" Run a LOAD statement """
filename = tree.load_file[0]
if filename[0] in ['"', "'"]:
filename = unwrap(filename)
if not os.path.exists(filename):
raise Exception("No such file %r" % filename)
batch = self.connection.batch_write(tree.table)
count = 0
with batch:
remainder, ext = os.path.splitext(filename)
if ext.lower() in [".gz", ".gzip"]:
ext = os.path.splitext(remainder)[1]
opened = gzip.open(filename, "rb")
else:
opened = open(filename, "r")
with opened as ifile:
if ext.lower() == ".csv":
reader = csv.DictReader(ifile)
for row in reader:
batch.put(row)
count += 1
elif ext.lower() == ".json":
for row in ifile:
batch.put(json.loads(row))
count += 1
else:
try:
while True:
batch.put(pickle.load(ifile))
count += 1
except EOFError:
pass
return count | [
"def",
"_load",
"(",
"self",
",",
"tree",
")",
":",
"filename",
"=",
"tree",
".",
"load_file",
"[",
"0",
"]",
"if",
"filename",
"[",
"0",
"]",
"in",
"[",
"'\"'",
",",
"\"'\"",
"]",
":",
"filename",
"=",
"unwrap",
"(",
"filename",
")",
"if",
"not"... | Run a LOAD statement | [
"Run",
"a",
"LOAD",
"statement"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L917-L950 | train | 29,891 |
stevearc/dql | dql/engine.py | FragmentEngine.execute | def execute(self, fragment, pretty_format=True):
"""
Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
None.
"""
self.fragments = (self.fragments + "\n" + fragment).lstrip()
try:
line_parser.parseString(self.fragments)
except ParseException:
pass
else:
self.last_query = self.fragments.strip()
self.fragments = ""
return super(FragmentEngine, self).execute(self.last_query, pretty_format)
return None | python | def execute(self, fragment, pretty_format=True):
"""
Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
None.
"""
self.fragments = (self.fragments + "\n" + fragment).lstrip()
try:
line_parser.parseString(self.fragments)
except ParseException:
pass
else:
self.last_query = self.fragments.strip()
self.fragments = ""
return super(FragmentEngine, self).execute(self.last_query, pretty_format)
return None | [
"def",
"execute",
"(",
"self",
",",
"fragment",
",",
"pretty_format",
"=",
"True",
")",
":",
"self",
".",
"fragments",
"=",
"(",
"self",
".",
"fragments",
"+",
"\"\\n\"",
"+",
"fragment",
")",
".",
"lstrip",
"(",
")",
"try",
":",
"line_parser",
".",
... | Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
None. | [
"Run",
"or",
"aggregate",
"a",
"query",
"fragment"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L974-L992 | train | 29,892 |
stevearc/dql | dql/engine.py | FragmentEngine.pformat_exc | def pformat_exc(self, exc):
""" Format an exception message for the last query's parse error """
lines = []
try:
pre_nl = self.last_query.rindex("\n", 0, exc.loc) + 1
except ValueError:
pre_nl = 0
try:
post_nl = self.last_query.index("\n", exc.loc)
except ValueError:
post_nl = len(self.last_query)
lines.append(self.last_query[:post_nl])
lines.append(" " * (exc.loc - pre_nl) + "^")
lines.append(str(exc))
return "\n".join(lines) | python | def pformat_exc(self, exc):
""" Format an exception message for the last query's parse error """
lines = []
try:
pre_nl = self.last_query.rindex("\n", 0, exc.loc) + 1
except ValueError:
pre_nl = 0
try:
post_nl = self.last_query.index("\n", exc.loc)
except ValueError:
post_nl = len(self.last_query)
lines.append(self.last_query[:post_nl])
lines.append(" " * (exc.loc - pre_nl) + "^")
lines.append(str(exc))
return "\n".join(lines) | [
"def",
"pformat_exc",
"(",
"self",
",",
"exc",
")",
":",
"lines",
"=",
"[",
"]",
"try",
":",
"pre_nl",
"=",
"self",
".",
"last_query",
".",
"rindex",
"(",
"\"\\n\"",
",",
"0",
",",
"exc",
".",
"loc",
")",
"+",
"1",
"except",
"ValueError",
":",
"p... | Format an exception message for the last query's parse error | [
"Format",
"an",
"exception",
"message",
"for",
"the",
"last",
"query",
"s",
"parse",
"error"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L994-L1008 | train | 29,893 |
stevearc/dql | dql/expressions/update.py | UpdateExpression.from_update | def from_update(cls, update):
""" Factory for creating an Update expression """
expressions = []
if update.set_expr:
expressions.append(UpdateSetMany.from_clause(update.set_expr))
if update.remove_expr:
expressions.append(UpdateRemove.from_clause(update.remove_expr))
if update.add_expr:
expressions.append(UpdateAdd.from_clause(update.add_expr))
if update.delete_expr:
expressions.append(UpdateDelete.from_clause(update.delete_expr))
return cls(expressions) | python | def from_update(cls, update):
""" Factory for creating an Update expression """
expressions = []
if update.set_expr:
expressions.append(UpdateSetMany.from_clause(update.set_expr))
if update.remove_expr:
expressions.append(UpdateRemove.from_clause(update.remove_expr))
if update.add_expr:
expressions.append(UpdateAdd.from_clause(update.add_expr))
if update.delete_expr:
expressions.append(UpdateDelete.from_clause(update.delete_expr))
return cls(expressions) | [
"def",
"from_update",
"(",
"cls",
",",
"update",
")",
":",
"expressions",
"=",
"[",
"]",
"if",
"update",
".",
"set_expr",
":",
"expressions",
".",
"append",
"(",
"UpdateSetMany",
".",
"from_clause",
"(",
"update",
".",
"set_expr",
")",
")",
"if",
"update... | Factory for creating an Update expression | [
"Factory",
"for",
"creating",
"an",
"Update",
"expression"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/expressions/update.py#L31-L42 | train | 29,894 |
stevearc/dql | dql/models.py | TableField.to_index | def to_index(self, index_type, index_name, includes=None):
""" Create an index field from this field """
return IndexField(self.name, self.data_type, index_type, index_name, includes) | python | def to_index(self, index_type, index_name, includes=None):
""" Create an index field from this field """
return IndexField(self.name, self.data_type, index_type, index_name, includes) | [
"def",
"to_index",
"(",
"self",
",",
"index_type",
",",
"index_name",
",",
"includes",
"=",
"None",
")",
":",
"return",
"IndexField",
"(",
"self",
".",
"name",
",",
"self",
".",
"data_type",
",",
"index_type",
",",
"index_name",
",",
"includes",
")"
] | Create an index field from this field | [
"Create",
"an",
"index",
"field",
"from",
"this",
"field"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/models.py#L122-L124 | train | 29,895 |
stevearc/dql | dql/models.py | GlobalIndex.from_description | def from_description(cls, description, attrs):
""" Create an object from a dynamo3 response """
hash_key = None
range_key = None
index_type = description["Projection"]["ProjectionType"]
includes = description["Projection"].get("NonKeyAttributes")
for data in description["KeySchema"]:
name = data["AttributeName"]
if name not in attrs:
continue
key_type = data["KeyType"]
if key_type == "HASH":
hash_key = TableField(name, attrs[name].data_type, key_type)
elif key_type == "RANGE":
range_key = TableField(name, attrs[name].data_type, key_type)
throughput = description["ProvisionedThroughput"]
return cls(
description["IndexName"],
index_type,
description["IndexStatus"],
hash_key,
range_key,
throughput["ReadCapacityUnits"],
throughput["WriteCapacityUnits"],
description.get("IndexSizeBytes", 0),
includes,
description,
) | python | def from_description(cls, description, attrs):
""" Create an object from a dynamo3 response """
hash_key = None
range_key = None
index_type = description["Projection"]["ProjectionType"]
includes = description["Projection"].get("NonKeyAttributes")
for data in description["KeySchema"]:
name = data["AttributeName"]
if name not in attrs:
continue
key_type = data["KeyType"]
if key_type == "HASH":
hash_key = TableField(name, attrs[name].data_type, key_type)
elif key_type == "RANGE":
range_key = TableField(name, attrs[name].data_type, key_type)
throughput = description["ProvisionedThroughput"]
return cls(
description["IndexName"],
index_type,
description["IndexStatus"],
hash_key,
range_key,
throughput["ReadCapacityUnits"],
throughput["WriteCapacityUnits"],
description.get("IndexSizeBytes", 0),
includes,
description,
) | [
"def",
"from_description",
"(",
"cls",
",",
"description",
",",
"attrs",
")",
":",
"hash_key",
"=",
"None",
"range_key",
"=",
"None",
"index_type",
"=",
"description",
"[",
"\"Projection\"",
"]",
"[",
"\"ProjectionType\"",
"]",
"includes",
"=",
"description",
... | Create an object from a dynamo3 response | [
"Create",
"an",
"object",
"from",
"a",
"dynamo3",
"response"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/models.py#L235-L262 | train | 29,896 |
stevearc/dql | dql/models.py | GlobalIndex.pformat | def pformat(self, consumed_capacity=None):
""" Pretty format for insertion into table pformat """
consumed_capacity = consumed_capacity or {}
lines = []
parts = ["GLOBAL", self.index_type, "INDEX", self.name]
if self.status != "ACTIVE":
parts.insert(0, "[%s]" % self.status)
lines.append(" ".join(parts))
lines.append(" items: {0:,} ({1:,} bytes)".format(self.item_count, self.size))
read = "Read: " + format_throughput(
self.read_throughput, consumed_capacity.get("read")
)
write = "Write: " + format_throughput(
self.write_throughput, consumed_capacity.get("write")
)
lines.append(" " + read + " " + write)
lines.append(" " + self.hash_key.schema)
if self.range_key is not None:
lines.append(" " + self.range_key.schema)
if self.includes is not None:
keys = "[%s]" % ", ".join(("'%s'" % i for i in self.includes))
lines.append(" Projection: %s" % keys)
return "\n".join(lines) | python | def pformat(self, consumed_capacity=None):
""" Pretty format for insertion into table pformat """
consumed_capacity = consumed_capacity or {}
lines = []
parts = ["GLOBAL", self.index_type, "INDEX", self.name]
if self.status != "ACTIVE":
parts.insert(0, "[%s]" % self.status)
lines.append(" ".join(parts))
lines.append(" items: {0:,} ({1:,} bytes)".format(self.item_count, self.size))
read = "Read: " + format_throughput(
self.read_throughput, consumed_capacity.get("read")
)
write = "Write: " + format_throughput(
self.write_throughput, consumed_capacity.get("write")
)
lines.append(" " + read + " " + write)
lines.append(" " + self.hash_key.schema)
if self.range_key is not None:
lines.append(" " + self.range_key.schema)
if self.includes is not None:
keys = "[%s]" % ", ".join(("'%s'" % i for i in self.includes))
lines.append(" Projection: %s" % keys)
return "\n".join(lines) | [
"def",
"pformat",
"(",
"self",
",",
"consumed_capacity",
"=",
"None",
")",
":",
"consumed_capacity",
"=",
"consumed_capacity",
"or",
"{",
"}",
"lines",
"=",
"[",
"]",
"parts",
"=",
"[",
"\"GLOBAL\"",
",",
"self",
".",
"index_type",
",",
"\"INDEX\"",
",",
... | Pretty format for insertion into table pformat | [
"Pretty",
"format",
"for",
"insertion",
"into",
"table",
"pformat"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/models.py#L282-L305 | train | 29,897 |
stevearc/dql | dql/models.py | GlobalIndex.schema | def schema(self):
""" The DQL fragment for constructing this index """
if self.status == "DELETING":
return ""
parts = ["GLOBAL", self.index_type, "INDEX"]
parts.append("('%s', %s," % (self.name, self.hash_key.name))
if self.range_key:
parts.append("%s," % self.range_key.name)
if self.includes:
parts.append("[%s]," % ", ".join(("'%s'" % i for i in self.includes)))
parts.append(
"THROUGHPUT (%d, %d))" % (self.read_throughput, self.write_throughput)
)
return " ".join(parts) | python | def schema(self):
""" The DQL fragment for constructing this index """
if self.status == "DELETING":
return ""
parts = ["GLOBAL", self.index_type, "INDEX"]
parts.append("('%s', %s," % (self.name, self.hash_key.name))
if self.range_key:
parts.append("%s," % self.range_key.name)
if self.includes:
parts.append("[%s]," % ", ".join(("'%s'" % i for i in self.includes)))
parts.append(
"THROUGHPUT (%d, %d))" % (self.read_throughput, self.write_throughput)
)
return " ".join(parts) | [
"def",
"schema",
"(",
"self",
")",
":",
"if",
"self",
".",
"status",
"==",
"\"DELETING\"",
":",
"return",
"\"\"",
"parts",
"=",
"[",
"\"GLOBAL\"",
",",
"self",
".",
"index_type",
",",
"\"INDEX\"",
"]",
"parts",
".",
"append",
"(",
"\"('%s', %s,\"",
"%",
... | The DQL fragment for constructing this index | [
"The",
"DQL",
"fragment",
"for",
"constructing",
"this",
"index"
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/models.py#L308-L322 | train | 29,898 |
stevearc/dql | dql/models.py | TableMeta.get_matching_indexes | def get_matching_indexes(self, possible_hash, possible_range):
"""
Get all indexes that could be queried on using a set of keys.
If any indexes match both hash AND range keys, indexes that only match
the hash key will be excluded from the result.
Parameters
----------
possible_hash : set
The names of fields that could be used as the hash key
possible_range : set
The names of fields that could be used as the range key
"""
matches = [
index
for index in self.iter_query_indexes()
if index.hash_key in possible_hash
]
range_matches = [
index for index in matches if index.range_key in possible_range
]
if range_matches:
return range_matches
return matches | python | def get_matching_indexes(self, possible_hash, possible_range):
"""
Get all indexes that could be queried on using a set of keys.
If any indexes match both hash AND range keys, indexes that only match
the hash key will be excluded from the result.
Parameters
----------
possible_hash : set
The names of fields that could be used as the hash key
possible_range : set
The names of fields that could be used as the range key
"""
matches = [
index
for index in self.iter_query_indexes()
if index.hash_key in possible_hash
]
range_matches = [
index for index in matches if index.range_key in possible_range
]
if range_matches:
return range_matches
return matches | [
"def",
"get_matching_indexes",
"(",
"self",
",",
"possible_hash",
",",
"possible_range",
")",
":",
"matches",
"=",
"[",
"index",
"for",
"index",
"in",
"self",
".",
"iter_query_indexes",
"(",
")",
"if",
"index",
".",
"hash_key",
"in",
"possible_hash",
"]",
"r... | Get all indexes that could be queried on using a set of keys.
If any indexes match both hash AND range keys, indexes that only match
the hash key will be excluded from the result.
Parameters
----------
possible_hash : set
The names of fields that could be used as the hash key
possible_range : set
The names of fields that could be used as the range key | [
"Get",
"all",
"indexes",
"that",
"could",
"be",
"queried",
"on",
"using",
"a",
"set",
"of",
"keys",
"."
] | e9d3aa22873076dae5ebd02e35318aa996b1e56a | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/models.py#L409-L434 | train | 29,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.