_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q273100 | _build_row | test | def _build_row(cells, padding, begin, sep, end):
"Return a string which represents a row of data cells."
pad = " " * padding
padded_cells = [pad + cell + pad for cell in cells]
# SolveBio: we're only displaying Key-Value tuples (dimension of 2).
# enforce that we don't wrap lines by setting a max
# limit on row width which is equal to TTY_COLS (see printing)
rendered_cells = (begin + sep.join(padded_cells) + end).rstrip()
if len(rendered_cells) > TTY_COLS:
if not cells[-1].endswith(" ") and not cells[-1].endswith("-"):
terminating_str = " ... "
else:
terminating_str = ""
rendered_cells = "{0}{1}{2}".format(
rendered_cells[:TTY_COLS - len(terminating_str) - 1],
terminating_str, end)
return rendered_cells | python | {
"resource": ""
} |
q273101 | _build_line | test | def _build_line(colwidths, padding, begin, fill, sep, end):
"Return a string which represents a horizontal line."
cells = [fill * (w + 2 * padding) for w in colwidths]
return _build_row(cells, 0, begin, sep, end) | python | {
"resource": ""
} |
q273102 | _mediawiki_cell_attrs | test | def _mediawiki_cell_attrs(row, colaligns):
"Prefix every cell in a row with an HTML alignment attribute."
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
row2 = [alignment[a] + c for c, a in zip(row, colaligns)]
return row2 | python | {
"resource": ""
} |
q273103 | _format_table | test | def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if headers else fmt.without_header_hide
pad = fmt.padding
headerrow = fmt.headerrow if fmt.headerrow else fmt.datarow
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(colwidths, pad, *fmt.lineabove))
if headers:
lines.append(_build_row(headers, pad, *headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
begin, fill, sep, end = fmt.linebelowheader
if fmt.usecolons:
segs = [
_line_segment_with_colons(fmt.linebelowheader, a, w + 2 * pad)
for w, a in zip(colwidths, colaligns)]
lines.append(_build_row(segs, 0, begin, sep, end))
else:
lines.append(_build_line(colwidths, pad, *fmt.linebelowheader))
if rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in rows[:-1]:
lines.append(_build_row(row, pad, *fmt.datarow))
lines.append(_build_line(colwidths, pad, *fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(rows[-1], pad, *fmt.datarow))
else:
for row in rows:
lines.append(_build_row(row, pad, *fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(colwidths, pad, *fmt.linebelow))
return "\n".join(lines) | python | {
"resource": ""
} |
q273104 | Dataset.migrate | test | def migrate(self, target, follow=True, **kwargs):
"""
Migrate the data from this dataset to a target dataset.
Valid optional kwargs include:
* source_params
* target_fields
* include_errors
* commit_mode
"""
if 'id' not in self or not self['id']:
raise Exception(
'No source dataset ID found. '
'Please instantiate the Dataset '
'object with an ID.')
# Target can be provided as a Dataset, or as an ID.
if isinstance(target, Dataset):
target_id = target.id
else:
target_id = target
migration = DatasetMigration.create(
source_id=self['id'],
target_id=target_id,
**kwargs)
if follow:
migration.follow()
return migration | python | {
"resource": ""
} |
q273105 | Object.validate_full_path | test | def validate_full_path(cls, full_path, **kwargs):
"""Helper method to parse a full or partial path and
return a full path as well as a dict containing path parts.
Uses the following rules when processing the path:
* If no domain, uses the current user's account domain
* If no vault, uses the current user's personal vault.
* If no path, uses '/' (vault root)
Returns a tuple containing:
* The validated full_path
* A dictionary with the components:
* domain: the domain of the vault
* vault: the name of the vault, without domain
* vault_full_path: domain:vault
* path: the object path within the vault
* parent_path: the parent path to the object
* filename: the object's filename (if any)
* full_path: the validated full path
The following components may be overridden using kwargs:
* vault
* path
Object paths (also known as "paths") must begin with a forward slash.
The following path formats are supported:
domain:vault:/path -> object "path" in the root of "domain:vault"
domain:vault/path -> object "path" in the root of "domain:vault"
vault:/path -> object "path" in the root of "vault"
vault/path -> object "path" in the root of "vault"
~/path -> object "path" in the root of personal vault
vault/ -> root of "vault"
~/ -> root of your personal vault
The following two formats are not supported:
path -> invalid/ambiguous path (exception)
vault:path -> invalid/ambiguous path (exception)
vault:path/path -> unsupported, interpreted as domain:vault/path
"""
from solvebio.resource.vault import Vault
_client = kwargs.pop('client', None) or cls._client or client
if not full_path:
raise Exception(
'Invalid path: ',
'Full path must be in one of the following formats: '
'"vault:/path", "domain:vault:/path", or "~/path"')
# Parse the vault's full_path, using overrides if any
input_vault = kwargs.get('vault') or full_path
try:
vault_full_path, path_dict = \
Vault.validate_full_path(input_vault, client=_client)
except Exception as err:
raise Exception('Could not determine vault from "{0}": {1}'
.format(input_vault, err))
if kwargs.get('path'):
# Allow override of the object_path.
full_path = '{0}:/{1}'.format(vault_full_path, kwargs['path'])
match = cls.PATH_RE.match(full_path)
if match:
object_path = match.groupdict()['path']
else:
raise Exception(
'Cannot find a valid object path in "{0}". '
'Full path must be in one of the following formats: '
'"vault:/path", "domain:vault:/path", or "~/path"'
.format(full_path))
# Remove double slashes
object_path = re.sub('//+', '/', object_path)
if object_path != '/':
# Remove trailing slash
object_path = object_path.rstrip('/')
path_dict['path'] = object_path
# TODO: parent_path and filename
full_path = '{domain}:{vault}:{path}'.format(**path_dict)
path_dict['full_path'] = full_path
return full_path, path_dict | python | {
"resource": ""
} |
q273106 | upload | test | def upload(args):
"""
Given a folder or file, upload all the folders and files contained
within it, skipping ones that already exist on the remote.
"""
base_remote_path, path_dict = Object.validate_full_path(
args.full_path, vault=args.vault, path=args.path)
# Assert the vault exists and is accessible
vault = Vault.get_by_full_path(path_dict['vault_full_path'])
# If not the vault root, validate remote path exists and is a folder
if path_dict['path'] != '/':
Object.get_by_full_path(base_remote_path, assert_type='folder')
for local_path in args.local_path:
local_path = local_path.rstrip('/')
local_start = os.path.basename(local_path)
if os.path.isdir(local_path):
_upload_folder(path_dict['domain'], vault,
base_remote_path, local_path, local_start)
else:
Object.upload_file(local_path, path_dict['path'],
vault.full_path) | python | {
"resource": ""
} |
q273107 | Vault.validate_full_path | test | def validate_full_path(cls, full_path, **kwargs):
"""Helper method to return a full path from a full or partial path.
If no domain, assumes user's account domain
If the vault is "~", assumes personal vault.
Valid vault paths include:
domain:vault
domain:vault:/path
domain:vault/path
vault:/path
vault
~/
Invalid vault paths include:
/vault/
/path
/
:/
Does not allow overrides for any vault path components.
"""
_client = kwargs.pop('client', None) or cls._client or client
full_path = full_path.strip()
if not full_path:
raise Exception(
'Vault path "{0}" is invalid. Path must be in the format: '
'"domain:vault:/path" or "vault:/path".'.format(full_path)
)
match = cls.VAULT_PATH_RE.match(full_path)
if not match:
raise Exception(
'Vault path "{0}" is invalid. Path must be in the format: '
'"domain:vault:/path" or "vault:/path".'.format(full_path)
)
path_parts = match.groupdict()
# Handle the special case where "~" means personal vault
if path_parts.get('vault') == '~':
path_parts = dict(domain=None, vault=None)
# If any values are None, set defaults from the user.
if None in path_parts.values():
user = _client.get('/v1/user', {})
defaults = {
'domain': user['account']['domain'],
'vault': 'user-{0}'.format(user['id'])
}
path_parts = dict((k, v or defaults.get(k))
for k, v in path_parts.items())
# Rebuild the full path
full_path = '{domain}:{vault}'.format(**path_parts)
path_parts['vault_full_path'] = full_path
return full_path, path_parts | python | {
"resource": ""
} |
q273108 | validate_api_host_url | test | def validate_api_host_url(url):
"""
Validate SolveBio API host url.
Valid urls must not be empty and
must contain either HTTP or HTTPS scheme.
"""
if not url:
raise SolveError('No SolveBio API host is set')
parsed = urlparse(url)
if parsed.scheme not in ['http', 'https']:
raise SolveError(
'Invalid API host: %s. '
'Missing url scheme (HTTP or HTTPS).' % url
)
elif not parsed.netloc:
raise SolveError('Invalid API host: %s.' % url)
return True | python | {
"resource": ""
} |
q273109 | Manifest.add | test | def add(self, *args):
"""
Add one or more files or URLs to the manifest.
If files contains a glob, it is expanded.
All files are uploaded to SolveBio. The Upload
object is used to fill the manifest.
"""
def _is_url(path):
p = urlparse(path)
return bool(p.scheme)
for path in args:
path = os.path.expanduser(path)
if _is_url(path):
self.add_url(path)
elif os.path.isfile(path):
self.add_file(path)
elif os.path.isdir(path):
for f in os.listdir(path):
self.add_file(f)
elif glob.glob(path):
for f in glob.glob(path):
self.add_file(f)
else:
raise ValueError(
'Path: "{0}" is not a valid format or does not exist. '
'Manifest paths must be files, directories, or URLs.'
.format(path)
) | python | {
"resource": ""
} |
q273110 | Annotator.annotate | test | def annotate(self, records, **kwargs):
"""Annotate a set of records with stored fields.
Args:
records: A list or iterator (can be a Query object)
chunk_size: The number of records to annotate at once (max 500).
Returns:
A generator that yields one annotated record at a time.
"""
# Update annotator_params with any kwargs
self.annotator_params.update(**kwargs)
chunk_size = self.annotator_params.get('chunk_size', self.CHUNK_SIZE)
chunk = []
for i, record in enumerate(records):
chunk.append(record)
if (i + 1) % chunk_size == 0:
for r in self._execute(chunk):
yield r
chunk = []
if chunk:
for r in self._execute(chunk):
yield r
chunk = [] | python | {
"resource": ""
} |
q273111 | Expression.evaluate | test | def evaluate(self, data=None, data_type='string', is_list=False):
"""Evaluates the expression with the provided context and format."""
payload = {
'data': data,
'expression': self.expr,
'data_type': data_type,
'is_list': is_list
}
res = self._client.post('/v1/evaluate', payload)
return res['result'] | python | {
"resource": ""
} |
q273112 | TabularOutputFormatter.format_name | test | def format_name(self, format_name):
"""Set the default format name.
:param str format_name: The display format name.
:raises ValueError: if the format is not recognized.
"""
if format_name in self.supported_formats:
self._format_name = format_name
else:
raise ValueError('unrecognized format_name "{}"'.format(
format_name)) | python | {
"resource": ""
} |
q273113 | TabularOutputFormatter.register_new_formatter | test | def register_new_formatter(cls, format_name, handler, preprocessors=(),
kwargs=None):
"""Register a new output formatter.
:param str format_name: The name of the format.
:param callable handler: The function that formats the data.
:param tuple preprocessors: The preprocessors to call before
formatting.
:param dict kwargs: Keys/values for keyword argument defaults.
"""
cls._output_formats[format_name] = OutputFormatHandler(
format_name, preprocessors, handler, kwargs or {}) | python | {
"resource": ""
} |
q273114 | TabularOutputFormatter.format_output | test | def format_output(self, data, headers, format_name=None,
preprocessors=(), column_types=None, **kwargs):
"""Format the headers and data using a specific formatter.
*format_name* must be a supported formatter (see
:attr:`supported_formats`).
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param str format_name: The display format to use (optional, if the
:class:`TabularOutputFormatter` object has a default format set).
:param tuple preprocessors: Additional preprocessors to call before
any formatter preprocessors.
:param \*\*kwargs: Optional arguments for the formatter.
:return: The formatted data.
:rtype: str
:raises ValueError: If the *format_name* is not recognized.
"""
format_name = format_name or self._format_name
if format_name not in self.supported_formats:
raise ValueError('unrecognized format "{}"'.format(format_name))
(_, _preprocessors, formatter,
fkwargs) = self._output_formats[format_name]
fkwargs.update(kwargs)
if column_types is None:
data = list(data)
column_types = self._get_column_types(data)
for f in unique_items(preprocessors + _preprocessors):
data, headers = f(data, headers, column_types=column_types,
**fkwargs)
return formatter(list(data), headers, column_types=column_types, **fkwargs) | python | {
"resource": ""
} |
q273115 | adapter | test | def adapter(data, headers, table_format=None, preserve_whitespace=False,
**kwargs):
"""Wrap tabulate inside a function for TabularOutputFormatter."""
keys = ('floatfmt', 'numalign', 'stralign', 'showindex', 'disable_numparse')
tkwargs = {'tablefmt': table_format}
tkwargs.update(filter_dict_by_key(kwargs, keys))
if table_format in supported_markup_formats:
tkwargs.update(numalign=None, stralign=None)
tabulate.PRESERVE_WHITESPACE = preserve_whitespace
return iter(tabulate.tabulate(data, headers, **tkwargs).split('\n')) | python | {
"resource": ""
} |
q273116 | get_user_config_dir | test | def get_user_config_dir(app_name, app_author, roaming=True, force_xdg=True):
"""Returns the config folder for the application. The default behavior
is to return whatever is most appropriate for the operating system.
For an example application called ``"My App"`` by ``"Acme"``,
something like the following folders could be returned:
macOS (non-XDG):
``~/Library/Application Support/My App``
Mac OS X (XDG):
``~/.config/my-app``
Unix:
``~/.config/my-app``
Windows 7 (roaming):
``C:\\Users\<user>\AppData\Roaming\Acme\My App``
Windows 7 (not roaming):
``C:\\Users\<user>\AppData\Local\Acme\My App``
:param app_name: the application name. This should be properly capitalized
and can contain whitespace.
:param app_author: The app author's name (or company). This should be
properly capitalized and can contain whitespace.
:param roaming: controls if the folder should be roaming or not on Windows.
Has no effect on non-Windows systems.
:param force_xdg: if this is set to `True`, then on macOS the XDG Base
Directory Specification will be followed. Has no effect
on non-macOS systems.
"""
if WIN:
key = 'APPDATA' if roaming else 'LOCALAPPDATA'
folder = os.path.expanduser(os.environ.get(key, '~'))
return os.path.join(folder, app_author, app_name)
if MAC and not force_xdg:
return os.path.join(os.path.expanduser(
'~/Library/Application Support'), app_name)
return os.path.join(
os.path.expanduser(os.environ.get('XDG_CONFIG_HOME', '~/.config')),
_pathify(app_name)) | python | {
"resource": ""
} |
q273117 | get_system_config_dirs | test | def get_system_config_dirs(app_name, app_author, force_xdg=True):
r"""Returns a list of system-wide config folders for the application.
For an example application called ``"My App"`` by ``"Acme"``,
something like the following folders could be returned:
macOS (non-XDG):
``['/Library/Application Support/My App']``
Mac OS X (XDG):
``['/etc/xdg/my-app']``
Unix:
``['/etc/xdg/my-app']``
Windows 7:
``['C:\ProgramData\Acme\My App']``
:param app_name: the application name. This should be properly capitalized
and can contain whitespace.
:param app_author: The app author's name (or company). This should be
properly capitalized and can contain whitespace.
:param force_xdg: if this is set to `True`, then on macOS the XDG Base
Directory Specification will be followed. Has no effect
on non-macOS systems.
"""
if WIN:
folder = os.environ.get('PROGRAMDATA')
return [os.path.join(folder, app_author, app_name)]
if MAC and not force_xdg:
return [os.path.join('/Library/Application Support', app_name)]
dirs = os.environ.get('XDG_CONFIG_DIRS', '/etc/xdg')
paths = [os.path.expanduser(x) for x in dirs.split(os.pathsep)]
return [os.path.join(d, _pathify(app_name)) for d in paths] | python | {
"resource": ""
} |
q273118 | Config.read_default_config | test | def read_default_config(self):
"""Read the default config file.
:raises DefaultConfigValidationError: There was a validation error with
the *default* file.
"""
if self.validate:
self.default_config = ConfigObj(configspec=self.default_file,
list_values=False, _inspec=True,
encoding='utf8')
valid = self.default_config.validate(Validator(), copy=True,
preserve_errors=True)
if valid is not True:
for name, section in valid.items():
if section is True:
continue
for key, value in section.items():
if isinstance(value, ValidateError):
raise DefaultConfigValidationError(
'section [{}], key "{}": {}'.format(
name, key, value))
elif self.default_file:
self.default_config, _ = self.read_config_file(self.default_file)
self.update(self.default_config) | python | {
"resource": ""
} |
q273119 | Config.read | test | def read(self):
"""Read the default, additional, system, and user config files.
:raises DefaultConfigValidationError: There was a validation error with
the *default* file.
"""
if self.default_file:
self.read_default_config()
return self.read_config_files(self.all_config_files()) | python | {
"resource": ""
} |
q273120 | Config.user_config_file | test | def user_config_file(self):
"""Get the absolute path to the user config file."""
return os.path.join(
get_user_config_dir(self.app_name, self.app_author),
self.filename) | python | {
"resource": ""
} |
q273121 | Config.system_config_files | test | def system_config_files(self):
"""Get a list of absolute paths to the system config files."""
return [os.path.join(f, self.filename) for f in get_system_config_dirs(
self.app_name, self.app_author)] | python | {
"resource": ""
} |
q273122 | Config.additional_files | test | def additional_files(self):
"""Get a list of absolute paths to the additional config files."""
return [os.path.join(f, self.filename) for f in self.additional_dirs] | python | {
"resource": ""
} |
q273123 | Config.write_default_config | test | def write_default_config(self, overwrite=False):
"""Write the default config to the user's config file.
:param bool overwrite: Write over an existing config if it exists.
"""
destination = self.user_config_file()
if not overwrite and os.path.exists(destination):
return
with io.open(destination, mode='wb') as f:
self.default_config.write(f) | python | {
"resource": ""
} |
q273124 | Config.read_config_files | test | def read_config_files(self, files):
"""Read a list of config files.
:param iterable files: An iterable (e.g. list) of files to read.
"""
errors = {}
for _file in files:
config, valid = self.read_config_file(_file)
self.update(config)
if valid is not True:
errors[_file] = valid
return errors or True | python | {
"resource": ""
} |
q273125 | truncate_string | test | def truncate_string(value, max_width=None):
"""Truncate string values."""
if isinstance(value, text_type) and max_width is not None and len(value) > max_width:
return value[:max_width]
return value | python | {
"resource": ""
} |
q273126 | replace | test | def replace(s, replace):
"""Replace multiple values in a string"""
for r in replace:
s = s.replace(*r)
return s | python | {
"resource": ""
} |
q273127 | BaseCommand.call_in_sequence | test | def call_in_sequence(self, cmds, shell=True):
"""Run multiple commmands in a row, exiting if one fails."""
for cmd in cmds:
if subprocess.call(cmd, shell=shell) == 1:
sys.exit(1) | python | {
"resource": ""
} |
q273128 | BaseCommand.apply_options | test | def apply_options(self, cmd, options=()):
"""Apply command-line options."""
for option in (self.default_cmd_options + options):
cmd = self.apply_option(cmd, option,
active=getattr(self, option, False))
return cmd | python | {
"resource": ""
} |
q273129 | BaseCommand.apply_option | test | def apply_option(self, cmd, option, active=True):
"""Apply a command-line option."""
return re.sub(r'{{{}\:(?P<option>[^}}]*)}}'.format(option),
'\g<option>' if active else '', cmd) | python | {
"resource": ""
} |
q273130 | lint.initialize_options | test | def initialize_options(self):
"""Set the default options."""
self.branch = 'master'
self.fix = False
super(lint, self).initialize_options() | python | {
"resource": ""
} |
q273131 | lint.run | test | def run(self):
"""Run the linter."""
cmd = 'pep8radius {branch} {{fix: --in-place}}{{verbose: -vv}}'
cmd = cmd.format(branch=self.branch)
self.call_and_exit(self.apply_options(cmd, ('fix', ))) | python | {
"resource": ""
} |
q273132 | docs.run | test | def run(self):
"""Generate and view the documentation."""
cmds = (self.clean_docs_cmd, self.html_docs_cmd, self.view_docs_cmd)
self.call_in_sequence(cmds) | python | {
"resource": ""
} |
q273133 | truncate_string | test | def truncate_string(data, headers, max_field_width=None, **_):
"""Truncate very long strings. Only needed for tabular
representation, because trying to tabulate very long data
is problematic in terms of performance, and does not make any
sense visually.
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param int max_field_width: Width to truncate field for display
:return: The processed data and headers.
:rtype: tuple
"""
return (([utils.truncate_string(v, max_field_width) for v in row] for row in data),
[utils.truncate_string(h, max_field_width) for h in headers]) | python | {
"resource": ""
} |
q273134 | format_numbers | test | def format_numbers(data, headers, column_types=(), integer_format=None,
float_format=None, **_):
"""Format numbers according to a format specification.
This uses Python's format specification to format numbers of the following
types: :class:`int`, :class:`py2:long` (Python 2), :class:`float`, and
:class:`~decimal.Decimal`. See the :ref:`python:formatspec` for more
information about the format strings.
.. NOTE::
A column is only formatted if all of its values are the same type
(except for :data:`None`).
:param iterable data: An :term:`iterable` (e.g. list) of rows.
:param iterable headers: The column headers.
:param iterable column_types: The columns' type objects (e.g. int or float).
:param str integer_format: The format string to use for integer columns.
:param str float_format: The format string to use for float columns.
:return: The processed data and headers.
:rtype: tuple
"""
if (integer_format is None and float_format is None) or not column_types:
return iter(data), headers
def _format_number(field, column_type):
if integer_format and column_type is int and type(field) in int_types:
return format(field, integer_format)
elif float_format and column_type is float and type(field) in float_types:
return format(field, float_format)
return field
data = ([_format_number(v, column_types[i]) for i, v in enumerate(row)] for row in data)
return data, headers | python | {
"resource": ""
} |
q273135 | _format_row | test | def _format_row(headers, row):
"""Format a row."""
formatted_row = [' | '.join(field) for field in zip(headers, row)]
return '\n'.join(formatted_row) | python | {
"resource": ""
} |
q273136 | adapter | test | def adapter(data, headers, **kwargs):
"""Wrap vertical table in a function for TabularOutputFormatter."""
keys = ('sep_title', 'sep_character', 'sep_length')
return vertical_table(data, headers, **filter_dict_by_key(kwargs, keys)) | python | {
"resource": ""
} |
q273137 | adapter | test | def adapter(data, headers, table_format=None, **kwargs):
"""Wrap terminaltables inside a function for TabularOutputFormatter."""
keys = ('title', )
table = table_format_handler[table_format]
t = table([headers] + list(data), **filter_dict_by_key(kwargs, keys))
dimensions = terminaltables.width_and_alignment.max_dimensions(
t.table_data,
t.padding_left,
t.padding_right)[:3]
for r in t.gen_table(*dimensions):
yield u''.join(r) | python | {
"resource": ""
} |
q273138 | render_template | test | def render_template(template_file, dst_file, **kwargs):
"""Copy template and substitute template strings
File `template_file` is copied to `dst_file`. Then, each template variable
is replaced by a value. Template variables are of the form
{{val}}
Example:
Contents of template_file:
VAR1={{val1}}
VAR2={{val2}}
VAR3={{val3}}
render_template(template_file, output_file, val1="hello", val2="world")
Contents of output_file:
VAR1=hello
VAR2=world
VAR3={{val3}}
:param template_file: Path to the template file.
:param dst_file: Path to the destination file.
:param kwargs: Keys correspond to template variables.
:return:
"""
with open(template_file) as f:
template_text = f.read()
dst_text = template_text
for key, value in kwargs.iteritems():
dst_text = dst_text .replace("{{" + key + "}}", value)
with open(dst_file, "wt") as f:
f.write(dst_text) | python | {
"resource": ""
} |
q273139 | Session.isNum | test | def isNum(self, type):
"""
is the type a numerical value?
:param type: PKCS#11 type like `CKA_CERTIFICATE_TYPE`
:rtype: bool
"""
if type in (CKA_CERTIFICATE_TYPE,
CKA_CLASS,
CKA_KEY_GEN_MECHANISM,
CKA_KEY_TYPE,
CKA_MODULUS_BITS,
CKA_VALUE_BITS,
CKA_VALUE_LEN):
return True
return False | python | {
"resource": ""
} |
q273140 | Session.isBool | test | def isBool(self, type):
"""
is the type a boolean value?
:param type: PKCS#11 type like `CKA_ALWAYS_SENSITIVE`
:rtype: bool
"""
if type in (CKA_ALWAYS_SENSITIVE,
CKA_DECRYPT,
CKA_DERIVE,
CKA_ENCRYPT,
CKA_EXTRACTABLE,
CKA_HAS_RESET,
CKA_LOCAL,
CKA_MODIFIABLE,
CKA_NEVER_EXTRACTABLE,
CKA_PRIVATE,
CKA_RESET_ON_INIT,
CKA_SECONDARY_AUTH,
CKA_SENSITIVE,
CKA_SIGN,
CKA_SIGN_RECOVER,
CKA_TOKEN,
CKA_TRUSTED,
CKA_UNWRAP,
CKA_VERIFY,
CKA_VERIFY_RECOVER,
CKA_WRAP,
CKA_WRAP_WITH_TRUSTED):
return True
return False | python | {
"resource": ""
} |
q273141 | Session.isBin | test | def isBin(self, type):
"""
is the type a byte array value?
:param type: PKCS#11 type like `CKA_MODULUS`
:rtype: bool
"""
return (not self.isBool(type)) \
and (not self.isString(type)) \
and (not self.isNum(type)) | python | {
"resource": ""
} |
q273142 | Session.generateKey | test | def generateKey(self, template, mecha=MechanismAESGENERATEKEY):
"""
generate a secret key
:param template: template for the secret key
:param mecha: mechanism to use
:return: handle of the generated key
:rtype: PyKCS11.LowLevel.CK_OBJECT_HANDLE
"""
t = self._template2ckattrlist(template)
ck_handle = PyKCS11.LowLevel.CK_OBJECT_HANDLE()
m = mecha.to_native()
rv = self.lib.C_GenerateKey(self.session, m, t, ck_handle)
if rv != CKR_OK:
raise PyKCS11Error(rv)
return ck_handle | python | {
"resource": ""
} |
q273143 | Session.generateKeyPair | test | def generateKeyPair(self, templatePub, templatePriv,
mecha=MechanismRSAGENERATEKEYPAIR):
"""
generate a key pair
:param templatePub: template for the public key
:param templatePriv: template for the private key
:param mecha: mechanism to use
:return: a tuple of handles (pub, priv)
:rtype: tuple
"""
tPub = self._template2ckattrlist(templatePub)
tPriv = self._template2ckattrlist(templatePriv)
ck_pub_handle = PyKCS11.LowLevel.CK_OBJECT_HANDLE()
ck_prv_handle = PyKCS11.LowLevel.CK_OBJECT_HANDLE()
m = mecha.to_native()
rv = self.lib.C_GenerateKeyPair(self.session, m, tPub, tPriv,
ck_pub_handle, ck_prv_handle)
if rv != CKR_OK:
raise PyKCS11Error(rv)
return ck_pub_handle, ck_prv_handle | python | {
"resource": ""
} |
q273144 | Session.findObjects | test | def findObjects(self, template=()):
"""
find the objects matching the template pattern
:param template: list of attributes tuples (attribute,value).
The default value is () and all the objects are returned
:type template: list
:return: a list of object ids
:rtype: list
"""
t = self._template2ckattrlist(template)
# we search for 10 objects by default. speed/memory tradeoff
result = PyKCS11.LowLevel.ckobjlist(10)
rv = self.lib.C_FindObjectsInit(self.session, t)
if rv != CKR_OK:
raise PyKCS11Error(rv)
res = []
while True:
rv = self.lib.C_FindObjects(self.session, result)
if rv != CKR_OK:
raise PyKCS11Error(rv)
for x in result:
# make a copy of the handle: the original value get
# corrupted (!!)
a = CK_OBJECT_HANDLE(self)
a.assign(x.value())
res.append(a)
if len(result) == 0:
break
rv = self.lib.C_FindObjectsFinal(self.session)
if rv != CKR_OK:
raise PyKCS11Error(rv)
return res | python | {
"resource": ""
} |
q273145 | QRcode._insert_img | test | def _insert_img(qr_img, icon_img=None, factor=4, icon_box=None, static_dir=None):
"""Inserts a small icon to QR Code image"""
img_w, img_h = qr_img.size
size_w = int(img_w) / int(factor)
size_h = int(img_h) / int(factor)
try:
# load icon from current dir
icon_fp = os.path.join(icon_img)
if static_dir:
# load icon from app's static dir
icon_fp = os.path.join(static_dir, icon_img)
if icon_img.split("://")[0] in ["http", "https", "ftp"]:
icon_fp = BytesIO(urlopen(icon_img).read()) # download icon
icon = Image.open(icon_fp)
except:
return qr_img
icon_w, icon_h = icon.size
icon_w = size_w if icon_w > size_w else icon_w
icon_h = size_h if icon_h > size_h else icon_h
icon = icon.resize((int(icon_w), int(icon_h)), Image.ANTIALIAS)
icon = icon.convert("RGBA")
left = int((img_w - icon_w) / 2)
top = int((img_h - icon_h) / 2)
icon_box = (int(icon_box[0]), int(icon_box[1])) if icon_box else (left, top)
qr_img.paste(im=icon, box=icon_box, mask=icon)
return qr_img | python | {
"resource": ""
} |
q273146 | panel | test | def panel(context, panel, build, bed, version):
"""Export gene panels to .bed like format.
Specify any number of panels on the command line
"""
LOG.info("Running scout export panel")
adapter = context.obj['adapter']
# Save all chromosomes found in the collection if panels
chromosomes_found = set()
if not panel:
LOG.warning("Please provide at least one gene panel")
context.abort()
LOG.info("Exporting panels: {}".format(', '.join(panel)))
if bed:
if version:
version = [version]
lines = export_panels(
adapter=adapter,
panels=panel,
versions=version,
build=build,
)
else:
lines = export_gene_panels(
adapter=adapter,
panels=panel,
version=version,
)
for line in lines:
click.echo(line) | python | {
"resource": ""
} |
q273147 | _first_weekday | test | def _first_weekday(weekday, d):
"""
Given a weekday and a date, will increment the date until it's
weekday matches that of the given weekday, then that date is returned.
"""
while weekday != d.weekday():
d += timedelta(days=1)
return d | python | {
"resource": ""
} |
q273148 | Repeater.repeat | test | def repeat(self, day=None):
"""
Add 'num' to the day and count that day until we reach end_repeat, or
until we're outside of the current month, counting the days
as we go along.
"""
if day is None:
day = self.day
try:
d = date(self.year, self.month, day)
except ValueError: # out of range day
return self.count
if self.count_first and d <= self.end_repeat:
self.count_it(d.day)
d += timedelta(days=self.num)
if self.end_on is not None:
while d.month == self.month and \
d <= self.end_repeat and \
d.day <= self.end_on:
self.count_it(d.day)
d += timedelta(days=self.num)
else:
while d.month == self.month and d <= self.end_repeat:
self.count_it(d.day)
d += timedelta(days=self.num) | python | {
"resource": ""
} |
def repeat_reverse(self, start, end):
    """
    Starts from 'start' day and counts backwards until 'end' day.
    'start' should be >= 'end'. If it's equal to, does nothing.
    If a day falls outside of end_repeat, it won't be counted.
    """
    # NOTE: xrange — this module targets Python 2.
    day = start
    diff = start - end
    try:
        if date(self.year, self.month, day) <= self.end_repeat:
            self.count_it(day)
    # a value error likely means the event runs past the end of the month,
    # like an event that ends on the 31st, but the month doesn't have that
    # many days. Ignore it b/c the dates won't be added to calendar anyway
    except ValueError:
        pass
    # Walk backwards one day at a time, counting each valid day
    for i in xrange(diff):
        day -= 1
        try:
            if date(self.year, self.month, day) <= self.end_repeat:
                self.count_it(day)
        except ValueError:
            pass
"resource": ""
} |
def _biweekly_helper(self):
    """Created to take some of the load off of _handle_weekly_repeat_out"""
    # 14-day step == biweekly repetition
    self.num = 14
    mycount = self.repeat_biweekly()
    if mycount:
        # If the chunk's earliest counted day is not in the first week
        # (days 1-7), back-fill the first week for the chunk event.
        if self.event.is_chunk() and min(mycount) not in xrange(1, 8):
            mycount = _chunk_fill_out_first_week(
                self.year, self.month, mycount, self.event,
                diff=self.event.start_end_diff,
            )
        # Merge the locally generated counts into the shared counter
        for k, v in mycount.items():
            for item in v:
                self.count[k].append(item)
"resource": ""
} |
def _handle_single_chunk(self, event):
    """
    This handles either a non-repeating event chunk, or the first
    month of a repeating event chunk.
    """
    if not event.starts_same_month_as(self.month) and not \
            event.repeats('NEVER'):
        # no repeating chunk events if we're not in it's start month
        return
    # add the events into an empty defaultdict. This is better than passing
    # in self.count, which we don't want to make another copy of because it
    # could be very large.
    mycount = defaultdict(list)
    r = Repeater(
        mycount, self.year, self.month, day=event.l_start_date.day,
        end_repeat=event.end_repeat, event=event, count_first=True,
        end_on=event.l_end_date.day, num=1
    )
    if event.starts_same_month_as(self.month):
        if not event.ends_same_month_as(self.month):
            # The chunk event starts this month,
            # but does NOT end this month
            r.end_on = None
    else:
        # event chunks can be maximum of 7 days, so if an event chunk
        # didn't start this month, we know it will end this month.
        r.day = 1
    r.repeat()
    # now we add in the events we generated to self.count
    for k, v in r.count.items():
        self.count[k].extend(v)
"resource": ""
} |
def export_variants(adapter, collaborator, document_id=None, case_id=None):
    """Export causative variants for a collaborator

    Args:
        adapter(MongoAdapter)
        collaborator(str)
        document_id(str): Search for a specific variant
        case_id(str): Search causative variants for a case

    Yields:
        variant_obj(scout.Models.Variant): Variants marked as causative ordered by position.
    """
    # Store the variants in a list for sorting
    variants = []
    if document_id:
        # NOTE(review): adapter.variant may return None here and None would
        # be yielded as-is — confirm callers handle that.
        yield adapter.variant(document_id)
        return
    variant_ids = adapter.get_causatives(
        institute_id=collaborator,
        case_id=case_id
    )
    ##TODO add check so that same variant is not included more than once
    for document_id in variant_ids:
        variant_obj = adapter.variant(document_id)
        chrom = variant_obj['chromosome']
        # Convert chromosome to integer for sorting
        chrom_int = CHROMOSOME_INTEGERS.get(chrom)
        if not chrom_int:
            # Unknown contigs cannot be ordered; skip them
            LOG.info("Unknown chromosome %s", chrom)
            continue
        # Add chromosome and position to prepare for sorting
        variants.append((chrom_int, variant_obj['position'], variant_obj))
    # Sort variants based on (chromosome, position)
    variants.sort(key=lambda x: (x[0], x[1]))
    for variant in variants:
        variant_obj = variant[2]
        yield variant_obj
"resource": ""
} |
def export_verified_variants(aggregate_variants, unique_callers):
    """Create the lines for an excel file with verified variants for
       an institute

    Args:
        aggregate_variants(list): a list of variants with aggregates case data
        unique_callers(set): a unique list of available callers

    Returns:
        document_lines(list): list of lines to include in the document
    """
    document_lines = []
    for variant in aggregate_variants:
        # get genotype and allele depth for each sample
        # NOTE(review): `samples` is never used after this point — confirm
        # before removing.
        samples = []
        # One output row per sample of the variant's case
        for sample in variant['samples']:
            line = []  # line elements corespond to contants.variants_export.VERIFIED_VARIANTS_HEADER
            line.append(variant['institute'])
            line.append(variant['_id'])  # variant database ID
            line.append(variant['category'])
            line.append(variant['variant_type'])
            line.append(variant['display_name'][:30])  # variant display name
            # Build local link to variant:
            case_name = variant['case_obj']['display_name']  # case display name
            local_link = '/'.join(['', variant['institute'], case_name, variant['_id']])
            line.append(local_link)
            line.append(variant.get('validation'))
            line.append(case_name)
            # Find the case individual matching this sample to read phenotype
            case_individual = next(ind for ind in variant['case_obj']['individuals'] if ind['individual_id'] == sample['sample_id'])
            if case_individual['phenotype'] == 2:
                line.append(' '.join([sample.get('display_name'), '(A)']))  # label sample as affected
            else:
                line.append(sample.get('display_name'))
            line.append(''.join(['chr', variant['chromosome'], ':', str(variant['position'])]))  # position
            line.append('>'.join([variant.get('reference')[:10], variant.get('alternative')[:10]]))  # change
            genes = []
            prot_effect = []
            funct_anno = []
            for gene in variant.get('genes'):  # this will be a unique long field in the document
                genes.append(gene.get('hgnc_symbol', ''))
                funct_anno.append(gene.get('functional_annotation'))
                for transcript in gene.get('transcripts'):
                    # Only report protein change of the canonical transcript
                    if transcript.get('is_canonical') and transcript.get('protein_sequence_name'):
                        prot_effect.append(urllib.parse.unquote(transcript.get('protein_sequence_name')))
            line.append(','.join(prot_effect))
            line.append(','.join(funct_anno))
            line.append(','.join(genes))
            line.append(variant.get('rank_score'))
            line.append(variant.get('cadd_score'))
            line.append(sample.get('genotype_call'))
            line.append(sample['allele_depths'][0])
            line.append(sample['allele_depths'][1])
            line.append(sample['genotype_quality'])
            # Set callers values. One cell per caller, leave blank if not applicable
            for caller in unique_callers:
                if variant.get(caller):
                    line.append(variant.get(caller))
                else:
                    line.append('-')
            document_lines.append(line)
    return document_lines
"resource": ""
} |
def export_mt_variants(variants, sample_id):
    """Build the rows for a mitochondrial-variant excel report.

    Args:
        variants(list): all MT variants for a case, sorted by position
        sample_id(str): the id of one sample within the case

    Returns:
        list: one row (list of cell values) per variant
    """
    rows = []
    for variant in variants:
        pos = variant.get('position')
        change = '>'.join([variant.get('reference'), variant.get('alternative')])
        symbols = []
        protein_changes = []
        for gene in variant.get('genes'):
            symbols.append(gene.get('hgnc_symbol', ''))
            for tx in gene.get('transcripts'):
                # only the canonical transcript's protein change is reported
                if tx.get('is_canonical') and tx.get('protein_sequence_name'):
                    protein_changes.append(
                        urllib.parse.unquote(tx.get('protein_sequence_name')))
        # allele depths for the requested sample (blank when not present)
        ref_depth = ''
        alt_depth = ''
        for sample in variant['samples']:
            if sample.get('sample_id') == sample_id:
                ref_depth = sample['allele_depths'][0]
                alt_depth = sample['allele_depths'][1]
        rows.append([
            pos,
            change,
            str(pos) + change,
            ','.join(protein_changes),
            ','.join(symbols),
            ref_depth,
            alt_depth,
        ])
    return rows
"resource": ""
} |
def user(context, user_id, update_role, add_institute, remove_admin, remove_institute):
    """
    Update a user in the database
    """
    adapter = context.obj['adapter']
    user_obj = adapter.user(user_id)
    if not user_obj:
        LOG.warning("User %s could not be found", user_id)
        context.abort()
    existing_roles = set(user_obj.get('roles', []))
    if update_role:
        if not update_role in user_obj['roles']:
            # NOTE(review): this rebuild duplicates the set built above
            existing_roles = set(user_obj['roles'])
            existing_roles.add(update_role)
            LOG.info("Adding role %s to user", update_role)
        else:
            LOG.warning("User already have role %s", update_role)
    if remove_admin:
        try:
            existing_roles.remove('admin')
            LOG.info("Removing admin rights from user %s", user_id)
        except KeyError as err:
            # Nothing to remove — user was not an admin
            LOG.info("User %s does not have admin rights", user_id)
    user_obj['roles'] = list(existing_roles)
    existing_institutes = set(user_obj.get('institutes', []))
    for institute_id in add_institute:
        institute_obj = adapter.institute(institute_id)
        if not institute_obj:
            LOG.warning("Institute %s could not be found", institute_id)
        else:
            existing_institutes.add(institute_id)
            LOG.info("Adding institute %s to user", institute_id)
    for institute_id in remove_institute:
        try:
            existing_institutes.remove(institute_id)
            LOG.info("Removing institute %s from user", institute_id)
        except KeyError as err:
            LOG.info("User does not have access to institute %s", institute_id)
    user_obj['institutes'] = list(existing_institutes)
    # Persist the modified user document; return value is unused here
    updated_user = adapter.update_user(user_obj)
"resource": ""
} |
def str_variants(institute_id, case_name, ):
    """Display a list of STR variants."""
    page = int(request.args.get('page', 1))
    variant_type = request.args.get('variant_type', 'clinical')
    form = StrFiltersForm(request.args)
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    # The filter form's data doubles as the query document for the store
    query = form.data
    query['variant_type'] = variant_type
    variants_query = store.variants(case_obj['_id'], category='str',
                                    query=query)
    data = controllers.str_variants(store, institute_obj, case_obj,
                                    variants_query, page)
    return dict(institute=institute_obj, case=case_obj,
                variant_type = variant_type, form=form, page=page, **data)
"resource": ""
} |
def sv_variant(institute_id, case_name, variant_id):
    """Display a specific structural variant."""
    return controllers.sv_variant(store, institute_id, case_name, variant_id)
"resource": ""
} |
def str_variant(institute_id, case_name, variant_id):
    """Display a specific STR variant."""
    return controllers.str_variant(store, institute_id, case_name, variant_id)
"resource": ""
} |
def verify(institute_id, case_name, variant_id, variant_category, order):
    """Start procedure to validate variant using other techniques."""
    # NOTE(review): variant_category is unused in this body — presumably
    # only part of the route signature; confirm.
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    variant_obj = store.variant(variant_id)
    user_obj = store.user(current_user.email)
    comment = request.form.get('verification_comment')
    try:
        # Delegates the actual (un)ordering and notification mail
        controllers.variant_verification(store=store, mail=mail, institute_obj=institute_obj, case_obj=case_obj, user_obj=user_obj, comment=comment,
                                         variant_obj=variant_obj, sender=current_app.config['MAIL_USERNAME'], variant_url=request.referrer, order=order, url_builder=url_for)
    except controllers.MissingVerificationRecipientError:
        flash('No verification recipients added to institute.', 'danger')
    return redirect(request.referrer)
"resource": ""
} |
def clinvar(institute_id, case_name, variant_id):
    """Build a clinVar submission form for a variant."""
    data = controllers.clinvar_export(store, institute_id, case_name, variant_id)
    if request.method == 'GET':
        return data
    else: #POST
        form_dict = request.form.to_dict()
        submission_objects = set_submission_objects(form_dict)  # A tuple of submission objects (variants and casedata objects)
        # Add submission data to an open clinvar submission object,
        # or create a new if no open submission is found in database
        open_submission = store.get_open_clinvar_submission(current_user.email, institute_id)
        # NOTE(review): updated_submission is unused — the redirect below does
        # not pass it on; confirm intended.
        updated_submission = store.add_to_submission(open_submission['_id'], submission_objects)
        # Redirect to clinvar submissions handling page, and pass it the updated_submission_object
        return redirect(url_for('cases.clinvar_submissions', institute_id=institute_id))
"resource": ""
} |
def cancer_variants(institute_id, case_name):
    """Show cancer variants overview."""
    return controllers.cancer_variants(store, request.args, institute_id, case_name)
"resource": ""
} |
def variant_acmg(institute_id, case_name, variant_id):
    """ACMG classification form."""
    if request.method == 'GET':
        data = controllers.variant_acmg(store, institute_id, case_name, variant_id)
        return data
    else:
        # POST: collect the submitted criteria with per-term comment/link
        criteria = []
        criteria_terms = request.form.getlist('criteria')
        for term in criteria_terms:
            criteria.append(dict(
                term=term,
                comment=request.form.get("comment-{}".format(term)),
                links=[request.form.get("link-{}".format(term))],
            ))
        # Persist the evaluation and show the resulting classification
        acmg = controllers.variant_acmg_post(store, institute_id, case_name, variant_id,
                                             current_user.email, criteria)
        flash("classified as: {}".format(acmg), 'info')
        return redirect(url_for('.variant', institute_id=institute_id, case_name=case_name,
                                variant_id=variant_id))
"resource": ""
} |
def evaluation(evaluation_id):
    """Show or delete an ACMG evaluation."""
    evaluation_obj = store.get_evaluation(evaluation_id)
    controllers.evaluation(store, evaluation_obj)
    # POST deletes the evaluation and returns to its variant page
    if request.method == 'POST':
        link = url_for('.variant', institute_id=evaluation_obj['institute']['_id'],
                       case_name=evaluation_obj['case']['display_name'],
                       variant_id=evaluation_obj['variant_specific'])
        store.delete_evaluation(evaluation_obj)
        return redirect(link)
    # GET renders the evaluation details
    return dict(evaluation=evaluation_obj, institute=evaluation_obj['institute'],
                case=evaluation_obj['case'], variant=evaluation_obj['variant'],
                CRITERIA=ACMG_CRITERIA)
"resource": ""
} |
def acmg():
    """Calculate an ACMG classification from submitted criteria."""
    criterion_list = request.args.getlist('criterion')
    return jsonify(dict(classification=get_acmg(criterion_list)))
"resource": ""
} |
def upload_panel(institute_id, case_name):
    """Parse gene panel file and fill in HGNC symbols for filter.

    Reads an uploaded symbol file from the filter form, merges the parsed
    HGNC symbols into the form and re-issues the variants query (HTTP 307
    preserves the POST method).
    """
    # BUG FIX: the form must be built *before* reading the uploaded file
    # from it — the original read form.symbol_file.data first, raising
    # NameError at runtime.
    category = request.args.get('category')
    if category == 'sv':
        form = SvFiltersForm(request.args)
    else:
        form = FiltersForm(request.args)
    file = form.symbol_file.data
    if file.filename == '':
        flash('No selected file', 'warning')
        return redirect(request.referrer)
    try:
        stream = io.StringIO(file.stream.read().decode('utf-8'), newline=None)
    except UnicodeDecodeError as error:
        flash("Only text files are supported!", 'warning')
        return redirect(request.referrer)
    # Merge the uploaded symbols with any already present in the form
    hgnc_symbols = set(form.hgnc_symbols.data)
    new_hgnc_symbols = controllers.upload_panel(store, institute_id, case_name, stream)
    hgnc_symbols.update(new_hgnc_symbols)
    form.hgnc_symbols.data = ','.join(hgnc_symbols)
    # reset gene panels
    form.gene_panels.data = ''
    # HTTP redirect code 307 asks that the browser preserves the method of request (POST).
    if category == 'sv':
        return redirect(url_for('.sv_variants', institute_id=institute_id, case_name=case_name,
                                **form.data), code=307)
    else:
        return redirect(url_for('.variants', institute_id=institute_id, case_name=case_name,
                                **form.data), code=307)
"resource": ""
} |
def download_verified():
    """Download all verified variants for user's cases.

    Writes one excel file per institute to a temp folder, zips them in
    memory, removes the temp folder and serves the archive.
    """
    user_obj = store.user(current_user.email)
    user_institutes = user_obj.get('institutes')
    temp_excel_dir = os.path.join(variants_bp.static_folder, 'verified_folder')
    os.makedirs(temp_excel_dir, exist_ok=True)
    written_files = controllers.verified_excel_file(store, user_institutes, temp_excel_dir)
    if written_files:
        today = datetime.datetime.now().strftime('%Y-%m-%d')
        # zip the files on the fly and serve the archive to the user
        data = io.BytesIO()
        with zipfile.ZipFile(data, mode='w') as z:
            for f_name in pathlib.Path(temp_excel_dir).iterdir():
                # (removed a stray no-op `zipfile.ZipFile` statement here)
                z.write(f_name, os.path.basename(f_name))
        data.seek(0)
        # remove temp folder with excel files in it
        shutil.rmtree(temp_excel_dir)
        return send_file(
            data,
            mimetype='application/zip',
            as_attachment=True,
            attachment_filename='_'.join(['scout', 'verified_variants', today]) + '.zip'
        )
    else:
        flash("No verified variants could be exported for user's institutes", 'warning')
        return redirect(request.referrer)
"resource": ""
} |
def genes_by_alias(hgnc_genes):
    """Return a dictionary with hgnc symbols as keys

    Value of the dictionaries are information about the hgnc ids for a symbol.
    If the symbol is primary for a gene then 'true_id' will exist.
    A list of hgnc ids that the symbol points to is in ids.

    Args:
        hgnc_genes(dict): a dictionary with hgnc_id as key and gene info as value

    Returns:
        alias_genes(dict):
            {
                'hgnc_symbol':{
                    'true_id': int,
                    'ids': list(int)
                }
            }
    """
    alias_genes = {}
    for hgnc_id, gene in hgnc_genes.items():
        # This is the primary symbol:
        hgnc_symbol = gene['hgnc_symbol']
        for alias in gene['previous_symbols']:
            # The alias counts as "true" only when it equals the primary symbol
            true_id = hgnc_id if alias == hgnc_symbol else None
            # Keys are stored uppercased, so membership must be tested on the
            # uppercased alias as well (the original tested the raw alias,
            # which silently created duplicate entries).
            key = alias.upper()
            if key in alias_genes:
                alias_genes[key]['ids'].add(hgnc_id)
                if true_id:
                    alias_genes[key]['true_id'] = true_id
            else:
                # Use 'true_id' consistently (original wrote 'true' here,
                # contradicting both the docstring and the update branch).
                alias_genes[key] = {
                    'true_id': true_id,
                    'ids': {hgnc_id},
                }
    return alias_genes
"resource": ""
} |
def add_incomplete_penetrance(genes, alias_genes, hpo_lines):
    """Flag genes reported by HPO as having incomplete penetrance."""
    LOG.info("Add incomplete penetrance info")
    for symbol in get_incomplete_penetrance_genes(hpo_lines):
        for gene_id in get_correct_ids(symbol, alias_genes):
            genes[gene_id]['incomplete_penetrance'] = True
"resource": ""
} |
def link_genes(ensembl_lines, hgnc_lines, exac_lines, mim2gene_lines,
               genemap_lines, hpo_lines):
    """Gather information from different sources and return a gene dict

    Extract information collected from a number of sources and combine them
    into a gene dict with HGNC symbols as keys.

    hgnc_id works as the primary symbol and it is from this source we gather
    as much information as possible (hgnc_complete_set.txt)

    Coordinates are gathered from ensemble and the entries are linked from hgnc
    to ensembl via ENSGID.

    From exac the gene intolerance scores are collected, genes are linked to hgnc
    via hgnc symbol. This is a unstable symbol since they often change.

    Args:
        ensembl_lines(iterable(str)): Strings with ensembl gene information
        hgnc_lines(iterable(str)): Strings with hgnc gene information
        exac_lines(iterable(str)): Strings with exac PLi score info
        mim2gene_lines(iterable(str))
        genemap_lines(iterable(str))
        hpo_lines(iterable(str)): Strings with hpo gene information

    Returns:
        genes(dict): hgnc_id -> gene information dict
    """
    genes = {}
    LOG.info("Linking genes")
    # HGNC genes are the main source, these define the gene dataset to use
    # Try to use as much information as possible from hgnc
    for hgnc_gene in parse_hgnc_genes(hgnc_lines):
        hgnc_id = hgnc_gene['hgnc_id']
        genes[hgnc_id] = hgnc_gene
    # Each helper below mutates `genes` in place
    add_ensembl_info(genes, ensembl_lines)
    symbol_to_id = genes_by_alias(genes)
    add_exac_info(genes, symbol_to_id, exac_lines)
    add_omim_info(genes, symbol_to_id, genemap_lines, mim2gene_lines)
    add_incomplete_penetrance(genes, symbol_to_id, hpo_lines)
    return genes
"resource": ""
} |
def matchmaker_request(url, token, method, content_type=None, accept=None, data=None):
    """Send a request to MatchMaker and return its response

    Args:
        url(str): url to send request to
        token(str): MME server authorization token
        method(str): 'GET', 'POST' or 'DELETE'
        content_type(str): MME request Content-Type
        accept(str): accepted response
        data(dict): eventual data to send in request

    Returns:
        json_response(dict): server response
    """
    # A plain dict is all `requests` needs; the original built a werkzeug
    # Headers() object only to overwrite it on the next line.
    headers = {'X-Auth-Token': token}
    if content_type:
        headers['Content-Type'] = content_type
    if accept:
        headers['Accept'] = accept
    #sending data anyway so response will not be cached
    req_data = data or {'timestamp': datetime.datetime.now().timestamp()}
    json_response = None
    try:
        LOG.info('Sending {} request to MME url {}. Data sent: {}'.format(
            method, url, req_data))
        resp = requests.request(
            method = method,
            url = url,
            headers = headers,
            data = json.dumps(req_data)
        )
        json_response = resp.json()
        LOG.info('MME server response was:{}'.format(json_response))
        if isinstance(json_response, str):
            # Normalize plain-string replies into a dict
            json_response = {
                'message' : json_response,
            }
        elif isinstance(json_response, list): #asking for connected nodes
            return json_response
        json_response['status_code'] = resp.status_code
    except Exception as err:
        # Best-effort: network/JSON errors are reported in the payload
        LOG.info('An error occurred while sending HTTP request to server ({})'.format(err))
        json_response = {
            'message' : str(err)
        }
    return json_response
"resource": ""
} |
def mme_nodes(mme_base_url, token):
    """Return the available MatchMaker nodes.

    Args:
        mme_base_url(str): base URL of MME service
        token(str): MME server authorization token

    Returns:
        nodes(list): a list of node dictionaries
    """
    # Without both a base URL and a token there is nothing to query
    if not mme_base_url or not token:
        return []
    nodes = matchmaker_request(url=''.join([mme_base_url, '/nodes']),
                               token=token, method='GET')
    LOG.info('Matchmaker has the following connected nodes:{}'.format(nodes))
    return nodes
"resource": ""
} |
def get_cytoband_coordinates(chrom, pos):
    """Return the cytoband name for a position, or '' when unknown.

    Args:
        chrom(str)
        pos(int)

    Returns:
        coordinate(str)
    """
    if chrom not in CYTOBANDS:
        return ""
    coordinate = ""
    # Keep the data of the last interval covering the position
    for interval in CYTOBANDS[chrom][pos]:
        coordinate = interval.data
    return coordinate
"resource": ""
} |
def get_sub_category(alt_len, ref_len, category, svtype=None):
    """Return the sub category for a VCF variant.

    The sub categories are:
        'snv', 'indel', 'del', 'ins', 'dup', 'bnd', 'inv'

    Args:
        alt_len(int)
        ref_len(int)
        category(str)
        svtype(str)

    Returns:
        subcategory(str)
    """
    if category == 'sv':
        # structural variants use the (lower-cased) SVTYPE directly
        return svtype
    if category in ('snv', 'indel', 'cancer'):
        return 'snv' if ref_len == alt_len else 'indel'
    return ''
"resource": ""
} |
def get_length(alt_len, ref_len, category, pos, end, svtype=None, svlen=None):
    """Return the length of a variant, or -1 when it cannot be determined.

    Args:
        alt_len(int)
        ref_len(int)
        category(str)
        pos(int)
        end(int)
        svtype(str)
        svlen(int)
    """
    if category in ('snv', 'indel', 'cancer'):
        return alt_len if ref_len == alt_len else abs(ref_len - alt_len)
    if category == 'sv':
        if svtype == 'bnd':
            # translocations get a sentinel "very long" length
            return int(10e10)
        if svlen:
            return abs(int(svlen))
        # Some software does not give a length but they give END
        if end and end != pos:
            return end - pos
    # -1 indicates uncertain length
    return -1
"resource": ""
} |
def get_end(pos, alt, category, snvend=None, svend=None, svlen=None):
    """Return the end coordinate for a variant.

    Args:
        pos(int)
        alt(str)
        category(str)
        snvend(str)
        svend(int)
        svlen(int)

    Returns:
        end(int)
    """
    # Default: when nothing better is known, end == start
    end = pos
    if category in ('snv', 'indel', 'cancer'):
        # cyvcf2 already computed a reliable end for small variants
        end = snvend
    elif category == 'sv':
        end = svend
        # Callers sometimes set END == POS (e.g. insertions); fall back to SVLEN
        if svend == pos and svlen:
            end = pos + svlen
    # 'BND' alts contain ':' — the partner coordinate lives in the alt field
    if ':' in alt:
        bnd_match = BND_ALT_PATTERN.match(alt)
        if bnd_match:
            end = int(bnd_match.group(2))
    return end
"resource": ""
} |
def parse_coordinates(variant, category):
    """Find out the coordinates for a variant

    Args:
        variant(cyvcf2.Variant)

    Returns:
        coordinates(dict): A dictionary on the form:
        {
            'position':<int>,
            'end':<int>,
            'end_chrom':<str>,
            'length':<int>,
            'sub_category':<str>,
            'mate_id':<str>,
            'cytoband_start':<str>,
            'cytoband_end':<str>,
        }
    """
    ref = variant.REF
    # BUG FIX: the original only handled an empty ALT list for
    # category == "str", leaving `alt` unbound (UnboundLocalError) for any
    # other category with a missing ALT. Default to '.' in all cases.
    if variant.ALT:
        alt = variant.ALT[0]
    else:
        alt = '.'
    chrom_match = CHR_PATTERN.match(variant.CHROM)
    chrom = chrom_match.group(2)
    svtype = variant.INFO.get('SVTYPE')
    if svtype:
        svtype = svtype.lower()
    mate_id = variant.INFO.get('MATEID')
    svlen = variant.INFO.get('SVLEN')
    svend = variant.INFO.get('END')
    snvend = int(variant.end)
    position = int(variant.POS)
    ref_len = len(ref)
    alt_len = len(alt)
    sub_category = get_sub_category(alt_len, ref_len, category, svtype)
    end = get_end(position, alt, category, snvend, svend)
    length = get_length(alt_len, ref_len, category, position, end, svtype, svlen)
    end_chrom = chrom
    if sub_category == 'bnd':
        if ':' in alt:
            match = BND_ALT_PATTERN.match(alt)
            # BND will often be translocations between different chromosomes
            if match:
                other_chrom = match.group(1)
                match = CHR_PATTERN.match(other_chrom)
                end_chrom = match.group(2)
    cytoband_start = get_cytoband_coordinates(chrom, position)
    cytoband_end = get_cytoband_coordinates(end_chrom, end)
    coordinates = {
        'position': position,
        'end': end,
        'length': length,
        'sub_category': sub_category,
        'mate_id': mate_id,
        'cytoband_start': cytoband_start,
        'cytoband_end': cytoband_end,
        'end_chrom': end_chrom,
    }
    return coordinates
"resource": ""
} |
def cli(infile):
    """Load a cytoband file and print a few example interval lookups (debug helper)."""
    lines = get_file_handle(infile)
    cytobands = parse_cytoband(lines)
    print("Check some coordinates:")
    print("checking chrom 1 pos 2")
    intervals = cytobands['1'][2]
    for interval in intervals:
        print(interval)
        print(interval.begin)
        print(interval.end)
        print(interval.data)
        # print(interval.__dict__)
    print(cytobands['1'][2])
    print("checking chrom 8 pos 101677777")
    print(cytobands['8'][101677777])
    print("checking chrom X pos 4200000 - 6000000")
    print(cytobands['X'][4200000:6000000])
"resource": ""
} |
def panels():
    """Show all panels for a case."""
    if request.method == 'POST':
        # update an existing panel
        csv_file = request.files['csv_file']
        content = csv_file.stream.read()
        lines = None
        try:
            # Heuristic for line endings: utf-8 with '\n', else windows-1252 with '\r'
            if b'\n' in content:
                lines = content.decode('utf-8', 'ignore').split('\n')
            else:
                lines = content.decode('windows-1252').split('\r')
        except Exception as err:
            flash('Something went wrong while parsing the panel CSV file! ({})'.format(err), 'danger')
            return redirect(request.referrer)
        new_panel_name = request.form.get('new_panel_name')
        if new_panel_name: #create a new panel
            new_panel_id = controllers.new_panel(
                store=store,
                institute_id=request.form['institute'],
                panel_name=new_panel_name,
                display_name=request.form['display_name'],
                csv_lines=lines,
            )
            if new_panel_id is None:
                flash('Something went wrong and the panel list was not updated!','warning')
                return redirect(request.referrer)
            else:
                flash("new gene panel added, {}!".format(new_panel_name),'success')
                return redirect(url_for('panels.panel', panel_id=new_panel_id))
        else: # modify an existing panel
            update_option = request.form['modify_option']
            panel_obj= controllers.update_panel(
                store=store,
                panel_name=request.form['panel_name'],
                csv_lines=lines,
                option=update_option
            )
            if panel_obj is None:
                return abort(404, "gene panel not found: {}".format(request.form['panel_name']))
            else:
                return redirect(url_for('panels.panel', panel_id=panel_obj['_id']))
    # GET: build the overview of panels grouped per accessible institute
    institutes = list(user_institutes(store, current_user))
    panel_names = [name
                   for institute in institutes
                   for name in
                   store.gene_panels(institute_id=institute['_id']).distinct('panel_name')]
    panel_versions = {}
    for name in panel_names:
        panel_versions[name]=store.gene_panels(panel_id=name)
    panel_groups = []
    for institute_obj in institutes:
        institute_panels = store.latest_panels(institute_obj['_id'])
        panel_groups.append((institute_obj, institute_panels))
    return dict(panel_groups=panel_groups, panel_names=panel_names,
                panel_versions=panel_versions, institutes=institutes)
"resource": ""
} |
def panel_update(panel_id):
    """Promote a panel's pending changes into a new panel version."""
    panel_obj = store.panel(panel_id)
    version = request.form.get('version', None)
    updated_id = store.apply_pending(panel_obj, version)
    return redirect(url_for('panels.panel', panel_id=updated_id))
"resource": ""
} |
def panel_export(panel_id):
    """Export panel to PDF file"""
    panel_obj = store.panel(panel_id)
    data = controllers.panel_export(store, panel_obj)
    data['report_created_at'] = datetime.datetime.now().strftime("%Y-%m-%d")
    html_report = render_template('panels/panel_pdf_simple.html', **data)
    # Download filename: <panel_name>_<version>_<date>_scout.pdf
    return render_pdf(HTML(string=html_report), download_filename=data['panel']['panel_name']+'_'+str(data['panel']['version'])+'_'+datetime.datetime.now().strftime("%Y-%m-%d")+'_scout.pdf')
"resource": ""
} |
def gene_edit(panel_id, hgnc_id):
    """Edit additional information about a panel gene."""
    panel_obj = store.panel(panel_id)
    hgnc_gene = store.hgnc_gene(hgnc_id)
    panel_gene = controllers.existing_gene(store, panel_obj, hgnc_id)
    form = PanelGeneForm()
    # Offer only transcripts that have a RefSeq id as choices
    transcript_choices = []
    for transcript in hgnc_gene['transcripts']:
        if transcript.get('refseq_id'):
            refseq_id = transcript.get('refseq_id')
            transcript_choices.append((refseq_id, refseq_id))
    form.disease_associated_transcripts.choices = transcript_choices
    if form.validate_on_submit():
        # 'edit' when the gene already exists on the panel, 'add' otherwise
        action = 'edit' if panel_gene else 'add'
        info_data = form.data.copy()
        if 'csrf_token' in info_data:
            del info_data['csrf_token']
        store.add_pending(panel_obj, hgnc_gene, action=action, info=info_data)
        return redirect(url_for('.panel', panel_id=panel_id))
    if panel_gene:
        # Pre-populate the form with the gene's current panel values
        for field_key in ['disease_associated_transcripts', 'reduced_penetrance',
                          'mosaicism', 'inheritance_models', 'database_entry_version', 'comment']:
            form_field = getattr(form, field_key)
            if not form_field.data:
                panel_value = panel_gene.get(field_key)
                if panel_value is not None:
                    form_field.process_data(panel_value)
    return dict(panel=panel_obj, form=form, gene=hgnc_gene, panel_gene=panel_gene)
"resource": ""
} |
def delivery_report(context, case_id, report_path,
                    update):
    """Add delivery report to an existing case."""
    adapter = context.obj['adapter']
    try:
        load_delivery_report(adapter=adapter, case_id=case_id,
                             report_path=report_path, update=update)
        LOG.info("saved report to case!")
    except Exception as e:
        # Broad catch: any load failure is logged and aborts the CLI command
        LOG.error(e)
        context.abort()
"resource": ""
} |
def hpo_terms(store, query = None, limit = None):
    """Retrieve a list of HPO terms from the scout database.

    Args:
        store (obj): an adapter to the scout database
        query (str): the term to search in the database
        limit (str): the number of desired results

    Returns:
        dict: {'phenotypes': [...]} with the matching HPO term objects
    """
    # limit arrives as a string from the request layer
    if limit:
        limit = int(limit)
    return {'phenotypes': list(store.hpo_terms(text=query, limit=limit))}
"resource": ""
} |
def whitelist(context):
    """Show all objects in the whitelist collection"""
    # BUG FIX: log message said "Running scout view users" — copy-paste
    # from the users command.
    LOG.info("Running scout view whitelist")
    adapter = context.obj['adapter']
    ## TODO add a User interface to the adapter
    for whitelist_obj in adapter.whitelist_collection.find():
        click.echo(whitelist_obj['_id'])
"resource": ""
} |
q273185 | build_phenotype | test | def build_phenotype(phenotype_id, adapter):
"""Build a small phenotype object
Build a dictionary with phenotype_id and description
Args:
phenotype_id (str): The phenotype id
adapter (scout.adapter.MongoAdapter)
Returns:
phenotype_obj (dict):
dict(
phenotype_id = str,
feature = str, # description of phenotype
)
"""
phenotype_obj = {}
phenotype = adapter.hpo_term(phenotype_id)
if phenotype:
phenotype_obj['phenotype_id'] = phenotype['hpo_id']
phenotype_obj['feature'] = phenotype['description']
return phenotype | python | {
"resource": ""
} |
def gene(store, hgnc_id):
    """Parse information about a gene.

    Collects the gene record for both genome builds, decorating records and
    transcripts with links.

    Raises:
        ValueError: when the gene is not found in any build.
    """
    res = {'builds': {'37': None, '38': None}, 'symbol': None, 'description': None, 'ensembl_id': None, 'record': None}
    for build in res['builds']:
        record = store.hgnc_gene(hgnc_id, build=build)
        if record:
            record['position'] = "{this[chromosome]}:{this[start]}-{this[end]}".format(this=record)
            res['aliases'] = record['aliases']
            res['hgnc_id'] = record['hgnc_id']
            res['description'] = record['description']
            res['builds'][build] = record
            res['symbol'] = record['hgnc_symbol']
            res['description'] = record['description']
            res['entrez_id'] = record.get('entrez_id')
            res['pli_score'] = record.get('pli_score')
            add_gene_links(record, int(build))
            res['omim_id'] = record.get('omim_id')
            res['incomplete_penetrance'] = record.get('incomplete_penetrance', False)
            res['inheritance_models'] = record.get('inheritance_models', [])
            for transcript in record['transcripts']:
                transcript['position'] = ("{this[chrom]}:{this[start]}-{this[end]}"
                                          .format(this=transcript))
                add_tx_links(transcript, build)
            for phenotype in record.get('phenotypes', []):
                phenotype['omim_link'] = omim(phenotype.get('mim_number'))
            # Keep the first build's record as the representative one
            if not res['record']:
                res['record'] = record
    # If none of the genes were found, raise.
    # BUG FIX: the original tested `not any(res.values())`, which could
    # never be true because res['builds'] is always a (truthy) dict.
    if res['record'] is None:
        raise ValueError
    return res
"resource": ""
} |
def genes_to_json(store, query):
    """Fetch matching genes and convert to JSON-serializable terms."""
    results = []
    for gene in store.hgnc_genes(query, search=True):
        label = "{} | {} ({})".format(gene['hgnc_id'], gene['hgnc_symbol'],
                                      ', '.join(gene['aliases']))
        results.append({'name': label, 'id': gene['hgnc_id']})
    return results
"resource": ""
} |
def index():
    """Display the Scout dashboard."""
    accessible_institutes = current_user.institutes
    if not 'admin' in current_user.roles:
        # NOTE(review): this reassignment duplicates the line above
        accessible_institutes = current_user.institutes
        if not accessible_institutes:
            flash('Not allowed to see information - please visit the dashboard later!')
            # NOTE(review): 'cases.dahboard_general.html' looks like a typo'd
            # endpoint (url_for takes an endpoint name, not a template path)
            # — confirm the intended redirect target.
            return redirect(url_for('cases.dahboard_general.html'))
    LOG.debug('User accessible institutes: {}'.format(accessible_institutes))
    institutes = [inst for inst in store.institutes(accessible_institutes)]
    # Insert a entry that displays all institutes in the beginning of the array
    institutes.insert(0, {'_id': None, 'display_name': 'All institutes'})
    institute_id = None
    slice_query = None
    panel=1
    if request.method=='POST':
        institute_id = request.form.get('institute')
        slice_query = request.form.get('query')
        panel=request.form.get('pane_id')
    elif request.method=='GET':
        institute_id = request.args.get('institute')
        slice_query = request.args.get('query')
    # User should be restricted to their own institute if:
    #1) Their default institute when the page is first loaded
    #2) if they ask for an institute that they don't belong to
    #3) if they want perform a query on all institutes
    if not institute_id:
        institute_id = accessible_institutes[0]
    elif (not current_user.is_admin) and (slice_query and institute_id == 'None'):
        institute_id = accessible_institutes[0]
    elif (not institute_id in accessible_institutes) and not (institute_id == 'None'):
        institute_id = accessible_institutes[0]
    LOG.info("Fetch all cases with institute: %s", institute_id)
    data = get_dashboard_info(store, institute_id, slice_query)
    data['institutes'] = institutes
    data['choice'] = institute_id
    total_cases = data['total_cases']
    LOG.info("Found %s cases", total_cases)
    if total_cases == 0:
        flash('no cases found for institute {} (with that query) - please visit the dashboard later!'.format(institute_id), 'info')
        # return redirect(url_for('cases.index'))
    return render_template(
        'dashboard/dashboard_general.html', institute=institute_id, query=slice_query, panel=panel, **data)
"resource": ""
} |
def transcripts(context, build, hgnc_id, json):
    """Show all transcripts in the database"""
    LOG.info("Running scout view transcripts")
    adapter = context.obj['adapter']
    if not json:
        # Tab-separated header, printed once before the rows.
        click.echo("Chromosome\tstart\tend\ttranscript_id\thgnc_id\trefseq\tis_primary")
    for tx_obj in adapter.transcripts(build=build, hgnc_id=hgnc_id):
        if json:
            pp(tx_obj)
            continue
        columns = (
            tx_obj['chrom'],
            tx_obj['start'],
            tx_obj['end'],
            tx_obj['ensembl_transcript_id'],
            tx_obj['hgnc_id'],
            tx_obj.get('refseq_id', ''),
            tx_obj.get('is_primary') or '',
        )
        click.echo("\t".join(str(col) for col in columns))
"resource": ""
} |
def day_display(year, month, all_month_events, day):
    """
    Returns the events that occur on the given day.

    Works by getting all occurrences for the month, then drilling
    down to only those occurring on the given day.
    """
    # Month-wide occurrence map: count[day] holds (something, pk) tuples.
    occurrences = CountHandler(year, month, all_month_events).get_count()
    event_pks = [entry[1] for entry in occurrences[day]]
    # Materializing the queryset enables the final in-memory sort.
    # See the comments in EventMonthView in views.py for more info
    queryset = (Event.objects.filter(pk__in=event_pks)
                .order_by('start_date')
                .prefetch_related('cancellations'))
    return sorted(queryset, key=lambda ev: ev.l_start_date.hour)
"resource": ""
} |
def sv_variants(store, institute_obj, case_obj, variants_query, page=1, per_page=50):
    """Pre-process list of SV variants."""
    skip_count = per_page * max(page - 1, 0)
    # Is there at least one more page of results after this one?
    more_variants = variants_query.count() > (skip_count + per_page)
    genome_build = case_obj.get('genome_build', '37')
    if genome_build not in ['37', '38']:
        genome_build = '37'
    page_query = variants_query.skip(skip_count).limit(per_page)
    parsed = (parse_variant(store, institute_obj, case_obj, variant,
                            genome_build=genome_build)
              for variant in page_query)
    return {'variants': parsed, 'more_variants': more_variants}
"resource": ""
} |
def str_variants(store, institute_obj, case_obj, variants_query, page=1, per_page=50):
    """Pre-process list of STR variants."""
    # STR variants need no category-specific handling at this level, so the
    # generic variant pre-processing is reused as-is.
    return variants(store, institute_obj, case_obj, variants_query, page, per_page)
"resource": ""
} |
def str_variant(store, institute_id, case_name, variant_id):
    """Pre-process an STR variant entry for detail page.

    Adds information to display variant

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str)
        case_name(str)
        variant_id(str)

    Returns:
        detailed_information(dict): {
            'institute': <institute_obj>,
            'case': <case_obj>,
            'variant': <variant_obj>,
            'overlapping_snvs': <overlapping_snvs>,
            'manual_rank_options': MANUAL_RANK_OPTIONS,
            'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
        }
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    variant_obj = store.variant(variant_id)
    # fill in information for pilup view
    variant_case(store, case_obj, variant_obj)
    variant_obj['callers'] = callers(variant_obj, category='str')
    variant_obj['comments'] = store.events(institute_obj, case=case_obj,
                                           variant_id=variant_obj['variant_id'], comments=True)
    # BUGFIX: 'overlapping_snvs' was referenced in the return dict without ever
    # being assigned, raising a NameError on every call. Build it lazily the
    # same way the SV detail view does (store.overlapping + parse_variant).
    overlapping_snvs = (parse_variant(store, institute_obj, case_obj, variant)
                        for variant in store.overlapping(variant_obj))
    return {
        'institute': institute_obj,
        'case': case_obj,
        'variant': variant_obj,
        'overlapping_snvs': overlapping_snvs,
        'manual_rank_options': MANUAL_RANK_OPTIONS,
        'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
    }
"resource": ""
} |
def sv_variant(store, institute_id, case_name, variant_id=None, variant_obj=None, add_case=True,
               get_overlapping=True):
    """Pre-process an SV variant entry for detail page.

    Adds information to display variant

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str)
        case_name(str)
        variant_id(str)
        variant_obj(dict)
        add_case(bool): If information about case files should be added
        get_overlapping(bool): If overlapping SNVs should be fetched

    Returns:
        detailed_information(dict): {
            'institute': <institute_obj>,
            'case': <case_obj>,
            'variant': <variant_obj>,
            'overlapping_snvs': <overlapping_snvs>,
            'manual_rank_options': MANUAL_RANK_OPTIONS,
            'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
        }
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    # Either the caller supplies a ready variant dict or we fetch it by id.
    if not variant_obj:
        variant_obj = store.variant(variant_id)
    if add_case:
        # fill in information for pilup view
        variant_case(store, case_obj, variant_obj)
    # frequencies: (label, value) pairs for the template's frequency table.
    variant_obj['frequencies'] = [
        ('1000G', variant_obj.get('thousand_genomes_frequency')),
        ('1000G (left)', variant_obj.get('thousand_genomes_frequency_left')),
        ('1000G (right)', variant_obj.get('thousand_genomes_frequency_right')),
        ('ClinGen CGH (benign)', variant_obj.get('clingen_cgh_benign')),
        ('ClinGen CGH (pathogenic)', variant_obj.get('clingen_cgh_pathogenic')),
        ('ClinGen NGI', variant_obj.get('clingen_ngi')),
        ('SweGen', variant_obj.get('swegen')),
        ('Decipher', variant_obj.get('decipher')),
    ]
    variant_obj['callers'] = callers(variant_obj, category='sv')
    overlapping_snvs = []
    if get_overlapping:
        # Lazy generator: SNVs overlapping this SV, parsed for display.
        overlapping_snvs = (parse_variant(store, institute_obj, case_obj, variant) for variant in
                            store.overlapping(variant_obj))
    # parse_gene function is not called for SVs, but a link to ensembl gene is required
    for gene_obj in variant_obj['genes']:
        if gene_obj.get('common'):
            ensembl_id = gene_obj['common']['ensembl_id']
            try:
                build = int(gene_obj['common'].get('build','37'))
            except Exception:
                # Fall back to build 37 when the stored build is not numeric.
                build = 37
            gene_obj['ensembl_link'] = ensembl(ensembl_id, build=build)
    variant_obj['comments'] = store.events(institute_obj, case=case_obj,
                                           variant_id=variant_obj['variant_id'], comments=True)
    # Attach a ClinVar significance if this variant was submitted for the case.
    case_clinvars = store.case_to_clinVars(case_obj.get('display_name'))
    if variant_id in case_clinvars:
        variant_obj['clinvar_clinsig'] = case_clinvars.get(variant_id)['clinsig']
    # Intrachromosomal events may lack an explicit end chromosome.
    if not 'end_chrom' in variant_obj:
        variant_obj['end_chrom'] = variant_obj['chromosome']
    return {
        'institute': institute_obj,
        'case': case_obj,
        'variant': variant_obj,
        'overlapping_snvs': overlapping_snvs,
        'manual_rank_options': MANUAL_RANK_OPTIONS,
        'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
    }
"resource": ""
} |
def parse_variant(store, institute_obj, case_obj, variant_obj, update=False, genome_build='37',
                  get_compounds = True):
    """Parse information about variants.

    - Adds information about compounds
    - Updates the information about compounds if necessary and 'update=True'

    Args:
        store(scout.adapter.MongoAdapter)
        institute_obj(scout.models.Institute)
        case_obj(scout.models.Case)
        variant_obj(scout.models.Variant)
        update(bool): If variant should be updated in database
        genome_build(str)
        get_compounds(bool): If compound information should be filled in

    Returns:
        variant_obj(dict): the enriched variant
    """
    # Tracks whether anything was filled in that should be persisted.
    has_changed = False
    compounds = variant_obj.get('compounds', [])
    if compounds and get_compounds:
        # Check if we need to add compound information
        # If it is the first time the case is viewed we fill in some compound information
        if 'not_loaded' not in compounds[0]:
            new_compounds = store.update_variant_compounds(variant_obj)
            variant_obj['compounds'] = new_compounds
            has_changed = True
        # sort compounds on combined rank score
        variant_obj['compounds'] = sorted(variant_obj['compounds'],
                                          key=lambda compound: -compound['combined_score'])
    # Update the hgnc symbols if they are incorrect
    variant_genes = variant_obj.get('genes')
    if variant_genes is not None:
        for gene_obj in variant_genes:
            # If there is no hgnc id there is nothin we can do
            if not gene_obj['hgnc_id']:
                continue
            # Else we collect the gene object and check the id
            if gene_obj.get('hgnc_symbol') is None:
                hgnc_gene = store.hgnc_gene(gene_obj['hgnc_id'], build=genome_build)
                if not hgnc_gene:
                    continue
                has_changed = True
                gene_obj['hgnc_symbol'] = hgnc_gene['hgnc_symbol']
    # We update the variant if some information was missing from loading
    # Or if symbold in reference genes have changed
    if update and has_changed:
        variant_obj = store.update_variant(variant_obj)
    variant_obj['comments'] = store.events(institute_obj, case=case_obj,
                                           variant_id=variant_obj['variant_id'], comments=True)
    if variant_genes:
        variant_obj.update(get_predictions(variant_genes))
        if variant_obj.get('category') == 'cancer':
            variant_obj.update(get_variant_info(variant_genes))
    for compound_obj in compounds:
        compound_obj.update(get_predictions(compound_obj.get('genes', [])))
    # Translate an integer ACMG code into the full classification mapping.
    if isinstance(variant_obj.get('acmg_classification'), int):
        acmg_code = ACMG_MAP[variant_obj['acmg_classification']]
        variant_obj['acmg_classification'] = ACMG_COMPLETE_MAP[acmg_code]
    # convert length for SV variants: sentinel values become display strings.
    variant_length = variant_obj.get('length')
    variant_obj['length'] = {100000000000: 'inf', -1: 'n.d.'}.get(variant_length, variant_length)
    if not 'end_chrom' in variant_obj:
        variant_obj['end_chrom'] = variant_obj['chromosome']
    return variant_obj
"resource": ""
} |
def variants_export_header(case_obj):
    """Returns a header for the CSV file with the filtered variants to be exported.

    Args:
        case_obj(scout.models.Case)

    Returns:
        header: includes the fields defined in scout.constants.variants_export EXPORT_HEADER
                + AD_reference, AD_alternate, GT_quality for each sample analysed for a case
    """
    header = list(EXPORT_HEADER)
    # Append three per-sample columns for every individual in the case.
    for individual in case_obj['individuals']:
        sample = str(individual['display_name'])
        header.append('AD_reference_' + sample)   # allele depth, reference allele
        header.append('AD_alternate_' + sample)   # allele depth, alternate allele
        header.append('GT_quality_' + sample)     # genotype quality
    return header
"resource": ""
} |
def get_variant_info(genes):
    """Build canonical-transcript display strings for each gene.

    Each entry has the shape ``tx_id:exon:hgvs`` (prefixed with the gene
    symbol or HGNC id when more than one gene is present); long HGVS
    descriptions are truncated to 20 characters plus an ellipsis.
    """
    transcript_strings = []
    multiple = len(genes) != 1
    for gene_obj in genes:
        canonical = gene_obj.get('canonical_transcripts')
        if canonical:
            tx_id = canonical
            exon = gene_obj.get('exon', '-')
            c_seq = gene_obj.get('hgvs_identifier', '-')
        else:
            # No canonical transcript recorded: fall back to the first
            # annotated transcript on the gene.
            first_tx = gene_obj['transcripts'][0]
            tx_id = first_tx['transcript_id']
            exon = first_tx.get('exon', '-')
            c_seq = first_tx.get('coding_sequence_name', '-')
        if len(c_seq) > 20:
            c_seq = c_seq[:20] + '...'
        parts = [tx_id, exon, c_seq]
        if multiple:
            parts.insert(0, gene_obj.get('hgnc_symbol') or str(gene_obj['hgnc_id']))
        transcript_strings.append(':'.join(parts))
    return {'canonical_transcripts': transcript_strings}
"resource": ""
} |
def get_predictions(genes):
    """Collect per-gene prediction/annotation strings.

    Returns a dict with four lists (sift, polyphen, region and functional
    annotations). With a single gene each entry is the raw value; with
    several genes each entry is prefixed with the gene symbol or HGNC id.
    Missing values are rendered as ``'-'``.
    """
    categories = ('sift_predictions', 'polyphen_predictions',
                  'region_annotations', 'functional_annotations')
    data = {category: [] for category in categories}
    single_gene = len(genes) == 1
    for gene_obj in genes:
        for category in categories:
            # The per-gene field name is the category minus the plural 's'.
            raw = gene_obj.get(category[:-1], '-')
            if single_gene:
                entry = raw
            else:
                label = gene_obj.get('hgnc_symbol') or str(gene_obj['hgnc_id'])
                entry = ':'.join([label, raw])
            data[category].append(entry)
    return data
"resource": ""
} |
def variant_case(store, case_obj, variant_obj):
    """Pre-process case for the variant view.

    Adds information about files from case obj to variant

    Mutates ``case_obj`` in place: fills in per-sample BAM/BAI paths
    (nuclear and mitochondrial) and, when possible, a region-restricted
    VCF covering the variant's gene(s) for the alignment view.

    Args:
        store(scout.adapter.MongoAdapter)
        case_obj(scout.models.Case)
        variant_obj(scout.models.Variant)
    """
    case_obj['bam_files'] = []
    case_obj['mt_bams'] = []
    case_obj['bai_files'] = []
    case_obj['mt_bais'] = []
    case_obj['sample_names'] = []
    for individual in case_obj['individuals']:
        bam_path = individual.get('bam_file')
        mt_bam = individual.get('mt_bam')
        case_obj['sample_names'].append(individual.get('display_name'))
        # Only advertise alignment files that actually exist on disk.
        # NOTE(review): indentation reconstructed from a flattened dump; the
        # 'else' is assumed to pair with the bam_path check -- confirm whether
        # the mt_bam check was originally nested or a sibling branch.
        if bam_path and os.path.exists(bam_path):
            case_obj['bam_files'].append(individual['bam_file'])
            case_obj['bai_files'].append(find_bai_file(individual['bam_file']))
            if mt_bam and os.path.exists(mt_bam):
                case_obj['mt_bams'].append(individual['mt_bam'])
                case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))
        else:
            LOG.debug("%s: no bam file found", individual['individual_id'])
    try:
        genes = variant_obj.get('genes', [])
        if len(genes) == 1:
            # Single gene: restrict the VCF to that gene's region.
            hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])
            if hgnc_gene_obj:
                vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)
                case_obj['region_vcf_file'] = vcf_path
            else:
                case_obj['region_vcf_file'] = None
        elif len(genes) > 1:
            # Several genes: use the span covering all of them.
            chrom = variant_obj['genes'][0]['common']['chromosome']
            start = min(gene['common']['start'] for gene in variant_obj['genes'])
            end = max(gene['common']['end'] for gene in variant_obj['genes'])
            # Create a reduced VCF with variants in the region
            vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)
            case_obj['region_vcf_file'] = vcf_path
    except (SyntaxError, Exception):
        # Best-effort: the alignment view simply lacks a region VCF on failure.
        LOG.warning("skip VCF region for alignment view")
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.