text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def warn(self,message):
"""write a warning to the log file.
Parameters
----------
message : str
the warning text
"""
s = str(datetime.now()) + " WARNING: " + message + '\n'
if self.echo:
print(s,end='')
if self.filename:
self.f.write(s)
self.f.flush
warnings.warn(s,PyemuWarning) | [
"def",
"warn",
"(",
"self",
",",
"message",
")",
":",
"s",
"=",
"str",
"(",
"datetime",
".",
"now",
"(",
")",
")",
"+",
"\" WARNING: \"",
"+",
"message",
"+",
"'\\n'",
"if",
"self",
".",
"echo",
":",
"print",
"(",
"s",
",",
"end",
"=",
"''",
")... | 25.733333 | 15.6 |
def dec2bin(s):
"""
dec2bin
十进制 to 二进制: bin()
:param s:
:return:
"""
if not isinstance(s, int):
num = int(s)
else:
num = s
mid = []
while True:
if num == 0:
break
num, rem = divmod(num, 2)
mid.append(base[rem])
return ''.join([str(x) for x in mid[::-1]]) | [
"def",
"dec2bin",
"(",
"s",
")",
":",
"if",
"not",
"isinstance",
"(",
"s",
",",
"int",
")",
":",
"num",
"=",
"int",
"(",
"s",
")",
"else",
":",
"num",
"=",
"s",
"mid",
"=",
"[",
"]",
"while",
"True",
":",
"if",
"num",
"==",
"0",
":",
"break... | 17.526316 | 19.736842 |
def delete_audit_sink(self, name, **kwargs):
"""
delete an AuditSink
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_audit_sink(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the AuditSink (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_audit_sink_with_http_info(name, **kwargs)
else:
(data) = self.delete_audit_sink_with_http_info(name, **kwargs)
return data | [
"def",
"delete_audit_sink",
"(",
"self",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"delete_audit_sink_with_htt... | 91.153846 | 64.076923 |
def route(method, pattern, handler=None):
"""register a routing rule
Example:
route('GET', '/path/<param>', handler)
"""
if handler is None:
return partial(route, method, pattern)
return routes.append(method, pattern, handler) | [
"def",
"route",
"(",
"method",
",",
"pattern",
",",
"handler",
"=",
"None",
")",
":",
"if",
"handler",
"is",
"None",
":",
"return",
"partial",
"(",
"route",
",",
"method",
",",
"pattern",
")",
"return",
"routes",
".",
"append",
"(",
"method",
",",
"p... | 23.272727 | 17.181818 |
def picard_mark_duplicates(job, bam, bai, validation_stringency='LENIENT'):
"""
Runs Picard MarkDuplicates on a BAM file. Requires that the BAM file be coordinate sorted.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:param str bai: FileStoreID for BAM index file
:param str validation_stringency: BAM file validation stringency, default is LENIENT
:return: FileStoreIDs for BAM and BAI files
:rtype: tuple
"""
work_dir = job.fileStore.getLocalTempDir()
# Retrieve file path
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'sorted.bam'))
job.fileStore.readGlobalFile(bai, os.path.join(work_dir, 'sorted.bai'))
# Call: picardtools
command = ['MarkDuplicates',
'INPUT=sorted.bam',
'OUTPUT=mkdups.bam',
'METRICS_FILE=metrics.txt',
'ASSUME_SORTED=true',
'CREATE_INDEX=true',
'VALIDATION_STRINGENCY=%s' % validation_stringency.upper()]
# picard-tools container doesn't have JAVA_OPTS variable
# Set TMPDIR to /data to prevent writing temporary files to /tmp
docker_parameters = ['--rm',
'--log-driver', 'none',
'-e', 'JAVA_OPTIONS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
'-v', '{}:/data'.format(work_dir)]
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
dockerParameters=docker_parameters)
end_time = time.time()
_log_runtime(job, start_time, end_time, "Picard MarkDuplicates")
bam = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mkdups.bam'))
bai = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'mkdups.bai'))
return bam, bai | [
"def",
"picard_mark_duplicates",
"(",
"job",
",",
"bam",
",",
"bai",
",",
"validation_stringency",
"=",
"'LENIENT'",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"# Retrieve file path",
"job",
".",
"fileStore",
".",
"re... | 43.363636 | 22.363636 |
def pairwise_corr(data, columns=None, covar=None, tail='two-sided',
method='pearson', padjust='none', export_filename=None):
'''Pairwise (partial) correlations between columns of a pandas dataframe.
Parameters
----------
data : pandas DataFrame
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
columns : list or str
Column names in data ::
'["a", "b", "c"]' : combination between columns a, b, and c
'["a"]' : product between a and all the other numeric columns
'[["a"], ["b", "c"]]' : product between ["a"] and ["b", "c"]
'[["a", "d"], ["b", "c"]]' : product between ["a", "d"] and ["b", "c"]
'[["a", "d"], None]' : product between ["a", "d"] and all other columns
Note that if column is not specified, then the function will return the
pairwise correlation between the combination of all the numeric columns
in data. See the examples section for more details on this.
covar : None, string or list
Covariate(s) for partial correlation. Must be one or more columns
in data. Use a list if there are more than one covariate. If
``covar`` is not None, a partial correlation will be computed using
:py:func:`pingouin.partial_corr` function.
tail : string
Indicates whether to return the 'two-sided' or 'one-sided' p-values
method : string
Specify which method to use for the computation of the correlation
coefficient. Available methods are ::
'pearson' : Pearson product-moment correlation
'spearman' : Spearman rank-order correlation
'kendall' : Kendall’s tau (ordinal data)
'percbend' : percentage bend correlation (robust)
'shepherd' : Shepherd's pi correlation (robust Spearman)
padjust : string
Method used for testing and adjustment of pvalues.
Available methods are ::
'none' : no correction
'bonferroni' : one-step Bonferroni correction
'holm' : step-down method using Bonferroni adjustments
'fdr_bh' : Benjamini/Hochberg FDR correction
'fdr_by' : Benjamini/Yekutieli FDR correction
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Stats summary ::
'X' : Name(s) of first columns
'Y' : Name(s) of second columns
'method' : method used to compute the correlation
'covar' : List of specified covariate(s) (only for partial correlation)
'tail' : indicates whether the p-values are one-sided or two-sided
'n' : Sample size (after NaN removal)
'r' : Correlation coefficients
'CI95' : 95% parametric confidence intervals
'r2' : R-squared values
'adj_r2' : Adjusted R-squared values
'z' : Standardized correlation coefficients
'p-unc' : uncorrected one or two tailed p-values
'p-corr' : corrected one or two tailed p-values
'p-adjust' : Correction method
Notes
-----
Please refer to the :py:func:`pingouin.corr()` function for a description
of the different methods. NaN are automatically removed from the data.
This function is more flexible and gives a much more detailed
output than the :py:func:`pandas.DataFrame.corr()` method (i.e. p-values,
confidence interval, Bayes Factor..). This comes however at
an increased computational cost. While this should not be discernible for
dataframe with less than 10,000 rows and/or less than 20 columns, this
function can be slow for very large dataset. For speed purpose, the Bayes
Factor is only computed when the sample size is less than 1000
(and method='pearson').
This function also works with two-dimensional multi-index columns. In this
case, columns must be list(s) of tuple(s). See the Jupyter notebook
for more details:
https://github.com/raphaelvallat/pingouin/blob/master/notebooks/04_Correlations.ipynb
If ``covar`` is specified, this function will compute the pairwise partial
correlation between the variables. If you are only interested in computing
the partial correlation matrix (i.e. the raw pairwise partial correlation
coefficient matrix, without the p-values, sample sizes, etc), a better
alternative is to use the :py:func:`pingouin.pcorr` function (see
example 7).
Examples
--------
1. One-tailed spearman correlation corrected for multiple comparisons
>>> from pingouin import pairwise_corr, read_dataset
>>> data = read_dataset('pairwise_corr').iloc[:, 1:]
>>> pairwise_corr(data, method='spearman', tail='two-sided',
... padjust='bonf') # doctest: +SKIP
2. Robust two-sided correlation with uncorrected p-values
>>> pcor = pairwise_corr(data, columns=['Openness', 'Extraversion',
... 'Neuroticism'], method='percbend')
3. One-versus-all pairwise correlations
>>> pairwise_corr(data, columns=['Neuroticism']) # doctest: +SKIP
4. Pairwise correlations between two lists of columns (cartesian product)
>>> columns = [['Neuroticism', 'Extraversion'], ['Openness']]
>>> pairwise_corr(data, columns) # doctest: +SKIP
5. As a Pandas method
>>> pcor = data.pairwise_corr(covar='Neuroticism', method='spearman')
6. Pairwise partial correlation
>>> pcor = pairwise_corr(data, covar='Neuroticism') # One covariate
>>> pcor = pairwise_corr(data, covar=['Neuroticism', 'Openness']) # Two
7. Pairwise partial correlation matrix (only the r-values)
>>> data[['Neuroticism', 'Openness', 'Extraversion']].pcorr()
Neuroticism Openness Extraversion
Neuroticism 1.000000 0.092097 -0.360421
Openness 0.092097 1.000000 0.281312
Extraversion -0.360421 0.281312 1.000000
'''
from pingouin.correlation import corr, partial_corr
if tail not in ['one-sided', 'two-sided']:
raise ValueError('Tail not recognized')
# Keep only numeric columns
data = data._get_numeric_data()
# Remove columns with constant value and/or NaN
data = data.loc[:, data.nunique(dropna=True) >= 2]
# Extract columns names
keys = data.columns.tolist()
# First ensure that columns is a list
if isinstance(columns, (str, tuple)):
columns = [columns]
def traverse(o, tree_types=(list, tuple)):
"""Helper function to flatten nested lists.
From https://stackoverflow.com/a/6340578
"""
if isinstance(o, tree_types):
for value in o:
for subvalue in traverse(value, tree_types):
yield subvalue
else:
yield o
# Check if columns index has multiple levels
if isinstance(data.columns, pd.core.index.MultiIndex):
multi_index = True
if columns is not None:
# Simple List with one element: [('L0', 'L1')]
# Simple list with >= 2 elements: [('L0', 'L1'), ('L0', 'L2')]
# Nested lists: [[('L0', 'L1')], ...] or [..., [('L0', 'L1')]]
col_flatten = list(traverse(columns, tree_types=list))
assert all(isinstance(c, (tuple, type(None))) for c in col_flatten)
else:
multi_index = False
# Then define combinations / products between columns
if columns is None:
# Case A: column is not defined --> corr between all numeric columns
combs = list(combinations(keys, 2))
else:
# Case B: column is specified
if isinstance(columns[0], list):
group1 = [e for e in columns[0] if e in keys]
# Assert that column is two-dimensional
if len(columns) == 1:
columns.append(None)
if isinstance(columns[1], list) and len(columns[1]):
# B1: [['a', 'b'], ['c', 'd']]
group2 = [e for e in columns[1] if e in keys]
else:
# B2: [['a', 'b']], [['a', 'b'], None] or [['a', 'b'], 'all']
group2 = [e for e in keys if e not in group1]
combs = list(product(group1, group2))
else:
# Column is a simple list
if len(columns) == 1:
# Case B3: one-versus-all, e.g. ['a'] or 'a'
# Check that this column exist
if columns[0] not in keys:
msg = ('"%s" is not in data or is not numeric.'
% columns[0])
raise ValueError(msg)
others = [e for e in keys if e != columns[0]]
combs = list(product(columns, others))
else:
# Combinations between all specified columns ['a', 'b', 'c']
# Make sure that we keep numeric columns
columns = [c for c in columns if c in keys]
if len(columns) == 1:
# If only one-column is left, equivalent to ['a']
others = [e for e in keys if e != columns[0]]
combs = list(product(columns, others))
else:
# combinations between ['a', 'b', 'c']
combs = list(combinations(columns, 2))
combs = np.array(combs)
if len(combs) == 0:
raise ValueError("No column combination found. Please make sure that "
"the specified columns exist in the dataframe, are "
"numeric, and contains at least two unique values.")
# Initialize empty dataframe
if multi_index:
X = list(zip(combs[:, 0, 0], combs[:, 0, 1]))
Y = list(zip(combs[:, 1, 0], combs[:, 1, 1]))
else:
X = combs[:, 0]
Y = combs[:, 1]
stats = pd.DataFrame({'X': X, 'Y': Y, 'method': method, 'tail': tail},
index=range(len(combs)),
columns=['X', 'Y', 'method', 'tail', 'n', 'outliers',
'r', 'CI95%', 'r2', 'adj_r2', 'p-val',
'BF10', 'power'])
# Now we check if covariates are present
if covar is not None:
assert isinstance(covar, (str, list)), 'covar must be list or string.'
if isinstance(covar, str):
covar = [covar]
# Check that columns exist and are numeric
assert all([c in keys for c in covar]), 'covar not in data or not num.'
# And we make sure that X or Y does not contain covar
stats = stats[~stats[['X', 'Y']].isin(covar).any(1)]
stats = stats.reset_index(drop=True)
if stats.shape[0] == 0:
raise ValueError("No column combination found. Please make sure "
"that the specified columns and covar exist in "
"the dataframe, are numeric, and contains at "
"least two unique values.")
# Compute pairwise correlations and fill dataframe
dvs = ['n', 'r', 'CI95%', 'r2', 'adj_r2', 'p-val', 'power']
dvs_out = dvs + ['outliers']
dvs_bf10 = dvs + ['BF10']
for i in range(stats.shape[0]):
col1, col2 = stats.loc[i, 'X'], stats.loc[i, 'Y']
if covar is None:
cor_st = corr(data[col1].values, data[col2].values, tail=tail,
method=method)
else:
cor_st = partial_corr(data=data, x=col1, y=col2, covar=covar,
tail=tail, method=method)
cor_st_keys = cor_st.columns.tolist()
if 'BF10' in cor_st_keys:
stats.loc[i, dvs_bf10] = cor_st[dvs_bf10].values
elif 'outliers' in cor_st_keys:
stats.loc[i, dvs_out] = cor_st[dvs_out].values
else:
stats.loc[i, dvs] = cor_st[dvs].values
# Force conversion to numeric
stats = stats.astype({'r': float, 'r2': float, 'adj_r2': float,
'n': int, 'p-val': float, 'outliers': float,
'power': float})
# Multiple comparisons
stats = stats.rename(columns={'p-val': 'p-unc'})
padjust = None if stats['p-unc'].size <= 1 else padjust
if padjust is not None:
if padjust.lower() != 'none':
reject, stats['p-corr'] = multicomp(stats['p-unc'].values,
method=padjust)
stats['p-adjust'] = padjust
else:
stats['p-corr'] = None
stats['p-adjust'] = None
# Standardize correlation coefficients (Fisher z-transformation)
stats['z'] = np.round(np.arctanh(stats['r'].values), 3)
col_order = ['X', 'Y', 'method', 'tail', 'n', 'outliers', 'r', 'CI95%',
'r2', 'adj_r2', 'z', 'p-unc', 'p-corr', 'p-adjust',
'BF10', 'power']
# Reorder columns and remove empty ones
stats = stats.reindex(columns=col_order)
stats = stats.dropna(how='all', axis=1)
# Add covariates names if present
if covar is not None:
stats.insert(loc=3, column='covar', value=str(covar))
if export_filename is not None:
_export_table(stats, export_filename)
return stats | [
"def",
"pairwise_corr",
"(",
"data",
",",
"columns",
"=",
"None",
",",
"covar",
"=",
"None",
",",
"tail",
"=",
"'two-sided'",
",",
"method",
"=",
"'pearson'",
",",
"padjust",
"=",
"'none'",
",",
"export_filename",
"=",
"None",
")",
":",
"from",
"pingouin... | 42.730519 | 22.068182 |
def verbosityToLogLevel(verbosity):
"""
Returns the specfied verbosity level interpreted as a logging level.
"""
ret = 0
if verbosity == 1:
ret = logging.INFO
elif verbosity >= 2:
ret = logging.DEBUG
return ret | [
"def",
"verbosityToLogLevel",
"(",
"verbosity",
")",
":",
"ret",
"=",
"0",
"if",
"verbosity",
"==",
"1",
":",
"ret",
"=",
"logging",
".",
"INFO",
"elif",
"verbosity",
">=",
"2",
":",
"ret",
"=",
"logging",
".",
"DEBUG",
"return",
"ret"
] | 24.5 | 15.3 |
def create(self, stage, scp_config, config=None):
'''Create a pipeline stage.
Instantiates `stage` with `config`. This essentially
translates to ``stage(config)``, except that two keys from
`scp_config` are injected into the configuration:
``tmp_dir_path`` is an execution-specific directory from
combining the top-level ``tmp_dir_path`` configuration with
:attr:`tmp_dir_suffix`; and ``third_dir_path`` is the same
path from the top-level configuration. `stage` may be either
a callable returning the stage (e.g. its class), or its name
in the configuration.
`scp_config` is the configuration for the pipeline as a
whole, and is required. `config` is the configuration for
the stage; if it is :const:`None` then it is extracted
from `scp_config`.
If you already have a fully formed configuration block
and want to create a stage, you can call
.. code-block:: python
factory.registry[stage](stage_config)
In most cases if you have a stage class object and want to
instantiate it with its defaults you can call
.. code-block:: python
stage = stage_cls(stage_cls.default_config)
.. note:: This mirrors
:meth:`yakonfig.factory.AutoFactory.create`, with
some thought that this factory class might migrate
to using that as a base in the future.
:param stage: pipeline stage class, or its name in the registry
:param dict scp_config: configuration block for the pipeline
:param dict config: configuration block for the stage, or
:const:`None` to get it from `scp_config`
'''
# Figure out what we have for a stage and its name
if isinstance(stage, basestring):
stage_name = stage
stage_obj = self.registry[stage_name]
else:
stage_name = getattr(stage, 'config_name', stage.__name__)
stage_obj = stage
# Find the configuration; get a copy we can mutate
if config is None:
config = scp_config.get(stage_name, None)
if config is None:
config = getattr(stage_obj, 'default_config', {})
config = dict(config)
# Fill in more values
if self.tmp_dir_suffix is None:
config['tmp_dir_path'] = scp_config['tmp_dir_path']
else:
config['tmp_dir_path'] = os.path.join(scp_config['tmp_dir_path'],
self.tmp_dir_suffix)
config['third_dir_path'] = scp_config['third_dir_path']
return stage_obj(config) | [
"def",
"create",
"(",
"self",
",",
"stage",
",",
"scp_config",
",",
"config",
"=",
"None",
")",
":",
"# Figure out what we have for a stage and its name",
"if",
"isinstance",
"(",
"stage",
",",
"basestring",
")",
":",
"stage_name",
"=",
"stage",
"stage_obj",
"="... | 39.656716 | 23.029851 |
def select_atoms(indices):
'''Select atoms by their indices.
You can select the first 3 atoms as follows::
select_atoms([0, 1, 2])
Return the current selection dictionary.
'''
rep = current_representation()
rep.select({'atoms': Selection(indices, current_system().n_atoms)})
return rep.selection_state | [
"def",
"select_atoms",
"(",
"indices",
")",
":",
"rep",
"=",
"current_representation",
"(",
")",
"rep",
".",
"select",
"(",
"{",
"'atoms'",
":",
"Selection",
"(",
"indices",
",",
"current_system",
"(",
")",
".",
"n_atoms",
")",
"}",
")",
"return",
"rep",... | 25.461538 | 21.307692 |
def ApprovalUrnBuilder(subject, user, approval_id):
"""Encode an approval URN."""
return aff4.ROOT_URN.Add("ACL").Add(subject).Add(user).Add(approval_id) | [
"def",
"ApprovalUrnBuilder",
"(",
"subject",
",",
"user",
",",
"approval_id",
")",
":",
"return",
"aff4",
".",
"ROOT_URN",
".",
"Add",
"(",
"\"ACL\"",
")",
".",
"Add",
"(",
"subject",
")",
".",
"Add",
"(",
"user",
")",
".",
"Add",
"(",
"approval_id",
... | 53 | 15.333333 |
def run_workers(no_subprocess, watch_paths=None, is_background=False):
"""
subprocess handler
"""
import atexit, os, subprocess, signal
if watch_paths:
from watchdog.observers import Observer
# from watchdog.observers.fsevents import FSEventsObserver as Observer
# from watchdog.observers.polling import PollingObserver as Observer
from watchdog.events import FileSystemEventHandler
def on_modified(event):
if not is_background:
print("Restarting worker due to change in %s" % event.src_path)
log.info("modified %s" % event.src_path)
try:
kill_children()
run_children()
except:
log.exception("Error while restarting worker")
handler = FileSystemEventHandler()
handler.on_modified = on_modified
# global child_pids
child_pids = []
log.info("starting %s workers" % no_subprocess)
def run_children():
global child_pids
child_pids = []
for i in range(int(no_subprocess)):
proc = subprocess.Popen([sys.executable, __file__],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
child_pids.append(proc.pid)
log.info("Started worker with pid %s" % proc.pid)
def kill_children():
"""
kill subprocess on exit of manager (this) process
"""
log.info("Stopping worker(s)")
for pid in child_pids:
if pid is not None:
os.kill(pid, signal.SIGTERM)
run_children()
atexit.register(kill_children)
signal.signal(signal.SIGTERM, kill_children)
if watch_paths:
observer = Observer()
for path in watch_paths:
if not is_background:
print("Watching for changes under %s" % path)
observer.schedule(handler, path=path, recursive=True)
observer.start()
while 1:
try:
sleep(1)
except KeyboardInterrupt:
log.info("Keyboard interrupt, exiting")
if watch_paths:
observer.stop()
observer.join()
sys.exit(0) | [
"def",
"run_workers",
"(",
"no_subprocess",
",",
"watch_paths",
"=",
"None",
",",
"is_background",
"=",
"False",
")",
":",
"import",
"atexit",
",",
"os",
",",
"subprocess",
",",
"signal",
"if",
"watch_paths",
":",
"from",
"watchdog",
".",
"observers",
"impor... | 32.636364 | 16.787879 |
def get_full_permission_string(self, perm):
"""
Return full permission string (app_label.perm_model)
"""
if not getattr(self, 'model', None):
raise AttributeError("You need to use `add_permission_logic` to "
"register the instance to the model class "
"before calling this method.")
app_label = self.model._meta.app_label
model_name = self.model._meta.object_name.lower()
return "%s.%s_%s" % (app_label, perm, model_name) | [
"def",
"get_full_permission_string",
"(",
"self",
",",
"perm",
")",
":",
"if",
"not",
"getattr",
"(",
"self",
",",
"'model'",
",",
"None",
")",
":",
"raise",
"AttributeError",
"(",
"\"You need to use `add_permission_logic` to \"",
"\"register the instance to the model c... | 49.545455 | 14.818182 |
def add_enrollment(db, uuid, organization, from_date=None, to_date=None):
"""Enroll a unique identity to an organization.
The function adds a new relationship between the unique identity
identified by 'uuid' and the given 'organization'. The given
identity and organization must exist prior to add this enrollment
in the registry. Otherwise, a 'NotFoundError' exception will be raised.
The period of the enrollment can be given with the parameters 'from_date'
and 'to_date', where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
If the given enrollment data is already in the registry, the function
will raise a 'AlreadyExistsError' exception.
:param db: database manager
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:raises NotFoundError: when either 'uuid' or 'organization' are not
found in the registry.
:raises InvalidValeError: raised in three cases, when either identity or
organization are None or empty strings; when "from_date" < 1900-01-01 or
"to_date" > 2100-01-01; when "from_date > to_date"
:raises AlreadyExistsError: raised when given enrollment already exists
in the registry.
"""
if uuid is None:
raise InvalidValueError('uuid cannot be None')
if uuid == '':
raise InvalidValueError('uuid cannot be an empty string')
if organization is None:
raise InvalidValueError('organization cannot be None')
if organization == '':
raise InvalidValueError('organization cannot be an empty string')
if not from_date:
from_date = MIN_PERIOD_DATE
if not to_date:
to_date = MAX_PERIOD_DATE
with db.connect() as session:
uidentity = find_unique_identity(session, uuid)
if not uidentity:
raise NotFoundError(entity=uuid)
org = find_organization(session, organization)
if not org:
raise NotFoundError(entity=organization)
try:
enroll_db(session, uidentity, org,
from_date=from_date, to_date=to_date)
except ValueError as e:
raise InvalidValueError(e) | [
"def",
"add_enrollment",
"(",
"db",
",",
"uuid",
",",
"organization",
",",
"from_date",
"=",
"None",
",",
"to_date",
"=",
"None",
")",
":",
"if",
"uuid",
"is",
"None",
":",
"raise",
"InvalidValueError",
"(",
"'uuid cannot be None'",
")",
"if",
"uuid",
"=="... | 38.542373 | 22.067797 |
def _to_dict(self):
"""
Converts object into a dictionary.
"""
for i, tag in enumerate(self.tags):
if tag in ("", None):
self.tags.pop(i)
data = {
'name': self.name,
'referenceId': self.reference_id,
'shortDescription': self.short_description,
'longDescription': self.long_description,
'itemState': self.item_state,
'linkURL': self.link_url,
'linkText': self.link_text,
'tags': self.tags,
'economics': self.economics,
'id': self.id,
'end_date': _make_tstamp(self.end_date),
'start_date': _make_tstamp(self.start_date)}
if len(self.renditions) > 0:
data['renditions'] = []
for r in self.renditions:
data['renditions'].append(r.to_dict())
if len(self.metadata) > 0:
data['customFields'] = {}
for meta in self.metadata:
data['customFields'][meta['key']] = meta['value']
[data.pop(key) for key in data.keys() if data[key] == None]
return data | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"for",
"i",
",",
"tag",
"in",
"enumerate",
"(",
"self",
".",
"tags",
")",
":",
"if",
"tag",
"in",
"(",
"\"\"",
",",
"None",
")",
":",
"self",
".",
"tags",
".",
"pop",
"(",
"i",
")",
"data",
"=",
"{",
... | 36.548387 | 10.16129 |
def get_resource_metadata(self, resource=None):
"""
Get resource metadata
:param resource: The name of the resource to get metadata for
:return: list
"""
result = self._make_metadata_request(meta_id=0, metadata_type='METADATA-RESOURCE')
if resource:
result = next((item for item in result if item['ResourceID'] == resource), None)
return result | [
"def",
"get_resource_metadata",
"(",
"self",
",",
"resource",
"=",
"None",
")",
":",
"result",
"=",
"self",
".",
"_make_metadata_request",
"(",
"meta_id",
"=",
"0",
",",
"metadata_type",
"=",
"'METADATA-RESOURCE'",
")",
"if",
"resource",
":",
"result",
"=",
... | 41.1 | 20.7 |
def array_to_csv(array_like): # type: (np.array or Iterable or int or float) -> str
"""Convert an array like object to CSV.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to CSV.
Returns:
(str): object serialized to CSV
"""
stream = StringIO()
np.savetxt(stream, array_like, delimiter=',', fmt='%s')
return stream.getvalue() | [
"def",
"array_to_csv",
"(",
"array_like",
")",
":",
"# type: (np.array or Iterable or int or float) -> str",
"stream",
"=",
"StringIO",
"(",
")",
"np",
".",
"savetxt",
"(",
"stream",
",",
"array_like",
",",
"delimiter",
"=",
"','",
",",
"fmt",
"=",
"'%s'",
")",
... | 38.466667 | 28.333333 |
def light_general_attention(key, context, hidden_size, projected_align=False):
""" It is a implementation of the Luong et al. attention mechanism with general score. Based on the paper:
https://arxiv.org/abs/1508.04025 "Effective Approaches to Attention-based Neural Machine Translation"
Args:
key: A tensorflow tensor with dimensionality [None, None, key_size]
context: A tensorflow tensor with dimensionality [None, None, max_num_tokens, token_size]
hidden_size: Number of units in hidden representation
projected_align: Using dense layer for hidden representation of context.
If true, between input and attention mechanism insert a dense layer with dimensionality [hidden_size].
If false, a dense layer is not used.
Returns:
output: Tensor at the output with dimensionality [None, None, hidden_size]
"""
batch_size = tf.shape(context)[0]
max_num_tokens, token_size = context.get_shape().as_list()[-2:]
r_context = tf.reshape(context, shape=[-1, max_num_tokens, token_size])
# projected_key: [None, None, hidden_size]
projected_key = tf.layers.dense(key, hidden_size, kernel_initializer=xav())
r_projected_key = tf.reshape(projected_key, shape=[-1, hidden_size, 1])
# projected context: [None, None, hidden_size]
projected_context = \
tf.layers.dense(r_context, hidden_size, kernel_initializer=xav())
attn = tf.nn.softmax(tf.matmul(projected_context, r_projected_key), dim=1)
if projected_align:
log.info("Using projected attention alignment")
t_context = tf.transpose(projected_context, [0, 2, 1])
output = tf.reshape(tf.matmul(t_context, attn),
shape=[batch_size, -1, hidden_size])
else:
log.info("Using without projected attention alignment")
t_context = tf.transpose(r_context, [0, 2, 1])
output = tf.reshape(tf.matmul(t_context, attn),
shape=[batch_size, -1, token_size])
return output | [
"def",
"light_general_attention",
"(",
"key",
",",
"context",
",",
"hidden_size",
",",
"projected_align",
"=",
"False",
")",
":",
"batch_size",
"=",
"tf",
".",
"shape",
"(",
"context",
")",
"[",
"0",
"]",
"max_num_tokens",
",",
"token_size",
"=",
"context",
... | 52.605263 | 27.842105 |
def registerToDataTypes( cls ):
"""
Registers this class as a valid datatype for saving and loading via
the datatype system.
"""
from projexui.xdatatype import registerDataType
registerDataType(cls.__name__,
lambda pyvalue: pyvalue.toString(),
lambda qvariant: cls.fromString(unwrapVariant(qvariant))) | [
"def",
"registerToDataTypes",
"(",
"cls",
")",
":",
"from",
"projexui",
".",
"xdatatype",
"import",
"registerDataType",
"registerDataType",
"(",
"cls",
".",
"__name__",
",",
"lambda",
"pyvalue",
":",
"pyvalue",
".",
"toString",
"(",
")",
",",
"lambda",
"qvaria... | 40.666667 | 12.222222 |
def list_parse(name_list):
"""Parse a comma-separated list of values, or a filename (starting with @)
containing a list value on each line.
"""
if name_list and name_list[0] == '@':
value = name_list[1:]
if not os.path.exists(value):
log.warning('The file %s does not exist' % value)
return
try:
return [v.strip() for v in open(value, 'r').readlines()]
except IOError as e:
log.warning('reading %s failed: %s; ignoring this file' %
(value, e))
else:
return [v.strip() for v in name_list.split(',')] | [
"def",
"list_parse",
"(",
"name_list",
")",
":",
"if",
"name_list",
"and",
"name_list",
"[",
"0",
"]",
"==",
"'@'",
":",
"value",
"=",
"name_list",
"[",
"1",
":",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"value",
")",
":",
"log",
... | 36.176471 | 15.411765 |
def export_transcripts(adapter, build='37'):
"""Export all transcripts from the database
Args:
adapter(scout.adapter.MongoAdapter)
build(str)
Yields:
transcript(scout.models.Transcript)
"""
LOG.info("Exporting all transcripts")
for tx_obj in adapter.transcripts(build=build):
yield tx_obj | [
"def",
"export_transcripts",
"(",
"adapter",
",",
"build",
"=",
"'37'",
")",
":",
"LOG",
".",
"info",
"(",
"\"Exporting all transcripts\"",
")",
"for",
"tx_obj",
"in",
"adapter",
".",
"transcripts",
"(",
"build",
"=",
"build",
")",
":",
"yield",
"tx_obj"
] | 24.428571 | 16.857143 |
def p(self, value, event):
"""Return the conditional probability
P(X=value | parents=parent_values), where parent_values
are the values of parents in event. (event must assign each
parent a value.)
>>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})
>>> bn.p(False, {'Burglary': False, 'Earthquake': True})
0.375"""
assert isinstance(value, bool)
ptrue = self.cpt[event_values(event, self.parents)]
return if_(value, ptrue, 1 - ptrue) | [
"def",
"p",
"(",
"self",
",",
"value",
",",
"event",
")",
":",
"assert",
"isinstance",
"(",
"value",
",",
"bool",
")",
"ptrue",
"=",
"self",
".",
"cpt",
"[",
"event_values",
"(",
"event",
",",
"self",
".",
"parents",
")",
"]",
"return",
"if_",
"(",... | 46.181818 | 13.727273 |
def _StatusUpdateThreadMain(self):
"""Main function of the status update thread."""
while self._status_update_active:
# Make a local copy of the PIDs in case the dict is changed by
# the main thread.
for pid in list(self._process_information_per_pid.keys()):
self._CheckStatusAnalysisProcess(pid)
self._UpdateForemanProcessStatus()
if self._status_update_callback:
self._status_update_callback(self._processing_status)
time.sleep(self._STATUS_UPDATE_INTERVAL) | [
"def",
"_StatusUpdateThreadMain",
"(",
"self",
")",
":",
"while",
"self",
".",
"_status_update_active",
":",
"# Make a local copy of the PIDs in case the dict is changed by",
"# the main thread.",
"for",
"pid",
"in",
"list",
"(",
"self",
".",
"_process_information_per_pid",
... | 36.357143 | 16.5 |
def clear_data(self):
"""
Clear menu data from previous menu generation.
"""
self.__header.title = None
self.__header.subtitle = None
self.__prologue.text = None
self.__epilogue.text = None
self.__items_section.items = None | [
"def",
"clear_data",
"(",
"self",
")",
":",
"self",
".",
"__header",
".",
"title",
"=",
"None",
"self",
".",
"__header",
".",
"subtitle",
"=",
"None",
"self",
".",
"__prologue",
".",
"text",
"=",
"None",
"self",
".",
"__epilogue",
".",
"text",
"=",
"... | 31 | 5.888889 |
def write(self, data, mode='w'):
"""
Write data to the file.
`data` is the data to write
`mode` is the mode argument to pass to `open()`
"""
with open(self.path, mode) as f:
f.write(data) | [
"def",
"write",
"(",
"self",
",",
"data",
",",
"mode",
"=",
"'w'",
")",
":",
"with",
"open",
"(",
"self",
".",
"path",
",",
"mode",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"data",
")"
] | 26.666667 | 10.222222 |
def handle_get_vts_command(self, vt_et):
""" Handles <get_vts> command.
@return: Response string for <get_vts> command.
"""
vt_id = vt_et.attrib.get('vt_id')
vt_filter = vt_et.attrib.get('filter')
if vt_id and vt_id not in self.vts:
text = "Failed to find vulnerability test '{0}'".format(vt_id)
return simple_response_str('get_vts', 404, text)
filtered_vts = None
if vt_filter:
filtered_vts = self.vts_filter.get_filtered_vts_list(
self.vts, vt_filter)
responses = []
vts_xml = self.get_vts_xml(vt_id, filtered_vts)
responses.append(vts_xml)
return simple_response_str('get_vts', 200, 'OK', responses) | [
"def",
"handle_get_vts_command",
"(",
"self",
",",
"vt_et",
")",
":",
"vt_id",
"=",
"vt_et",
".",
"attrib",
".",
"get",
"(",
"'vt_id'",
")",
"vt_filter",
"=",
"vt_et",
".",
"attrib",
".",
"get",
"(",
"'filter'",
")",
"if",
"vt_id",
"and",
"vt_id",
"not... | 29.36 | 21.08 |
def fillNoneValues(column):
"""Fill all NaN/NaT values of a column with an empty string
Args:
column (pandas.Series): A Series object with all rows.
Returns:
column: Series with filled NaN values.
"""
if column.dtype == object:
column.fillna('', inplace=True)
return column | [
"def",
"fillNoneValues",
"(",
"column",
")",
":",
"if",
"column",
".",
"dtype",
"==",
"object",
":",
"column",
".",
"fillna",
"(",
"''",
",",
"inplace",
"=",
"True",
")",
"return",
"column"
] | 26 | 17.833333 |
def get_element_masses(self):
"""
Get the masses of elements in the package.
:returns: [kg] An array of element masses. The sequence of the elements
in the result corresponds with the sequence of elements in the
element list of the material.
"""
result = [0] * len(self.material.elements)
for compound in self.material.compounds:
c = self.get_compound_mass(compound)
f = [c * x for x in emf(compound, self.material.elements)]
result = [v+f[ix] for ix, v in enumerate(result)]
return result | [
"def",
"get_element_masses",
"(",
"self",
")",
":",
"result",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"self",
".",
"material",
".",
"elements",
")",
"for",
"compound",
"in",
"self",
".",
"material",
".",
"compounds",
":",
"c",
"=",
"self",
".",
"get_com... | 36.8125 | 19.3125 |
def cookie_signature(seed, *parts):
"""Generates a cookie signature."""
sha1 = hmac.new(seed, digestmod=hashlib.sha1)
for part in parts:
if part:
sha1.update(part)
return sha1.hexdigest() | [
"def",
"cookie_signature",
"(",
"seed",
",",
"*",
"parts",
")",
":",
"sha1",
"=",
"hmac",
".",
"new",
"(",
"seed",
",",
"digestmod",
"=",
"hashlib",
".",
"sha1",
")",
"for",
"part",
"in",
"parts",
":",
"if",
"part",
":",
"sha1",
".",
"update",
"(",... | 31 | 11.428571 |
def step_I_create_logrecords_with_table(context):
"""
Step definition that creates one more log records by using a table.
.. code-block: gherkin
When I create log records with:
| category | level | message |
| foo | ERROR | Hello Foo |
| foo.bar | WARN | Hello Foo.Bar |
Table description
------------------
| Column | Type | Required | Description |
| category | string | yes | Category (or logger) to use. |
| level | LogLevel | yes | Log level to use. |
| message | string | yes | Log message to use. |
.. code-block: python
import logging
from behave.configuration import LogLevel
for row in table.rows:
logger = logging.getLogger(row.category)
level = LogLevel.parse_type(row.level)
logger.log(level, row.message)
"""
assert context.table, "REQUIRE: context.table"
context.table.require_columns(["category", "level", "message"])
for row in context.table.rows:
category = row["category"]
if category == "__ROOT__":
category = None
level = LogLevel.parse_type(row["level"])
message = row["message"]
make_log_record(category, level, message) | [
"def",
"step_I_create_logrecords_with_table",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"context",
".",
"table",
".",
"require_columns",
"(",
"[",
"\"category\"",
",",
"\"level\"",
",",
"\"message\"",
"]",
")",
... | 34.216216 | 16.216216 |
def get_obj_class(self, obj_type):
""" Returns the object class based on parent and object types.
In most cases the object class can be derived from object type alone but sometimes the
same object type name is used for different object types so the parent (or even
grandparent) type is required in order to determine the exact object type.
For example, interface object type can be child of vport or router (ospf etc.). In the
first case the required class is IxnInterface while in the later case it is IxnObject.
:param obj_type: IXN object type.
:return: object class if specific class else IxnObject.
"""
if obj_type in IxnObject.str_2_class:
if type(IxnObject.str_2_class[obj_type]) is dict:
if self.obj_type() in IxnObject.str_2_class[obj_type]:
return IxnObject.str_2_class[obj_type][self.obj_type()]
elif self.obj_parent().obj_type() in IxnObject.str_2_class[obj_type]:
return IxnObject.str_2_class[obj_type][self.obj_parent().obj_type()]
else:
return IxnObject.str_2_class[obj_type]
return IxnObject | [
"def",
"get_obj_class",
"(",
"self",
",",
"obj_type",
")",
":",
"if",
"obj_type",
"in",
"IxnObject",
".",
"str_2_class",
":",
"if",
"type",
"(",
"IxnObject",
".",
"str_2_class",
"[",
"obj_type",
"]",
")",
"is",
"dict",
":",
"if",
"self",
".",
"obj_type",... | 54.045455 | 29 |
def batch(self, table_name, timeout=None):
'''
Creates a batch object which can be used as a context manager. Commits the batch on exit.
:param str table_name:
The name of the table to commit the batch to.
:param int timeout:
The server timeout, expressed in seconds.
'''
batch = TableBatch()
yield batch
self.commit_batch(table_name, batch, timeout=timeout) | [
"def",
"batch",
"(",
"self",
",",
"table_name",
",",
"timeout",
"=",
"None",
")",
":",
"batch",
"=",
"TableBatch",
"(",
")",
"yield",
"batch",
"self",
".",
"commit_batch",
"(",
"table_name",
",",
"batch",
",",
"timeout",
"=",
"timeout",
")"
] | 36.333333 | 22 |
def run_filter_radia(job, bams, radia_file, univ_options, radia_options, chrom):
"""
This module will run filterradia on the RNA and DNA bams.
ARGUMENTS
1. bams: REFER ARGUMENTS of run_radia()
2. univ_options: REFER ARGUMENTS of run_radia()
3. radia_file: <JSid of vcf generated by run_radia()>
3. radia_options: REFER ARGUMENTS of run_radia()
4. chrom: REFER ARGUMENTS of run_radia()
RETURN VALUES
1. Dict of filtered radia output vcf and logfile
|- 'radia_filtered_CHROM.vcf': <JSid>
+- 'radia_filtered_CHROM_radia.log': <JSid>
"""
job.fileStore.logToMaster('Running filter-radia on %s:%s' % (univ_options['patient'], chrom))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'rna.bam': bams['tumor_rna'],
'rna.bam.bai': bams['tumor_rnai'],
'tumor.bam': bams['tumor_dna'],
'tumor.bam.bai': bams['tumor_dnai'],
'normal.bam': bams['normal_dna'],
'normal.bam.bai': bams['normal_dnai'],
'radia.vcf': radia_file,
'genome.fasta': radia_options['genome_fasta'],
'genome.fasta.fai': radia_options['genome_fai']
}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
filterradia_output = ''.join(['radia_filtered_', chrom, '.vcf'])
filterradia_log = ''.join([work_dir, '/radia_filtered_', chrom, '_radia.log'
])
parameters = [univ_options['patient'], # shortID
chrom.lstrip('chr'),
input_files['radia.vcf'],
'/data',
'/home/radia/scripts',
'-b', '/home/radia/data/hg19/blacklists/1000Genomes/phase1/',
'-d', '/home/radia/data/hg19/snp135',
'-r', '/home/radia/data/hg19/retroGenes/',
'-p', '/home/radia/data/hg19/pseudoGenes/',
'-c', '/home/radia/data/hg19/cosmic/',
'-t', '/home/radia/data/hg19/gaf/2_1',
'--noSnpEff',
'--rnaGeneBlckFile', '/home/radia/data/rnaGeneBlacklist.tab',
'--rnaGeneFamilyBlckFile',
'/home/radia/data/rnaGeneFamilyBlacklist.tab',
'-f', input_files['genome.fasta'],
'--log=INFO',
'-g', docker_path(filterradia_log)]
docker_call(tool='filterradia', tool_parameters=parameters,
work_dir=work_dir, dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
output_files[filterradia_output] = \
job.fileStore.writeGlobalFile(''.join([work_dir, '/',
univ_options['patient'], '_',
chrom, '.vcf']))
output_files[os.path.basename(filterradia_log)] = \
job.fileStore.writeGlobalFile(filterradia_log)
return output_files | [
"def",
"run_filter_radia",
"(",
"job",
",",
"bams",
",",
"radia_file",
",",
"univ_options",
",",
"radia_options",
",",
"chrom",
")",
":",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Running filter-radia on %s:%s'",
"%",
"(",
"univ_options",
"[",
"'patien... | 46.854839 | 16.33871 |
def _reflect_table(self):
"""Load the tables definition from the database."""
with self.db.lock:
try:
self._table = SQLATable(self.name,
self.db.metadata,
schema=self.db.schema,
autoload=True)
except NoSuchTableError:
pass | [
"def",
"_reflect_table",
"(",
"self",
")",
":",
"with",
"self",
".",
"db",
".",
"lock",
":",
"try",
":",
"self",
".",
"_table",
"=",
"SQLATable",
"(",
"self",
".",
"name",
",",
"self",
".",
"db",
".",
"metadata",
",",
"schema",
"=",
"self",
".",
... | 40.5 | 14 |
def _process_cell(i, state, finite=False):
"""Process 3 cells and return a value from 0 to 7. """
op_1 = state[i - 1]
op_2 = state[i]
if i == len(state) - 1:
if finite:
op_3 = state[0]
else:
op_3 = 0
else:
op_3 = state[i + 1]
result = 0
for i, val in enumerate([op_3, op_2, op_1]):
if val:
result += 2**i
return result | [
"def",
"_process_cell",
"(",
"i",
",",
"state",
",",
"finite",
"=",
"False",
")",
":",
"op_1",
"=",
"state",
"[",
"i",
"-",
"1",
"]",
"op_2",
"=",
"state",
"[",
"i",
"]",
"if",
"i",
"==",
"len",
"(",
"state",
")",
"-",
"1",
":",
"if",
"finite... | 25.1875 | 17.1875 |
def scale_0to1(image_in,
exclude_outliers_below=False,
exclude_outliers_above=False):
"""Scale the two images to [0, 1] based on min/max from both.
Parameters
-----------
image_in : ndarray
Input image
exclude_outliers_{below,above} : float
Lower/upper limit, a value between 0 and 100.
Returns
-------
scaled_image : ndarray
clipped and/or scaled image
"""
min_value = image_in.min()
max_value = image_in.max()
# making a copy to ensure no side-effects
image = image_in.copy()
if exclude_outliers_below:
perctl = float(exclude_outliers_below)
image[image < np.percentile(image, perctl)] = min_value
if exclude_outliers_above:
perctl = float(exclude_outliers_above)
image[image > np.percentile(image, 100.0 - perctl)] = max_value
image = (image - min_value) / (max_value - min_value)
return image | [
"def",
"scale_0to1",
"(",
"image_in",
",",
"exclude_outliers_below",
"=",
"False",
",",
"exclude_outliers_above",
"=",
"False",
")",
":",
"min_value",
"=",
"image_in",
".",
"min",
"(",
")",
"max_value",
"=",
"image_in",
".",
"max",
"(",
")",
"# making a copy t... | 25.666667 | 20.333333 |
def fetchImageUrl(self, image_id):
"""Fetches the url to the original image from an image attachment ID
:param image_id: The image you want to fethc
:type image_id: str
:return: An url where you can download the original image
:rtype: str
:raises: FBchatException if request failed
"""
image_id = str(image_id)
data = {"photo_id": str(image_id)}
j = self._get(
ReqUrl.ATTACHMENT_PHOTO, query=data, fix_request=True, as_json=True
)
url = get_jsmods_require(j, 3)
if url is None:
raise FBchatException("Could not fetch image url from: {}".format(j))
return url | [
"def",
"fetchImageUrl",
"(",
"self",
",",
"image_id",
")",
":",
"image_id",
"=",
"str",
"(",
"image_id",
")",
"data",
"=",
"{",
"\"photo_id\"",
":",
"str",
"(",
"image_id",
")",
"}",
"j",
"=",
"self",
".",
"_get",
"(",
"ReqUrl",
".",
"ATTACHMENT_PHOTO"... | 35.684211 | 18.263158 |
def build_url_field(self, field_name, model_class):
"""
Create a field representing the object's own URL.
"""
field_class = self.serializer_url_field
field_kwargs = get_url_kwargs(model_class)
return field_class, field_kwargs | [
"def",
"build_url_field",
"(",
"self",
",",
"field_name",
",",
"model_class",
")",
":",
"field_class",
"=",
"self",
".",
"serializer_url_field",
"field_kwargs",
"=",
"get_url_kwargs",
"(",
"model_class",
")",
"return",
"field_class",
",",
"field_kwargs"
] | 33.375 | 10.625 |
def write(self, document, obj, *args, **kwargs):
"""
Returns a Deferred that fire the factory result
that should be the document.
"""
try:
document = IWritableDocument(document)
mime_type = document.mime_type
writer = self.lookup_writer(mime_type, obj)
if not writer:
msg = ("No adapter found to write object %s to %s document"
% (obj.__class__.__name__, mime_type))
raise NoWriterFoundError(msg)
return writer.write(document, obj, *args, **kwargs)
except:
return defer.fail(Failure()) | [
"def",
"write",
"(",
"self",
",",
"document",
",",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"document",
"=",
"IWritableDocument",
"(",
"document",
")",
"mime_type",
"=",
"document",
".",
"mime_type",
"writer",
"=",
"self"... | 40.3125 | 12.8125 |
def splitFASTA(params):
"""
Read the FASTA file named params['fastaFile'] and print out its
sequences into files named 0.fasta, 1.fasta, etc. with
params['seqsPerJob'] sequences per file.
"""
assert params['fastaFile'][-1] == 'a', ('You must specify a file in '
'fasta-format that ends in '
'.fasta')
fileCount = count = seqCount = 0
outfp = None
with open(params['fastaFile']) as infp:
for seq in SeqIO.parse(infp, 'fasta'):
seqCount += 1
if count == params['seqsPerJob']:
outfp.close()
count = 0
if count == 0:
outfp = open('%d.fasta' % fileCount, 'w')
fileCount += 1
count += 1
outfp.write('>%s\n%s\n' % (seq.description, str(seq.seq)))
outfp.close()
return fileCount, seqCount | [
"def",
"splitFASTA",
"(",
"params",
")",
":",
"assert",
"params",
"[",
"'fastaFile'",
"]",
"[",
"-",
"1",
"]",
"==",
"'a'",
",",
"(",
"'You must specify a file in '",
"'fasta-format that ends in '",
"'.fasta'",
")",
"fileCount",
"=",
"count",
"=",
"seqCount",
... | 36.84 | 15.56 |
def delete_patch(self, patch_name=None, remove=False, backup=False):
""" Delete specified patch from the series
If remove is True the patch file will also be removed. If remove and
backup are True a copy of the deleted patch file will be made.
"""
if patch_name:
patch = Patch(patch_name)
else:
patch = self.db.top_patch()
if not patch:
raise NoAppliedPatch(self.db)
self._delete_patch(patch, remove=remove, backup=backup) | [
"def",
"delete_patch",
"(",
"self",
",",
"patch_name",
"=",
"None",
",",
"remove",
"=",
"False",
",",
"backup",
"=",
"False",
")",
":",
"if",
"patch_name",
":",
"patch",
"=",
"Patch",
"(",
"patch_name",
")",
"else",
":",
"patch",
"=",
"self",
".",
"d... | 39.923077 | 17.384615 |
def serialize_dict(self, value):
"""
Ensure that all values of a dictionary are properly serialized
:param value:
:return:
"""
# Check if this is a dict
if not isinstance(value, dict):
return value
# Loop over all the values and serialize them
return {
dict_key: self.serialize_value(dict_value)
for dict_key, dict_value in value.items()
} | [
"def",
"serialize_dict",
"(",
"self",
",",
"value",
")",
":",
"# Check if this is a dict",
"if",
"not",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"value",
"# Loop over all the values and serialize them",
"return",
"{",
"dict_key",
":",
"self",
"... | 27.625 | 17.5 |
def first_return():
"""Generate a random walk and return its length upto the moment
that the walker first returns to the origin.
It is mathematically provable that the walker will eventually return,
meaning that the function call will halt, although it may take
a *very* long time and your computer may run out of memory!
Thus, try this interactively only.
"""
walk = randwalk() >> drop(1) >> takewhile(lambda v: v != Origin) >> list
return len(walk) | [
"def",
"first_return",
"(",
")",
":",
"walk",
"=",
"randwalk",
"(",
")",
">>",
"drop",
"(",
"1",
")",
">>",
"takewhile",
"(",
"lambda",
"v",
":",
"v",
"!=",
"Origin",
")",
">>",
"list",
"return",
"len",
"(",
"walk",
")"
] | 40.909091 | 18.181818 |
def _send_string_clipboard(self, string: str, paste_command: model.SendMode):
"""
Use the clipboard to send a string.
"""
backup = self.clipboard.text # Keep a backup of current content, to restore the original afterwards.
if backup is None:
logger.warning("Tried to backup the X clipboard content, but got None instead of a string.")
self.clipboard.text = string
try:
self.mediator.send_string(paste_command.value)
finally:
self.ungrab_keyboard()
# Because send_string is queued, also enqueue the clipboard restore, to keep the proper action ordering.
self.__enqueue(self._restore_clipboard_text, backup) | [
"def",
"_send_string_clipboard",
"(",
"self",
",",
"string",
":",
"str",
",",
"paste_command",
":",
"model",
".",
"SendMode",
")",
":",
"backup",
"=",
"self",
".",
"clipboard",
".",
"text",
"# Keep a backup of current content, to restore the original afterwards.",
"if... | 50.642857 | 25.642857 |
def disconnect(self):
"""Disconnect from the socket."""
if self.pipeline:
self._send(*self.pipeline)
self.pipeline = None | [
"def",
"disconnect",
"(",
"self",
")",
":",
"if",
"self",
".",
"pipeline",
":",
"self",
".",
"_send",
"(",
"*",
"self",
".",
"pipeline",
")",
"self",
".",
"pipeline",
"=",
"None"
] | 30.6 | 9.6 |
def get_as_datetime_with_default(self, index, default_value):
"""
Converts array element into a Date or returns default value if conversion is not possible.
:param index: an index of element to get.
:param default_value: the default value
:return: Date value ot the element or default value if conversion is not supported.
"""
value = self[index]
return DateTimeConverter.to_datetime_with_default(value, default_value) | [
"def",
"get_as_datetime_with_default",
"(",
"self",
",",
"index",
",",
"default_value",
")",
":",
"value",
"=",
"self",
"[",
"index",
"]",
"return",
"DateTimeConverter",
".",
"to_datetime_with_default",
"(",
"value",
",",
"default_value",
")"
] | 39.5 | 26.5 |
def health_check(self):
"""Gets a single item to determine if Dynamo is functioning."""
logger.debug('Health Check on Table: {namespace}'.format(
namespace=self.namespace
))
try:
self.get_all()
return True
except ClientError as e:
logger.exception(e)
logger.error('Error encountered with Database. Assume unhealthy')
return False | [
"def",
"health_check",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Health Check on Table: {namespace}'",
".",
"format",
"(",
"namespace",
"=",
"self",
".",
"namespace",
")",
")",
"try",
":",
"self",
".",
"get_all",
"(",
")",
"return",
"True",
"ex... | 30.714286 | 20.357143 |
def walk_paths(self,
base: Optional[pathlib.PurePath] = pathlib.PurePath()) \
-> Iterator[pathlib.PurePath]:
"""
Recursively traverse all paths inside this entity, including the entity
itself.
:param base: The base path to prepend to the entity name.
:return: An iterator of paths.
"""
raise NotImplementedError() | [
"def",
"walk_paths",
"(",
"self",
",",
"base",
":",
"Optional",
"[",
"pathlib",
".",
"PurePath",
"]",
"=",
"pathlib",
".",
"PurePath",
"(",
")",
")",
"->",
"Iterator",
"[",
"pathlib",
".",
"PurePath",
"]",
":",
"raise",
"NotImplementedError",
"(",
")"
] | 35.545455 | 17.545455 |
def move_layer_down(self):
"""Move the layer down."""
layer = self.list_layers_in_map_report.selectedItems()[0]
index = self.list_layers_in_map_report.indexFromItem(layer).row()
item = self.list_layers_in_map_report.takeItem(index)
self.list_layers_in_map_report.insertItem(index + 1, item)
self.list_layers_in_map_report.item(index + 1).setSelected(True) | [
"def",
"move_layer_down",
"(",
"self",
")",
":",
"layer",
"=",
"self",
".",
"list_layers_in_map_report",
".",
"selectedItems",
"(",
")",
"[",
"0",
"]",
"index",
"=",
"self",
".",
"list_layers_in_map_report",
".",
"indexFromItem",
"(",
"layer",
")",
".",
"row... | 56.714286 | 21.571429 |
def mkdtemp(suffix="", prefix=template, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
return file
except FileExistsError:
continue # try again
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found") | [
"def",
"mkdtemp",
"(",
"suffix",
"=",
"\"\"",
",",
"prefix",
"=",
"template",
",",
"dir",
"=",
"None",
")",
":",
"if",
"dir",
"is",
"None",
":",
"dir",
"=",
"gettempdir",
"(",
")",
"names",
"=",
"_get_candidate_names",
"(",
")",
"for",
"seq",
"in",
... | 29.965517 | 21.482759 |
def _check_type(self):
"""Check that point value types match the descriptor type."""
check_type = metric_descriptor.MetricDescriptorType.to_type_class(
self.descriptor.type)
for ts in self.time_series:
if not ts.check_points_type(check_type):
raise ValueError("Invalid point value type") | [
"def",
"_check_type",
"(",
"self",
")",
":",
"check_type",
"=",
"metric_descriptor",
".",
"MetricDescriptorType",
".",
"to_type_class",
"(",
"self",
".",
"descriptor",
".",
"type",
")",
"for",
"ts",
"in",
"self",
".",
"time_series",
":",
"if",
"not",
"ts",
... | 49.285714 | 13.714286 |
def choices(self):
"""Menu options for new configuration files
"""
print("| {0}K{1}{2}eep the old and .new files, no changes".format(
self.red, self.endc, self.br))
print("| {0}O{1}{2}verwrite all old configuration files with new "
"ones".format(self.red, self.endc, self.br))
print("| The old files will be saved with suffix .old")
print("| {0}R{1}{2}emove all .new files".format(
self.red, self.endc, self.br))
print("| {0}P{1}{2}rompt K, O, R, D, M option for each single "
"file".format(self.red, self.endc, self.br))
print("| {0}Q{1}{2}uit from menu".format(self.red, self.endc, self.br))
self.msg.template(78)
try:
choose = raw_input("\nWhat would you like to do [K/O/R/P/Q]? ")
except EOFError:
print("")
raise SystemExit()
print("")
if choose in ("K", "k"):
self.keep()
elif choose in ("O", "o"):
self.overwrite_all()
elif choose in ("R", "r"):
self.remove_all()
elif choose in ("P", "p"):
self.prompt() | [
"def",
"choices",
"(",
"self",
")",
":",
"print",
"(",
"\"| {0}K{1}{2}eep the old and .new files, no changes\"",
".",
"format",
"(",
"self",
".",
"red",
",",
"self",
".",
"endc",
",",
"self",
".",
"br",
")",
")",
"print",
"(",
"\"| {0}O{1}{2}verwrite all old con... | 41.035714 | 16.392857 |
def param_value_encode(self, param_id, param_value, param_type, param_count, param_index):
'''
Emit the value of a onboard parameter. The inclusion of param_count
and param_index in the message allows the recipient to
keep track of received parameters and allows him to
re-request missing parameters after a loss or timeout.
param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char)
param_value : Onboard parameter value (float)
param_type : Onboard parameter type: see the MAV_PARAM_TYPE enum for supported data types. (uint8_t)
param_count : Total number of onboard parameters (uint16_t)
param_index : Index of this onboard parameter (uint16_t)
'''
return MAVLink_param_value_message(param_id, param_value, param_type, param_count, param_index) | [
"def",
"param_value_encode",
"(",
"self",
",",
"param_id",
",",
"param_value",
",",
"param_type",
",",
"param_count",
",",
"param_index",
")",
":",
"return",
"MAVLink_param_value_message",
"(",
"param_id",
",",
"param_value",
",",
"param_type",
",",
"param_count",
... | 80.466667 | 56.733333 |
def convert(qlr, images, label, **kwargs):
r"""Converts one or more images to a raster instruction file.
:param qlr:
An instance of the BrotherQLRaster class
:type qlr: :py:class:`brother_ql.raster.BrotherQLRaster`
:param images:
The images to be converted. They can be filenames or instances of Pillow's Image.
:type images: list(PIL.Image.Image) or list(str) images
:param str label:
Type of label the printout should be on.
:param \**kwargs:
See below
:Keyword Arguments:
* **cut** (``bool``) --
Enable cutting after printing the labels.
* **dither** (``bool``) --
Instead of applying a threshold to the pixel values, approximate grey tones with dithering.
* **compress**
* **red**
* **rotate**
* **dpi_600**
* **hq**
* **threshold**
"""
label_specs = label_type_specs[label]
dots_printable = label_specs['dots_printable']
right_margin_dots = label_specs['right_margin_dots']
right_margin_dots += right_margin_addition.get(qlr.model, 0)
device_pixel_width = qlr.get_pixel_width()
cut = kwargs.get('cut', True)
dither = kwargs.get('dither', False)
compress = kwargs.get('compress', False)
red = kwargs.get('red', False)
rotate = kwargs.get('rotate', 'auto')
if rotate != 'auto': rotate = int(rotate)
dpi_600 = kwargs.get('dpi_600', False)
hq = kwargs.get('hq', True)
threshold = kwargs.get('threshold', 70)
threshold = 100.0 - threshold
threshold = min(255, max(0, int(threshold/100.0 * 255)))
if red and not qlr.two_color_support:
raise BrotherQLUnsupportedCmd('Printing in red is not supported with the selected model.')
try:
qlr.add_switch_mode()
except BrotherQLUnsupportedCmd:
pass
qlr.add_invalidate()
qlr.add_initialize()
try:
qlr.add_switch_mode()
except BrotherQLUnsupportedCmd:
pass
for image in images:
if isinstance(image, Image.Image):
im = image
else:
try:
im = Image.open(image)
except:
raise NotImplementedError("The image argument needs to be an Image() instance, the filename to an image, or a file handle.")
if im.mode.endswith('A'):
# place in front of white background and get red of transparency
bg = Image.new("RGB", im.size, (255,255,255))
bg.paste(im, im.split()[-1])
im = bg
elif im.mode == "P":
# Convert GIF ("P") to RGB
im = im.convert("RGB" if red else "L")
elif im.mode == "L" and red:
# Convert greyscale to RGB if printing on black/red tape
im = im.convert("RGB")
if dpi_600:
dots_expected = [el*2 for el in dots_printable]
else:
dots_expected = dots_printable
if label_specs['kind'] == ENDLESS_LABEL:
if rotate not in ('auto', 0):
im = im.rotate(rotate, expand=True)
if dpi_600:
im = im.resize((im.size[0]//2, im.size[1]))
if im.size[0] != dots_printable[0]:
hsize = int((dots_printable[0] / im.size[0]) * im.size[1])
im = im.resize((dots_printable[0], hsize), Image.ANTIALIAS)
logger.warning('Need to resize the image...')
if im.size[0] < device_pixel_width:
new_im = Image.new(im.mode, (device_pixel_width, im.size[1]), (255,)*len(im.mode))
new_im.paste(im, (device_pixel_width-im.size[0]-right_margin_dots, 0))
im = new_im
elif label_specs['kind'] in (DIE_CUT_LABEL, ROUND_DIE_CUT_LABEL):
if rotate == 'auto':
if im.size[0] == dots_expected[1] and im.size[1] == dots_expected[0]:
im = im.rotate(90, expand=True)
elif rotate != 0:
im = im.rotate(rotate, expand=True)
if im.size[0] != dots_expected[0] or im.size[1] != dots_expected[1]:
raise ValueError("Bad image dimensions: %s. Expecting: %s." % (im.size, dots_expected))
if dpi_600:
im = im.resize((im.size[0]//2, im.size[1]))
new_im = Image.new(im.mode, (device_pixel_width, dots_expected[1]), (255,)*len(im.mode))
new_im.paste(im, (device_pixel_width-im.size[0]-right_margin_dots, 0))
im = new_im
if red:
filter_h = lambda h: 255 if (h < 40 or h > 210) else 0
filter_s = lambda s: 255 if s > 100 else 0
filter_v = lambda v: 255 if v > 80 else 0
red_im = filtered_hsv(im, filter_h, filter_s, filter_v)
red_im = red_im.convert("L")
red_im = PIL.ImageOps.invert(red_im)
red_im = red_im.point(lambda x: 0 if x < threshold else 255, mode="1")
filter_h = lambda h: 255
filter_s = lambda s: 255
filter_v = lambda v: 255 if v < 80 else 0
black_im = filtered_hsv(im, filter_h, filter_s, filter_v)
black_im = black_im.convert("L")
black_im = PIL.ImageOps.invert(black_im)
black_im = black_im.point(lambda x: 0 if x < threshold else 255, mode="1")
black_im = PIL.ImageChops.subtract(black_im, red_im)
else:
im = im.convert("L")
im = PIL.ImageOps.invert(im)
if dither:
im = im.convert("1", dither=Image.FLOYDSTEINBERG)
else:
im = im.point(lambda x: 0 if x < threshold else 255, mode="1")
qlr.add_status_information()
tape_size = label_specs['tape_size']
if label_specs['kind'] in (DIE_CUT_LABEL, ROUND_DIE_CUT_LABEL):
qlr.mtype = 0x0B
qlr.mwidth = tape_size[0]
qlr.mlength = tape_size[1]
else:
qlr.mtype = 0x0A
qlr.mwidth = tape_size[0]
qlr.mlength = 0
qlr.pquality = int(hq)
qlr.add_media_and_quality(im.size[1])
try:
if cut:
qlr.add_autocut(True)
qlr.add_cut_every(1)
except BrotherQLUnsupportedCmd:
pass
try:
qlr.dpi_600 = dpi_600
qlr.cut_at_end = cut
qlr.two_color_printing = True if red else False
qlr.add_expanded_mode()
except BrotherQLUnsupportedCmd:
pass
qlr.add_margins(label_specs['feed_margin'])
try:
if compress: qlr.add_compression(True)
except BrotherQLUnsupportedCmd:
pass
if red:
qlr.add_raster_data(black_im, red_im)
else:
qlr.add_raster_data(im)
qlr.add_print()
return qlr.data | [
"def",
"convert",
"(",
"qlr",
",",
"images",
",",
"label",
",",
"*",
"*",
"kwargs",
")",
":",
"label_specs",
"=",
"label_type_specs",
"[",
"label",
"]",
"dots_printable",
"=",
"label_specs",
"[",
"'dots_printable'",
"]",
"right_margin_dots",
"=",
"label_specs"... | 38.045714 | 19.371429 |
def remove_first(self):
    """Drop the first node of the linked list.

    :return: True iff a head node existed and was removed
    """
    current_head = self.head
    if current_head is not None:
        self.head = current_head.next_node
        return True
    return False
"def",
"remove_first",
"(",
"self",
")",
":",
"if",
"self",
".",
"head",
"is",
"None",
":",
"return",
"False",
"self",
".",
"head",
"=",
"self",
".",
"head",
".",
"next_node",
"return",
"True"
] | 19.636364 | 17.545455 |
def text(self):
    """
    Return the string associated with the current text event.

    Yields the empty string when no name is set (m_name == -1) or the
    current event is not a TEXT event.
    """
    have_text = self.m_name != -1 and self.m_event == const.TEXT
    return self.sb[self.m_name] if have_text else u''
"def",
"text",
"(",
"self",
")",
":",
"if",
"self",
".",
"m_name",
"==",
"-",
"1",
"or",
"self",
".",
"m_event",
"!=",
"const",
".",
"TEXT",
":",
"return",
"u''",
"return",
"self",
".",
"sb",
"[",
"self",
".",
"m_name",
"]"
] | 26.5 | 15.75 |
def fit_quantile(self, X, y, quantile, max_iter=20, tol=0.01, weights=None):
    """Fit ExpectileGAM to a desired quantile via binary search.

    Parameters
    ----------
    X : array-like, shape (n_samples, m_features)
        Training vectors.
    y : array-like, shape (n_samples,)
        Target values.
    quantile : float on (0, 1)
        Desired quantile to fit.
    max_iter : int, default: 20
        Maximum number of binary-search iterations to perform.
    tol : float > 0, default: 0.01
        Maximum distance between desired quantile and fitted quantile.
    weights : array-like, shape (n_samples,) or None, default: None
        Sample weights; if None, defaults to an array of ones.

    Returns
    -------
    self : fitted GAM object
    """
    def _close_enough(got, wanted):
        return np.abs(got - wanted) <= tol

    # validate arguments
    if not 0 < quantile < 1:
        raise ValueError('quantile must be on (0, 1), but found {}'.format(quantile))
    if tol <= 0:
        raise ValueError('tol must be float > 0 {}'.format(tol))
    if max_iter <= 0:
        raise ValueError('max_iter must be int > 0 {}'.format(max_iter))

    # an initial fit is required before the quantile ratio can be measured
    if not self._is_fitted:
        self.fit(X, y, weights=weights)

    # bisect on the expectile until the fitted quantile is close enough
    hi = 1.0
    lo = 0.0
    for _ in range(max_iter):
        ratio = self._get_quantile_ratio(X, y)
        if _close_enough(ratio, quantile):
            break
        if ratio < quantile:
            lo = self.expectile
        else:
            hi = self.expectile
        self.set_params(expectile=(hi + lo) / 2.)
        self.fit(X, y, weights=weights)

    # diagnostics when the search did not converge
    if not _close_enough(ratio, quantile) and self.verbose:
        warnings.warn('maximum iterations reached')
    return self
"def",
"fit_quantile",
"(",
"self",
",",
"X",
",",
"y",
",",
"quantile",
",",
"max_iter",
"=",
"20",
",",
"tol",
"=",
"0.01",
",",
"weights",
"=",
"None",
")",
":",
"def",
"_within_tol",
"(",
"a",
",",
"b",
",",
"tol",
")",
":",
"return",
"np",
... | 33.028986 | 19.608696 |
def p_if_else(p):
    """ statement : if_then_part NEWLINE program_co else_part
    """
    # NOTE: the docstring above is the PLY grammar rule -- do not edit it.
    condition = p[1]
    then_body = p[3]
    else_part = p[4]
    else_body, endif_token = else_part[0], else_part[1]
    p[0] = make_sentence('IF', condition, then_body,
                         make_block(else_body, endif_token),
                         lineno=p.lineno(2))
"def",
"p_if_else",
"(",
"p",
")",
":",
"cond_",
"=",
"p",
"[",
"1",
"]",
"then_",
"=",
"p",
"[",
"3",
"]",
"else_",
"=",
"p",
"[",
"4",
"]",
"[",
"0",
"]",
"endif",
"=",
"p",
"[",
"4",
"]",
"[",
"1",
"]",
"p",
"[",
"0",
"]",
"=",
"ma... | 30.625 | 20.375 |
def lastgenome(args):
    """
    %prog genome_A.fasta genome_B.fasta
    Run LAST by calling LASTDB, LASTAL and LAST-SPLIT. The recipe is based on
    tutorial here:
    <https://github.com/mcfrith/last-genome-alignments>
    The script runs the following steps:
    $ lastdb -P0 -uNEAR -R01 Chr10A-NEAR Chr10A.fa
    $ lastal -E0.05 -C2 Chr10A-NEAR Chr10B.fa | last-split -m1 | maf-swap | last-split -m1 -fMAF > Chr10A.Chr10B.1-1.maf
    $ maf-convert -n blasttab Chr10A.Chr10B.1-1.maf > Chr10A.Chr10B.1-1.blast
    Works with LAST v959.
    """
    # NOTE: the docstring above is consumed by OptionParser as the CLI help.
    from jcvi.apps.grid import MakeManager

    p = OptionParser(lastgenome.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())

    gA, gB = args
    mm = MakeManager()

    def bb(x):
        # basename without its trailing extension ("dir/Chr10A.fa" -> "Chr10A")
        return op.basename(x).rsplit(".", 1)[0]

    gA_pf, gB_pf = bb(gA), bb(gB)

    # Build LASTDB
    dbname = "-".join((gA_pf, "NEAR"))
    dbfile = dbname + ".suf"
    build_db_cmd = "lastdb -P0 -uNEAR -R01 {} {}".format(dbfile, gA)
    mm.add(gA, dbfile, build_db_cmd)

    # Run LASTAL; the two last-split passes give 1-to-1 alignments
    maffile = "{}.{}.1-1.maf".format(gA_pf, gB_pf)
    lastal_cmd = "lastal -E0.05 -C2 {} {}".format(dbname, gB)
    lastal_cmd += " | last-split -m1"
    lastal_cmd += " | maf-swap"
    lastal_cmd += " | last-split -m1 -fMAF > {}".format(maffile)
    mm.add([dbfile, gB], maffile, lastal_cmd)

    # Convert to BLAST format
    blastfile = maffile.replace(".maf", ".blast")
    convert_cmd = "maf-convert -n blasttab {} > {}".format(maffile, blastfile)
    mm.add(maffile, blastfile, convert_cmd)
    mm.write()
"def",
"lastgenome",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"apps",
".",
"grid",
"import",
"MakeManager",
"p",
"=",
"OptionParser",
"(",
"lastgenome",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
... | 31.428571 | 20.734694 |
def init(self):
    """
    Initialise or reset the virtual device.

    :rtype: str
    :return: The initial response of the virtual device.
    """
    self.logged_in = False
    # Pick the first prompt stage according to the configured login type.
    if self.login_type == self.LOGIN_TYPE_PASSWORDONLY:
        stage = self.PROMPT_STAGE_PASSWORD
    elif self.login_type == self.LOGIN_TYPE_NONE:
        stage = self.PROMPT_STAGE_CUSTOM
    else:
        stage = self.PROMPT_STAGE_USERNAME
    self.prompt_stage = stage
    return self.banner + self._get_prompt()
"def",
"init",
"(",
"self",
")",
":",
"self",
".",
"logged_in",
"=",
"False",
"if",
"self",
".",
"login_type",
"==",
"self",
".",
"LOGIN_TYPE_PASSWORDONLY",
":",
"self",
".",
"prompt_stage",
"=",
"self",
".",
"PROMPT_STAGE_PASSWORD",
"elif",
"self",
".",
"... | 31.294118 | 18.470588 |
def main(arguments=None):
    '''Converts a given url with the specified arguments.'''
    options, arguments = get_options(arguments)
    image_url = quote(arguments[0])

    # Configuration is optional: a missing or broken config simply means
    # the security key must come from the command line instead.
    try:
        config = Config.load(None)
    except Exception:
        config = None

    if not options.key and not config:
        sys.stdout.write('Error: The -k or --key argument is mandatory. For more information type thumbor-url -h\n')
        return

    security_key, thumbor_params = get_thumbor_params(image_url, options, config)
    url = CryptoURL(key=security_key).generate(**thumbor_params)
    sys.stdout.write('URL:\n')
    sys.stdout.write('%s\n' % url)
    return url
"def",
"main",
"(",
"arguments",
"=",
"None",
")",
":",
"parsed_options",
",",
"arguments",
"=",
"get_options",
"(",
"arguments",
")",
"image_url",
"=",
"arguments",
"[",
"0",
"]",
"image_url",
"=",
"quote",
"(",
"image_url",
")",
"try",
":",
"config",
"... | 28.28 | 25 |
def call_task_fn(self):
    """Invoke the function attached to the task, if any.

    Returns a future whose completion triggers ``log_finished``; when no
    function is attached, completion is logged immediately instead.
    """
    fn = self.fn
    if not fn:
        return self.log_finished()

    future = asyncio.Future()
    future.add_done_callback(lambda _unused: self.log_finished())

    if not inspect.iscoroutinefunction(fn):
        # plain callable: bind its result right away
        self.bind_end(fn(), future)
    else:
        # coroutine: schedule it and bind once it resolves
        wrapped = asyncio.ensure_future(fn())
        wrapped.add_done_callback(lambda t: self.bind_end(t.result(), future))
    return future
"def",
"call_task_fn",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"fn",
":",
"return",
"self",
".",
"log_finished",
"(",
")",
"future",
"=",
"asyncio",
".",
"Future",
"(",
")",
"future",
".",
"add_done_callback",
"(",
"lambda",
"x",
":",
"self",
... | 27 | 19.6 |
def _update_state(self, value: str) -> None:
"""Update state temporary during open or close."""
attribute = next(attr for attr in self._device['device_info'].get(
'Attributes', []) if attr.get(
'AttributeDisplayName') == 'doorstate')
if attribute is not None:
attribute['Value'] = value | [
"def",
"_update_state",
"(",
"self",
",",
"value",
":",
"str",
")",
"->",
"None",
":",
"attribute",
"=",
"next",
"(",
"attr",
"for",
"attr",
"in",
"self",
".",
"_device",
"[",
"'device_info'",
"]",
".",
"get",
"(",
"'Attributes'",
",",
"[",
"]",
")",... | 49.142857 | 9.142857 |
def send(self, stack: Layers):
    """
    Intercept any potential "AnswerCallbackQuery" before adding the stack
    to the output buffer, patching the remaining layers with ids taken
    from the current update.
    """
    if not isinstance(stack, Stack):
        stack = Stack(stack)
    update = self._update

    # Point the Update layer at the (inline) message that triggered the
    # callback query, when there is one.
    if 'callback_query' in update and stack.has_layer(Update):
        upd_layer = stack.get_layer(Update)
        try:
            message = update['callback_query']['message']
        except KeyError:
            upd_layer.inline_message_id = \
                update['callback_query']['inline_message_id']
        else:
            upd_layer.chat_id = message['chat']['id']
            upd_layer.message_id = message['message_id']

    # Pull the AnswerCallbackQuery layer out of the stack; it is stored
    # aside instead of being sent along with the other layers.
    if stack.has_layer(AnswerCallbackQuery):
        self._acq = stack.get_layer(AnswerCallbackQuery)
        remaining = [layer for layer in stack.layers
                     if not isinstance(layer, AnswerCallbackQuery)]
        stack = Stack(remaining)

    # Attach the message being replied to, when known.
    if stack.has_layer(Reply):
        reply_layer = stack.get_layer(Reply)
        if 'message' in update:
            reply_layer.message = update['message']
        elif 'callback_query' in update:
            reply_layer.message = update['callback_query']['message']

    # Route inline-query answers back to the query that asked for them.
    if 'inline_query' in update and stack.has_layer(AnswerInlineQuery):
        answer = stack.get_layer(AnswerInlineQuery)
        answer.inline_query_id = update['inline_query']['id']

    if stack.layers:
        return super(TelegramResponder, self).send(stack)
"def",
"send",
"(",
"self",
",",
"stack",
":",
"Layers",
")",
":",
"if",
"not",
"isinstance",
"(",
"stack",
",",
"Stack",
")",
":",
"stack",
"=",
"Stack",
"(",
"stack",
")",
"if",
"'callback_query'",
"in",
"self",
".",
"_update",
"and",
"stack",
".",... | 35.418605 | 18.627907 |
def textslice(self, start, end):
    """
    Return a chunk referencing a slice of a scalar text value.
    """
    sliced_pointer = self._pointer.textslice(start, end)
    return self._select(sliced_pointer)
"def",
"textslice",
"(",
"self",
",",
"start",
",",
"end",
")",
":",
"return",
"self",
".",
"_select",
"(",
"self",
".",
"_pointer",
".",
"textslice",
"(",
"start",
",",
"end",
")",
")"
] | 36.8 | 11.6 |
def print_critical_paths(critical_paths):
    """ Prints the results of the critical path length analysis.
    Done by default by the `timing_critical_path()` function.
    """
    line_indent = " " * 2
    # report each critical path with its index, first wire, and nets
    for index, critical_path in enumerate(critical_paths):
        print("Critical path", index, ":")
        print(line_indent, "The first wire is:", critical_path[0])
        for net in critical_path[1]:
            print(line_indent, (net))
        print()
"def",
"print_critical_paths",
"(",
"critical_paths",
")",
":",
"line_indent",
"=",
"\" \"",
"*",
"2",
"# print the critical path",
"for",
"cp_with_num",
"in",
"enumerate",
"(",
"critical_paths",
")",
":",
"print",
"(",
"\"Critical path\"",
",",
"cp_with_num",
"[",... | 44.333333 | 10.75 |
def search(self, filter, attributes=None):
    """Run an LDAP subtree search and return the matching entries.

    :param filter: list of LDAP filter terms (ANDed together); None
        selects every object
    :param attributes: attribute names to fetch; None fetches all ('*')
    """
    terms = ["(objectclass=*)"] if filter is None else filter
    wanted = ['*'] if attributes is None else attributes
    # AND all individual terms into a single LDAP filter expression
    ldap_filter = "(&{})".format(''.join(terms))
    self.conn.search(
        search_base=self.basedn,
        search_filter=ldap_filter,
        search_scope=ldap3.SUBTREE,
        attributes=wanted)
    return self.conn.entries
"def",
"search",
"(",
"self",
",",
"filter",
",",
"attributes",
"=",
"None",
")",
":",
"if",
"attributes",
"is",
"None",
":",
"attributes",
"=",
"[",
"'*'",
"]",
"if",
"filter",
"is",
"None",
":",
"filter",
"=",
"[",
"\"(objectclass=*)\"",
"]",
"# Conv... | 32.4375 | 11.5625 |
def handle_response(response):
    """
    Given a requests.Response object, throw the appropriate exception, if applicable.
    """
    # anything below 400 is a success/redirect -- nothing to raise
    if response.status_code < 400:
        return
    exc_cls = _status_to_exception_type.get(response.status_code, HttpError)
    kwargs = {
        'code': response.status_code,
        'method': response.request.method,
        'url': response.request.url,
        'details': response.text,
    }
    headers = response.headers
    if headers and 'retry-after' in headers:
        kwargs['retry_after'] = headers.get('retry-after')
    raise exc_cls(**kwargs)
"def",
"handle_response",
"(",
"response",
")",
":",
"# ignore valid responses",
"if",
"response",
".",
"status_code",
"<",
"400",
":",
"return",
"cls",
"=",
"_status_to_exception_type",
".",
"get",
"(",
"response",
".",
"status_code",
",",
"HttpError",
")",
"kw... | 27.090909 | 21.545455 |
def com_adobe_fonts_check_family_max_4_fonts_per_family_name(ttFonts):
    """Verify that each group of fonts with the same nameID 1
    has maximum of 4 fonts"""
    from collections import Counter
    from fontbakery.utils import get_name_entry_strings

    family_names = []
    for ttFont in ttFonts:
        # get_name_entry_strings may return several copies of the same name
        # (one per platform/language pair); set() keeps each distinct family
        # name only once per font.
        unique_names = set(
            get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME))
        family_names.extend(unique_names)

    failed = False
    for family_name, count in Counter(family_names).items():
        if count > 4:
            failed = True
            yield FAIL, ("Family '{}' has {} fonts (should be 4 or fewer)."
                         ).format(family_name, count)
    if not failed:
        yield PASS, ("There were no more than 4 fonts per family name.")
"def",
"com_adobe_fonts_check_family_max_4_fonts_per_family_name",
"(",
"ttFonts",
")",
":",
"from",
"collections",
"import",
"Counter",
"from",
"fontbakery",
".",
"utils",
"import",
"get_name_entry_strings",
"failed",
"=",
"False",
"family_names",
"=",
"list",
"(",
")"... | 42.76 | 19.08 |
def extract_to_disk(self):
    """Extract all files and write them to disk.

    Creates a directory named after the archive (basename minus its
    extension) in the current working directory, changes into it, and
    writes one file per entry returned by ``extract()``. Entries whose
    data is None are written as empty files.

    Note: the working directory is intentionally left changed afterwards,
    matching the original behaviour callers may rely on.
    """
    archive_name, _extension = os.path.splitext(os.path.basename(self.file.name))
    if not os.path.isdir(os.path.join(os.getcwd(), archive_name)):
        os.mkdir(archive_name)
    os.chdir(archive_name)
    for filename, data in self.extract().items():
        # context manager guarantees the handle is closed even if the
        # write raises (the original leaked the handle on error)
        with open(filename, 'wb') as f:
            f.write(data or b'')
"def",
"extract_to_disk",
"(",
"self",
")",
":",
"archive_name",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"file",
".",
"name",
")",
")",
"if",
"not",
"os",
".",
"path",
".... | 44.1 | 14.8 |
def getlist(self, key, delimiter=',', **kwargs):
    """
    Gets the setting value as a :class:`list`; string values are split on
    ``delimiter``, while bracketed strings are deserialized instead.

    :param str delimiter: split the value using this delimiter
    :rtype: list
    """
    value = self.get(key, **kwargs)
    if value is None:
        return None
    if not isinstance(value, str):
        # already an iterable of items
        return list(value)
    stripped = value.strip()
    if stripped.startswith('[') and stripped.endswith(']'):
        # looks like a serialized list literal
        return self.getserialized(key)
    return [piece.strip(' ') for piece in stripped.split(delimiter)]
"def",
"getlist",
"(",
"self",
",",
"key",
",",
"delimiter",
"=",
"','",
",",
"*",
"*",
"kwargs",
")",
":",
"value",
"=",
"self",
".",
"get",
"(",
"key",
",",
"*",
"*",
"kwargs",
")",
"if",
"value",
"is",
"None",
":",
"return",
"value",
"if",
"... | 31.842105 | 19.631579 |
def create_or_update(
        self, resource_group_name, vm_scale_set_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Create or update a VM scale set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param vm_scale_set_name: The name of the VM scale set to create or
     update.
    :type vm_scale_set_name: str
    :param parameters: The scale set object.
    :type parameters:
     ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSet
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns VirtualMachineScaleSet
     or ClientRawResponse<VirtualMachineScaleSet> if raw==True
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the initial PUT; the poller below drives it to completion.
    raw_result = self._create_or_update_initial(
        resource_group_name=resource_group_name,
        vm_scale_set_name=vm_scale_set_name,
        parameters=parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Deserialize the final response, optionally wrapping it raw.
        deserialized = self._deserialize('VirtualMachineScaleSet', response)
        if raw:
            return ClientRawResponse(deserialized, response)
        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)

    # Select the polling strategy requested by the caller.
    if polling is True:
        polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
"def",
"create_or_update",
"(",
"self",
",",
"resource_group_name",
",",
"vm_scale_set_name",
",",
"parameters",
",",
"custom_headers",
"=",
"None",
",",
"raw",
"=",
"False",
",",
"polling",
"=",
"True",
",",
"*",
"*",
"operation_config",
")",
":",
"raw_result... | 48.88 | 26.04 |
def delete_archives(self, *archives):
    '''
    Delete archives.

    When archive names are given, only matching archives are removed;
    with no arguments every archive is removed.

    :return: dict with per-file status under 'files' and any failure
        messages under 'errors'
    '''
    # Compare on basenames only, ignoring any path components passed in.
    wanted = [os.path.basename(a) for a in archives]
    ret = {'files': {}, 'errors': {}}
    for archive in self.archives():
        arc_dir = os.path.dirname(archive)
        base = os.path.basename(archive)
        if wanted and base not in wanted:
            continue
        full_path = os.path.join(arc_dir, base)
        try:
            os.unlink(full_path)
            ret['files'][full_path] = 'removed'
        except Exception as err:
            ret['errors'][full_path] = str(err)
            ret['files'][full_path] = 'left'
    return ret
"def",
"delete_archives",
"(",
"self",
",",
"*",
"archives",
")",
":",
"# Remove paths",
"_archives",
"=",
"[",
"]",
"for",
"archive",
"in",
"archives",
":",
"_archives",
".",
"append",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"archive",
")",
")",
... | 33 | 15.4 |
def _change_iscsi_target_settings(self, iscsi_info):
    """Change iSCSI target settings.

    :param iscsi_info: A dictionary that contains information of iSCSI
        target like target_name, lun, ip_address, port etc.
    :raises: IloError, on an error from iLO.
    """
    sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)

    # Collect every association marked bootable in the BIOS PCI mappings.
    try:
        pci_settings_map = (
            sushy_system.bios_settings.bios_mappings.pci_settings_mappings)
        nics = []
        for mapping in pci_settings_map:
            for subinstance in mapping['Subinstances']:
                for association in subinstance['Associations']:
                    if 'NicBoot' in association:
                        nics.append(association)
    except sushy.exceptions.SushyError as e:
        msg = (self._('The Redfish controller failed to get the '
                      'bios mappings. Error %(error)s')
               % {'error': str(e)})
        LOG.debug(msg)
        raise exception.IloError(msg)

    if not nics:
        msg = ('No nics were found on the system')
        raise exception.IloError(msg)

    # Build one iSCSI source entry per NIC.
    iscsi_infos = []
    for nic in nics:
        data = iscsi_info.copy()
        data['iSCSIAttemptName'] = nic
        data['iSCSINicSource'] = nic
        # index() mirrors the original numbering (first occurrence wins)
        data['iSCSIAttemptInstance'] = nics.index(nic) + 1
        iscsi_infos.append(data)

    iscsi_data = {'iSCSISources': iscsi_infos}
    try:
        (sushy_system.bios_settings.iscsi_resource.
         iscsi_settings.update_iscsi_settings(iscsi_data))
    except sushy.exceptions.SushyError as e:
        msg = (self._("The Redfish controller is failed to update iSCSI "
                      "settings. Error %(error)s") %
               {'error': str(e)})
        LOG.debug(msg)
        raise exception.IloError(msg)
"def",
"_change_iscsi_target_settings",
"(",
"self",
",",
"iscsi_info",
")",
":",
"sushy_system",
"=",
"self",
".",
"_get_sushy_system",
"(",
"PROLIANT_SYSTEM_ID",
")",
"try",
":",
"pci_settings_map",
"=",
"(",
"sushy_system",
".",
"bios_settings",
".",
"bios_mappin... | 41.425532 | 16.468085 |
def get_weichert_factor(beta, cmag, cyear, end_year):
    '''
    Gets the Weichert adjustment factor for each of the magnitude bins.

    :param float beta:
        Beta value of Gutenberg & Richter parameter (b * log(10.))
    :param np.ndarray cmag:
        Magnitude values of the completeness table
    :param np.ndarray cyear:
        Year values of the completeness table
    :param float end_year:
        Last year for consideration in the catalogue
    :returns:
        Tuple of (Weichert adjustment factor, mid-points of the
        completeness bins, or None when only a single bin exists)
    '''
    if len(cmag) > 1:
        # cval corresponds to the mid-points of the completeness bins.
        # The original code requires the magnitude bins to be equally
        # sized; the last mid-point is extrapolated from the previous bin.
        # (A stray line of pasted code was removed from this comment.)
        dmag = (cmag[1:] + cmag[:-1]) / 2.
        cval = np.hstack([dmag, cmag[-1] + (dmag[-1] - cmag[-2])])
    else:
        # Single completeness value: factor is the reciprocal of the
        # observation length and there are no bin mid-points to return.
        return 1.0 / (end_year - cyear[0] + 1), None
    t_f = sum(np.exp(-beta * cval)) / sum((end_year - cyear + 1) *
                                          np.exp(-beta * cval))
    return t_f, cval
"def",
"get_weichert_factor",
"(",
"beta",
",",
"cmag",
",",
"cyear",
",",
"end_year",
")",
":",
"if",
"len",
"(",
"cmag",
")",
">",
"1",
":",
"# cval corresponds to the mid-point of the completeness bins",
"# In the original code it requires that the magnitude bins be",
... | 34.6875 | 23.4375 |
def find_all_matching_parsers(self, strict: bool, desired_type: Type[Any] = JOKER, required_ext: str = JOKER) \
        -> Tuple[Tuple[List[Parser], List[Parser], List[Parser]],
                 List[Parser], List[Parser], List[Parser]]:
    """
    Implementation of the parent method by looking into the registry to find
    the most appropriate parsers to use, in order.

    :param strict:
    :param desired_type: the desired type, or 'JOKER' for a wildcard
    :param required_ext:
    :return: match=(matching_parsers_generic, matching_parsers_approx, matching_parsers_exact),
        no_type_match_but_ext_match, no_ext_match_but_type_match, no_match
    """
    # NOTE: even when both desired_type and required_ext are JOKER we go
    # through the full matching logic below. The JOKER special cases are
    # handled inside is_able_to_parse / is_able_to_convert, and
    # short-circuiting here would risk inconsistencies across views (a
    # parser could look registered in one view but not another).
    check_var(strict, var_types=bool, var_name='strict')

    # transform any 'Any' type requirement into the official class for that
    desired_type = get_validated_type(desired_type, 'desired_type', enforce_not_joker=False)

    matching_parsers_generic = []
    matching_parsers_approx = []
    matching_parsers_exact = []
    no_type_match_but_ext_match = []
    no_ext_match_but_type_match = []
    no_match = []

    # ---- generic parsers first (except when the desired type is Any) ----
    for parser in self._generic_parsers:
        if parser.is_able_to_parse(desired_type=desired_type, desired_ext=required_ext, strict=strict):
            if is_any_type(desired_type):
                # special case: what is required is Any, so this is exact
                matching_parsers_exact.append(parser)
            else:
                matching_parsers_generic.append(parser)
        elif parser.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict):
            # releasing the ext constraint makes it a match
            no_ext_match_but_type_match.append(parser)
        # otherwise: a generic parser unable to parse this type can never
        # be used here, so it is not even recorded as a partial match

    # ---- then the specific parsers ----
    for parser in self._specific_parsers:
        match, exact_match = parser.is_able_to_parse_detailed(desired_type=desired_type,
                                                              desired_ext=required_ext,
                                                              strict=strict)
        if match:
            if is_any_type(desired_type):
                # special case: don't register as a type match
                no_type_match_but_ext_match.append(parser)
            elif exact_match is None or exact_match:
                matching_parsers_exact.append(parser)
            else:
                matching_parsers_approx.append(parser)
        elif parser.is_able_to_parse(desired_type=JOKER, desired_ext=required_ext, strict=strict):
            # relaxing the type constraint makes it a match
            no_type_match_but_ext_match.append(parser)
        elif parser.is_able_to_parse(desired_type=desired_type, desired_ext=JOKER, strict=strict):
            # relaxing the ext constraint makes it a match
            no_ext_match_but_type_match.append(parser)
        else:
            # no match at all
            no_match.append(parser)

    return (matching_parsers_generic, matching_parsers_approx, matching_parsers_exact), \
        no_type_match_but_ext_match, no_ext_match_but_type_match, no_match
"def",
"find_all_matching_parsers",
"(",
"self",
",",
"strict",
":",
"bool",
",",
"desired_type",
":",
"Type",
"[",
"Any",
"]",
"=",
"JOKER",
",",
"required_ext",
":",
"str",
"=",
"JOKER",
")",
"->",
"Tuple",
"[",
"Tuple",
"[",
"List",
"[",
"Parser",
"... | 50.142857 | 29.659341 |
def head(self, n=6):
    """
    Return the first ``n`` values of this column as a pandas Series.

    Executes::

        SELECT <name_of_the_column>
        FROM <name_of_the_table>
        LIMIT <n>

    Parameters
    ----------
    n: int
        number of rows to return

    Examples
    --------
    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> db.tables.Customer.City.head(2)
    0    Sao Jose dos Campos
    1              Stuttgart
    Name: City, dtype: object
    """
    query = self._query_templates['column']['head'].format(
        column=self.name, schema=self.schema, table=self.table, n=n)
    return pd.read_sql(query, self._con)[self.name]
"def",
"head",
"(",
"self",
",",
"n",
"=",
"6",
")",
":",
"q",
"=",
"self",
".",
"_query_templates",
"[",
"'column'",
"]",
"[",
"'head'",
"]",
".",
"format",
"(",
"column",
"=",
"self",
".",
"name",
",",
"schema",
"=",
"self",
".",
"schema",
",",... | 31.147059 | 16.147059 |
def filters(self):
    """List of filters available for the dataset.

    Fetched lazily on first access; the same request also populates the
    attribute cache.
    """
    cached = self._filters
    if cached is None:
        cached, self._attributes = self._fetch_configuration()
        self._filters = cached
    return cached
"def",
"filters",
"(",
"self",
")",
":",
"if",
"self",
".",
"_filters",
"is",
"None",
":",
"self",
".",
"_filters",
",",
"self",
".",
"_attributes",
"=",
"self",
".",
"_fetch_configuration",
"(",
")",
"return",
"self",
".",
"_filters"
] | 41.6 | 14.8 |
def cci(series, window=14):
    """
    Compute the commodity channel index over ``window`` periods.
    """
    tp = typical_price(series)
    tp_mean = rolling_mean(tp, window)
    values = (tp - tp_mean) / (.015 * np.std(tp_mean))
    return pd.Series(index=series.index, data=values)
"def",
"cci",
"(",
"series",
",",
"window",
"=",
"14",
")",
":",
"price",
"=",
"typical_price",
"(",
"series",
")",
"typical_mean",
"=",
"rolling_mean",
"(",
"price",
",",
"window",
")",
"res",
"=",
"(",
"price",
"-",
"typical_mean",
")",
"/",
"(",
"... | 33.625 | 8.125 |
def format_valid_streams(plugin, streams):
    """Formats a dict of streams.

    Filters out synonyms and displays them next to
    the stream they point to.
    Streams are sorted according to their quality
    (based on plugin.stream_weight).
    """
    delimiter = ", "
    validstreams = []
    for name, stream in sorted(streams.items(),
                               key=lambda item: plugin.stream_weight(item[0])):
        if name in STREAM_SYNONYMS:
            continue

        # a synonym is any other key bound to this same stream object;
        # keys are compared with != (the original used `is not` on
        # strings, which only worked because dict keys are unique objects)
        synonyms = [key for key, value in streams.items()
                    if value is stream and key != name]
        if synonyms:
            name = "{0} ({1})".format(name, delimiter.join(synonyms))
        validstreams.append(name)
    return delimiter.join(validstreams)
"def",
"format_valid_streams",
"(",
"plugin",
",",
"streams",
")",
":",
"delimiter",
"=",
"\", \"",
"validstreams",
"=",
"[",
"]",
"for",
"name",
",",
"stream",
"in",
"sorted",
"(",
"streams",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"stream",
... | 26.354839 | 20.483871 |
def inROI(self, Y):
    '''Return the indices of the points in Y that lie inside the ROI,
    together with each point's projected distance along the ROI path.'''
    if Y.ndim > 1:
        tri_areas = np.zeros((Y.shape[0], 4))
    else:
        tri_areas = np.zeros((1, 4))
    inside = np.zeros((0,), int)
    proj_dists = np.zeros((0,), int)
    offset = 0
    for k in range(len(self.prect)):
        # area of the k-th rectangle, computed from its two triangles
        self.square_area = (triangle_area(self.prect[k][0, :], self.prect[k][1, :], self.prect[k][2, :]) +
                            triangle_area(self.prect[k][2, :], self.prect[k][3, :], self.prect[k][4, :]))
        for n in range(4):
            tri_areas[:, n] = triangle_area(self.prect[k][0 + n, :], self.prect[k][1 + n, :], Y)
        # a point is inside the rectangle when its four triangle areas
        # sum (within tolerance) to the rectangle's own area
        hits = np.array((tri_areas.sum(axis=1) <= self.square_area + 1e-5).nonzero()).flatten().astype(int)
        if hits.size > 0:
            inside = np.concatenate((inside, hits))
            hit_dists = self.orthproj(Y[hits, :], k) + offset
            proj_dists = np.concatenate((proj_dists, hit_dists))
        offset += (np.diff(self.pos[k], axis=0)[0, :] ** 2).sum()
        # also accept points within the joint circle between segments
        if k < len(self.prect) - 1:
            center = self.pos[k][1, :]
            dist = ((Y - center[np.newaxis, :]) ** 2).sum(axis=1) ** 0.5
            hits = np.array((dist <= self.d).nonzero()[0].astype(int))
            if hits.size > 0:
                inside = np.concatenate((inside, hits))
                proj_dists = np.concatenate((proj_dists, offset * np.ones(hits.shape)))
    # deduplicate, keeping the first recorded distance for each point
    inside, first_idx = np.unique(inside, return_index=True)
    proj_dists = proj_dists[first_idx]
    return inside, proj_dists
"def",
"inROI",
"(",
"self",
",",
"Y",
")",
":",
"if",
"Y",
".",
"ndim",
">",
"1",
":",
"area",
"=",
"np",
".",
"zeros",
"(",
"(",
"Y",
".",
"shape",
"[",
"0",
"]",
",",
"4",
")",
")",
"else",
":",
"area",
"=",
"np",
".",
"zeros",
"(",
... | 47.382353 | 19.676471 |
def despeckle_simple(B, th2=2):
    """Single-chromosome despeckling.

    Replaces any entry lying more than ``th2`` standard deviations above
    its diagonal's median with that median. It also works for multiple
    chromosomes, but trends may be disrupted.

    Parameters
    ----------
    B : array_like
        The input matrix to despeckle
    th2 : float
        The number of standard deviations above the mean beyond which
        despeckling should be performed

    Returns
    -------
    array_like
        The despeckled matrix (the input is left untouched)
    """
    A = np.copy(B)
    n1 = A.shape[0]
    # per-diagonal medians and standard deviations, computed once up front
    medians = {u: np.median(np.diag(A, u)) for u in range(n1)}
    stds = {u: np.std(np.diag(A, u)) for u in range(n1)}
    for nw, j in itertools.product(range(n1), range(n1)):
        upper = j + nw
        lower = j - nw
        threshold = medians[nw] + th2 * stds[nw]
        if upper < n1 and A[j, upper] > threshold:
            A[j, upper] = medians[nw]
        if lower >= 0 and A[j, lower] > threshold:
            A[j, lower] = medians[nw]
    return A
"def",
"despeckle_simple",
"(",
"B",
",",
"th2",
"=",
"2",
")",
":",
"A",
"=",
"np",
".",
"copy",
"(",
"B",
")",
"n1",
"=",
"A",
".",
"shape",
"[",
"0",
"]",
"dist",
"=",
"{",
"u",
":",
"np",
".",
"diag",
"(",
"A",
",",
"u",
")",
"for",
... | 25.871795 | 20.153846 |
def pom_contains_modules():
    """
    Reads pom.xml in the current working directory and checks whether it
    declares a non-empty modules tag.

    :return: True if the pom declares modules, False otherwise
    :raises OSError: if pom.xml cannot be opened
    """
    # context manager replaces the original manual open/try/finally, which
    # could leak the handle if open() succeeded but read() raised
    with open("pom.xml") as pom_file:
        pom = pom_file.read()
    artifact = MavenArtifact(pom=pom)
    return bool(artifact.modules)
"def",
"pom_contains_modules",
"(",
")",
":",
"pom_file",
"=",
"None",
"try",
":",
"pom_file",
"=",
"open",
"(",
"\"pom.xml\"",
")",
"pom",
"=",
"pom_file",
".",
"read",
"(",
")",
"finally",
":",
"if",
"pom_file",
":",
"pom_file",
".",
"close",
"(",
")... | 23.117647 | 19.235294 |
def send_message(self, recipient_list, subject, body):
    """Send an on-site message.

    :param recipient_list: list of recipient usernames
    :param subject: message title
    :param body: message content (must not exceed 1024 characters)
    :return: True when the HTTP request succeeded
    """
    url = 'http://www.shanbay.com/api/v1/message/'
    payload = {
        'recipient': ','.join(recipient_list),
        'subject': subject,
        'body': body,
        'csrfmiddlewaretoken': self._request.cookies.get('csrftoken'),
    }
    response = self.request(url, 'post', data=payload)
    return response.ok
"def",
"send_message",
"(",
"self",
",",
"recipient_list",
",",
"subject",
",",
"body",
")",
":",
"url",
"=",
"'http://www.shanbay.com/api/v1/message/'",
"recipient",
"=",
"','",
".",
"join",
"(",
"recipient_list",
")",
"data",
"=",
"{",
"'recipient'",
":",
"r... | 32.352941 | 14 |
def set_bulk_size(size):
    """Set size limit on bulk execution.

    Bulk execution bundles many operators to run together, which can
    improve performance when running a lot of small operators
    sequentially.

    Parameters
    ----------
    size : int
        Maximum number of operators that can be bundled in a bulk.

    Returns
    -------
    int
        Previous bulk size.
    """
    previous = ctypes.c_int()
    status = _LIB.MXEngineSetBulkSize(
        ctypes.c_int(size), ctypes.byref(previous))
    check_call(status)
    return previous.value
return prev.value | [
"def",
"set_bulk_size",
"(",
"size",
")",
":",
"prev",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXEngineSetBulkSize",
"(",
"ctypes",
".",
"c_int",
"(",
"size",
")",
",",
"ctypes",
".",
"byref",
"(",
"prev",
")",
")",
")... | 24.47619 | 20.809524 |
def on_select_fit(self, event):
    """
    Picks out the fit selected in the fit combobox and sets it to the
    current fit of the GUI then calls the select function of the fit to
    set the GUI's bounds boxes and alter other such parameters.

    Parameters
    ----------
    event : the wx.ComboBoxEvent that triggers this function

    Alters
    ------
    current_fit, fit_box selection, tmin_box selection, tmax_box
    selection
    """
    fit_val = self.fit_box.GetValue()
    # No fits exist for this specimen, or the user explicitly chose
    # 'None': clear all selection state in the GUI.
    if self.s not in self.pmag_results_data['specimens'] or not self.pmag_results_data['specimens'][self.s] or fit_val == 'None':
        self.clear_boxes()
        self.current_fit = None
        self.fit_box.SetStringSelection('None')
        self.tmin_box.SetStringSelection('')
        self.tmax_box.SetStringSelection('')
    else:
        try:
            # Locate the chosen fit by name among this specimen's fits.
            fit_num = list(
                map(lambda x: x.name, self.pmag_results_data['specimens'][self.s])).index(fit_val)
        except ValueError:
            # NOTE(review): -1 silently selects the *last* fit when the
            # name is not found -- confirm this fallback is intended.
            fit_num = -1
        # select() is expected to update current_fit and the bounds boxes.
        self.pmag_results_data['specimens'][self.s][fit_num].select()
        if self.ie_open:
            # Keep the interpretation editor in sync with the selection.
            self.ie.change_selected(self.current_fit)
"def",
"on_select_fit",
"(",
"self",
",",
"event",
")",
":",
"fit_val",
"=",
"self",
".",
"fit_box",
".",
"GetValue",
"(",
")",
"if",
"self",
".",
"s",
"not",
"in",
"self",
".",
"pmag_results_data",
"[",
"'specimens'",
"]",
"or",
"not",
"self",
".",
... | 39.870968 | 22.451613 |
def _get_info(self, formula_def):
'''
Get package info
'''
fields = (
'name',
'os',
'os_family',
'release',
'version',
'dependencies',
'os_dependencies',
'os_family_dependencies',
'summary',
'description',
)
for item in fields:
if item not in formula_def:
formula_def[item] = 'None'
if 'installed' not in formula_def:
formula_def['installed'] = 'Not installed'
return ('Name: {name}\n'
'Version: {version}\n'
'Release: {release}\n'
'Install Date: {installed}\n'
'Supported OSes: {os}\n'
'Supported OS families: {os_family}\n'
'Dependencies: {dependencies}\n'
'OS Dependencies: {os_dependencies}\n'
'OS Family Dependencies: {os_family_dependencies}\n'
'Summary: {summary}\n'
'Description:\n'
'{description}').format(**formula_def) | [
"def",
"_get_info",
"(",
"self",
",",
"formula_def",
")",
":",
"fields",
"=",
"(",
"'name'",
",",
"'os'",
",",
"'os_family'",
",",
"'release'",
",",
"'version'",
",",
"'dependencies'",
",",
"'os_dependencies'",
",",
"'os_family_dependencies'",
",",
"'summary'",
... | 31.342857 | 14.428571 |
def import_account(self, label: str, encrypted_pri_key: str, pwd: str, b58_address: str,
                   b64_salt: str, n: int = 16384) -> AccountData:
    """
    Import an account from its exported (encrypted) data.

    :param label: str, wallet label
    :param encrypted_pri_key: str, an encrypted private key in base64 encoding
    :param pwd: str, password used to encrypt and decrypt the private key
    :param b58_address: str, a base58-encoded wallet address
    :param b64_salt: str, a base64-encoded salt used in the private key encryption
    :param n: int, CPU/Memory cost parameter; must be a power of 2 and less
        than :math:`2^{32}`
    :return: the matching AccountData entry from the in-memory wallet.
    :raise SDKException: if the wallet holds invalid account data, or if no
        wallet account matches the decrypted key's address.
    """
    decoded_salt = base64.b64decode(b64_salt.encode('ascii')).decode('latin-1')
    decrypted_key = Account.get_gcm_decoded_private_key(
        encrypted_pri_key, pwd, b58_address, decoded_salt, n, self.scheme)
    new_info = self.create_account_info(label, pwd, decoded_salt, decrypted_key)
    for existing in self.wallet_in_mem.accounts:
        if not isinstance(existing, AccountData):
            raise SDKException(ErrorCode.other_error('Invalid account data in memory.'))
        if existing.b58_address == new_info.address_base58:
            return existing
    raise SDKException(ErrorCode.other_error('Import account failed.'))
"def",
"import_account",
"(",
"self",
",",
"label",
":",
"str",
",",
"encrypted_pri_key",
":",
"str",
",",
"pwd",
":",
"str",
",",
"b58_address",
":",
"str",
",",
"b64_salt",
":",
"str",
",",
"n",
":",
"int",
"=",
"16384",
")",
"->",
"AccountData",
"... | 63.333333 | 32.25 |
def format_date(value, format='%b %d, %Y', convert_tz=None):
    """
    Format an Excel date or date string, returning a formatted date.

    Numeric values are treated as Excel serial dates (days, with the
    25569-day offset to the Unix epoch); strings are parsed with
    ``dateutil``.

    To return a Python :py:class:`datetime.datetime` object, pass ``None``
    as a ``format`` argument.

    >>> format_date(42419.82163)
    'Feb 19, 2016'

    .. code-block:: html+jinja

        {{ row.date|format_date('%Y-%m-%d') }}

    :param value: Excel serial date number, or a parseable date string
    :param format: ``strftime`` format string, or ``None`` for a datetime
    :param convert_tz: optional timezone name to convert the result into
    """
    if isinstance(value, (int, float)):
        # 25569 is the Excel serial number of 1970-01-01 (Unix epoch).
        seconds = (value - 25569) * 86400.0
        parsed = datetime.datetime.utcfromtimestamp(seconds)
    else:
        parsed = dateutil.parser.parse(value)
    if convert_tz:
        # NOTE(review): `parsed` is naive here, so astimezone() treats it
        # as system-local time before converting -- confirm this is the
        # intended semantics for Excel (UTC-derived) dates.
        local_zone = dateutil.tz.gettz(convert_tz)
        parsed = parsed.astimezone(tz=local_zone)
    if format:
        return parsed.strftime(format)
    return parsed
"def",
"format_date",
"(",
"value",
",",
"format",
"=",
"'%b %d, %Y'",
",",
"convert_tz",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"float",
")",
"or",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"seconds",
"=",
"(",
"value",
... | 30.230769 | 19 |
def from_web_element(self, web_element):
    """
    Store reference to a WebElement instance representing the element on the DOM.

    Use it when an instance of WebElement has already been created (e.g. as
    the result of find_element) and you want to create a UIComponent out of
    it without evaluating it from the locator again.

    :param web_element: the WebElement instance to wrap
    :raises TypeError: if ``web_element`` is not a WebElement
    :returns: this instance, allowing call chaining
    """
    # `isinstance(...) is not True` was a needless identity test; a plain
    # boolean negation is the idiomatic (and equivalent) form.
    if not isinstance(web_element, WebElement):
        raise TypeError("web_element parameter is not of type WebElement.")
    self._web_element = web_element
    return self
"def",
"from_web_element",
"(",
"self",
",",
"web_element",
")",
":",
"if",
"isinstance",
"(",
"web_element",
",",
"WebElement",
")",
"is",
"not",
"True",
":",
"raise",
"TypeError",
"(",
"\"web_element parameter is not of type WebElement.\"",
")",
"self",
".",
"_w... | 55.181818 | 24.454545 |
def _split_string_to_tokens(text):
    """Split *text* into a list of string tokens.

    A new token begins wherever the character class (alphanumeric vs.
    non-alphanumeric, per _ALPHANUMERIC_CHAR_SET) changes. Single-space
    tokens are dropped unless they start the string.
    """
    if not text:
        return []
    # Pre-classify every character once.
    char_is_alnum = [ch in _ALPHANUMERIC_CHAR_SET for ch in text]
    tokens = []
    start = 0
    for pos in xrange(1, len(text)):
        if char_is_alnum[pos] == char_is_alnum[pos - 1]:
            continue
        piece = text[start:pos]
        if piece != u" " or start == 0:
            tokens.append(piece)
        start = pos
    tokens.append(text[start:])
    return tokens
"def",
"_split_string_to_tokens",
"(",
"text",
")",
":",
"if",
"not",
"text",
":",
"return",
"[",
"]",
"ret",
"=",
"[",
"]",
"token_start",
"=",
"0",
"# Classify each character in the input string",
"is_alnum",
"=",
"[",
"c",
"in",
"_ALPHANUMERIC_CHAR_SET",
"for... | 30.058824 | 13.647059 |
def plot_isobar(self, P, Tmin=None, Tmax=None, methods_P=[], pts=50,
                only_valid=True):  # pragma: no cover
    r'''Method to create a plot of the property vs temperature at a
    specific pressure according to
    either a specified list of methods, or user methods (if set), or all
    methods. User-selectable number of points, and temperature range. If
    only_valid is set, `test_method_validity_P` will be used to check if
    each condition in the specified range is valid, and
    `test_property_validity` will be used to test the answer, and the
    method is allowed to fail; only the valid points will be plotted.
    Otherwise, the result will be calculated and displayed as-is. This will
    not succeed if the method fails.

    Parameters
    ----------
    P : float
        Pressure for the isobar, [Pa]
    Tmin : float
        Minimum temperature, to begin calculating the property, [K]
    Tmax : float
        Maximum temperature, to stop calculating the property, [K]
    methods_P : list, optional
        List of methods to consider
    pts : int, optional
        A list of points to calculate the property at; if Tmin to Tmax
        covers a wide range of method validities, only a few points may end
        up calculated for a given method so this may need to be large
    only_valid : bool
        If True, only plot successful methods and calculated properties,
        and handle errors; if False, attempt calculation without any
        checking and use methods outside their bounds
    '''
    if not has_matplotlib:
        raise Exception('Optional dependency matplotlib is required for plotting')
    # Fall back to the object's stored temperature limits when no range
    # was supplied.
    if Tmin is None:
        if self.Tmin is not None:
            Tmin = self.Tmin
        else:
            # Fixed message: this branch concerns temperature, not pressure.
            raise Exception('Minimum temperature could not be auto-detected; please provide it')
    if Tmax is None:
        if self.Tmax is not None:
            Tmax = self.Tmax
        else:
            raise Exception('Maximum temperature could not be auto-detected; please provide it')
    if not methods_P:
        if self.user_methods_P:
            methods_P = self.user_methods_P
        else:
            methods_P = self.all_methods_P
    Ts = np.linspace(Tmin, Tmax, pts)
    for method_P in methods_P:
        if only_valid:
            properties, Ts2 = [], []
            for T in Ts:
                if self.test_method_validity_P(T, P, method_P):
                    try:
                        p = self.calculate_P(T, P, method_P)
                        if self.test_property_validity(p):
                            properties.append(p)
                            Ts2.append(T)
                    except Exception:
                        # Methods are allowed to fail inside their claimed
                        # range; skip the point. (Was a bare `except:`,
                        # which also swallowed KeyboardInterrupt etc.)
                        pass
            plt.plot(Ts2, properties, label=method_P)
        else:
            properties = [self.calculate_P(T, P, method_P) for T in Ts]
            plt.plot(Ts, properties, label=method_P)
    plt.legend(loc='best')
    plt.ylabel(self.name + ', ' + self.units)
    plt.xlabel('Temperature, K')
    plt.title(self.name + ' of ' + self.CASRN)
    plt.show()
"def",
"plot_isobar",
"(",
"self",
",",
"P",
",",
"Tmin",
"=",
"None",
",",
"Tmax",
"=",
"None",
",",
"methods_P",
"=",
"[",
"]",
",",
"pts",
"=",
"50",
",",
"only_valid",
"=",
"True",
")",
":",
"# pragma: no cover",
"if",
"not",
"has_matplotlib",
":... | 45.361111 | 20 |
def _threshold_to_row(thresholds_keyword):
    """Helper to make a message row from a threshold.

    We are expecting something like this:

        {
            'thresholds': {
                'structure': {
                    'ina_structure_flood_hazard_classification': {
                        'classes': {
                            'low': [1, 2],
                            'medium': [3, 4],
                            'high': [5, 6]
                        },
                        'active': True
                    },
                    'ina_structure_flood_hazard_4_class_classification': {
                        'classes': {
                            'low': [1, 2],
                            'medium': [3, 4],
                            'high': [5, 6],
                            'very_high': [7, 8]
                        },
                        'active': False
                    }
                },
                'population': {
                    'ina_population_flood_hazard_classification': {
                        'classes': {
                            'low': [1, 2.5],
                            'medium': [2.5, 4.5],
                            'high': [4.5, 6]
                        },
                        'active': False
                    },
                    'ina_population_flood_hazard_4_class_classification': {
                        'classes': {
                            'low': [1, 2.5],
                            'medium': [2.5, 4],
                            'high': [4, 6],
                            'very_high': [6, 8]
                        },
                        'active': True
                    }
                },
            },
        }

    Each value is a list with exactly two elements [a, b], where a <= b.

    :param thresholds_keyword: Value of the keyword to be rendered. This
        must be a string representation of a dict, or a dict.
    :type thresholds_keyword: basestring, dict

    :returns: A table to be added into a cell in the keywords table.
    :rtype: safe.messaging.items.table
    """
    # Keyword values coming from storage may be serialized as a string.
    if isinstance(thresholds_keyword, str):
        thresholds_keyword = literal_eval(thresholds_keyword)
    for k, v in list(thresholds_keyword.items()):
        # If the v is not dictionary, it should be the old value maps.
        # To handle thresholds in the Impact Function.
        if not isinstance(v, dict):
            # Legacy format: a flat mapping of name -> [min, max].
            table = m.Table(style_class='table table-condensed')
            for key, value in list(thresholds_keyword.items()):
                row = m.Row()
                name = definition(key)['name'] if definition(key) else key
                row.add(m.Cell(m.ImportantText(name)))
                pretty_value = tr('%s to %s' % (value[0], value[1]))
                row.add(m.Cell(pretty_value))
                table.add(row)
            return table
    # Current format: exposure -> classification -> classes/active.
    table = m.Table(style_class='table table-condensed table-striped')
    i = 0
    for exposure_key, classifications in list(thresholds_keyword.items()):
        i += 1
        exposure = definition(exposure_key)
        exposure_row = m.Row()
        exposure_row.add(m.Cell(m.ImportantText(tr('Exposure'))))
        exposure_row.add(m.Cell(m.Text(exposure['name'])))
        exposure_row.add(m.Cell(''))
        table.add(exposure_row)
        active_classification = None
        classification_row = m.Row()
        classification_row.add(m.Cell(m.ImportantText(tr(
            'Classification'))))
        # Only the first classification flagged 'active' is rendered.
        for classification, value in list(classifications.items()):
            if value.get('active'):
                active_classification = definition(classification)
                classification_row.add(
                    m.Cell(active_classification['name']))
                classification_row.add(m.Cell(''))
                break
        if not active_classification:
            # NOTE(review): this row is populated but never added to the
            # table before `continue` -- confirm whether it should be.
            classification_row.add(m.Cell(tr('No classifications set.')))
            classification_row.add(m.Cell(''))
            continue
        table.add(classification_row)
        header = m.Row()
        header.add(m.Cell(tr('Class name')))
        header.add(m.Cell(tr('Minimum')))
        header.add(m.Cell(tr('Maximum')))
        table.add(header)
        classes = active_classification.get('classes')
        # Sort by value, put the lowest first
        classes = sorted(classes, key=lambda the_key: the_key['value'])
        for the_class in classes:
            # Thresholds are looked up per class key: [minimum, maximum].
            threshold = classifications[active_classification['key']][
                'classes'][the_class['key']]
            row = m.Row()
            row.add(m.Cell(the_class['name']))
            row.add(m.Cell(threshold[0]))
            row.add(m.Cell(threshold[1]))
            table.add(row)
        if i < len(thresholds_keyword):
            # Empty row as a visual separator between exposures.
            empty_row = m.Row()
            empty_row.add(m.Cell(''))
            empty_row.add(m.Cell(''))
            table.add(empty_row)
    return table
"def",
"_threshold_to_row",
"(",
"thresholds_keyword",
")",
":",
"if",
"isinstance",
"(",
"thresholds_keyword",
",",
"str",
")",
":",
"thresholds_keyword",
"=",
"literal_eval",
"(",
"thresholds_keyword",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"thresholds_... | 38.492537 | 16.708955 |
def _classify_arithmetic(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
    """Classify binary-operation gadgets.

    Brute-force search for a "dst_reg <- src1_reg OP src2_reg" pattern:
    every pair of initial register values is combined with every binary
    operation in self._binary_ops and compared (modulo register width)
    against every final register value.

    :param regs_init: mapping of register name -> value before execution
    :param regs_fini: mapping of register name -> value after execution
    :param mem_fini: final memory state (unused by this classifier)
    :param written_regs: registers written by the gadget
    :param read_regs: registers read by the gadget
    :returns: list of dicts with "src" / "dst" (ReilRegisterOperand lists)
        and "op" (operation symbol) for each match found
    """
    matches = []
    # TODO: Review these restrictions.
    # Predicates marking (src1, src2) pairs that would make the operation
    # trivial (e.g. x - x == 0, x | x == x) and thus not a useful gadget.
    op_restrictions = {
        "+": lambda x, y: False,
        "-": lambda x, y: x == y,
        "|": lambda x, y: x == y,
        "&": lambda x, y: x == y,
        "^": lambda x, y: x == y,
    }
    # Check for "dst_reg <- src1_reg OP src2_reg" pattern.
    for op_name, op_fn in self._binary_ops.items():
        for src_1_reg, src_1_val in regs_init.items():
            # Make sure the *src* register was read.
            if src_1_reg not in read_regs:
                continue
            for src_2_reg, src_2_val in regs_init.items():
                # Make sure the *src* register was read.
                if src_2_reg not in read_regs:
                    continue
                for dst_reg, dst_val in regs_fini.items():
                    # Make sure the *dst* register was written.
                    if dst_reg not in written_regs:
                        continue
                    # Check restrictions: all three registers must have
                    # the same architectural width.
                    if self._arch_regs_size[src_1_reg] != self._arch_regs_size[src_2_reg] or \
                            self._arch_regs_size[src_1_reg] != self._arch_regs_size[dst_reg]:
                        continue
                    # Avoid trivial operations.
                    if op_restrictions[op_name](src_1_reg, src_2_reg):
                        continue
                    size = self._arch_regs_size[src_1_reg]
                    # Compare truncated to the register width.
                    if dst_val == op_fn(src_1_val, src_2_val) & (2**size - 1):
                        # Sources are sorted by name to canonicalize the
                        # match (all listed ops are commutative-safe here).
                        src = sorted([src_1_reg, src_2_reg])
                        src_ir = [
                            ReilRegisterOperand(src[0], self._arch_regs_size[src[0]]),
                            ReilRegisterOperand(src[1], self._arch_regs_size[src[1]])
                        ]
                        dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
                        matches.append({
                            "src": src_ir,
                            "dst": [dst_reg_ir],
                            "op": op_name
                        })
    return matches
"def",
"_classify_arithmetic",
"(",
"self",
",",
"regs_init",
",",
"regs_fini",
",",
"mem_fini",
",",
"written_regs",
",",
"read_regs",
")",
":",
"matches",
"=",
"[",
"]",
"# TODO: Review these restrictions.",
"op_restrictions",
"=",
"{",
"\"+\"",
":",
"lambda",
... | 40.372881 | 22.237288 |
def sampleLocation(self):
    """
    Sample a location uniformly from the cylinder.

    The disc is chosen with probability radius / (radius + height),
    otherwise a point on the side is sampled.
    """
    discWeight = self.radius / (self.radius + self.height)
    if random.random() >= discWeight:
        return self._sampleLocationOnSide()
    return self._sampleLocationOnDisc()
"def",
"sampleLocation",
"(",
"self",
")",
":",
"areaRatio",
"=",
"self",
".",
"radius",
"/",
"(",
"self",
".",
"radius",
"+",
"self",
".",
"height",
")",
"if",
"random",
".",
"random",
"(",
")",
"<",
"areaRatio",
":",
"return",
"self",
".",
"_sample... | 30.666667 | 9.333333 |
def aes_encrypt(base64_encryption_key, data):
    """Encrypt data with AES-CBC and sign it with HMAC-SHA256.

    Arguments:
        base64_encryption_key (str): a base64-encoded string containing an AES
            encryption key and HMAC signing key as generated by
            generate_encryption_key()
        data (str): a byte string containing the data to be encrypted

    Returns:
        str: the encrypted data as a byte string with the HMAC signature
        appended to the end
    """
    if isinstance(data, text_type):
        data = data.encode("UTF-8")
    aes_key_bytes, hmac_key_bytes = _extract_keys(base64_encryption_key)
    iv_bytes = os.urandom(AES_BLOCK_SIZE)
    cipher = AES.new(aes_key_bytes, mode=AES.MODE_CBC, IV=iv_bytes)
    # Prepend the init vector so decryption can recover it.
    ciphertext = iv_bytes + cipher.encrypt(_pad(data))
    signature = hmac.new(hmac_key_bytes, ciphertext, hashlib.sha256).digest()
    return as_base64(ciphertext + signature)
"def",
"aes_encrypt",
"(",
"base64_encryption_key",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"text_type",
")",
":",
"data",
"=",
"data",
".",
"encode",
"(",
"\"UTF-8\"",
")",
"aes_key_bytes",
",",
"hmac_key_bytes",
"=",
"_extract_keys",
"... | 43.904762 | 23.809524 |
def _ignore_interrupts(self):
"""
Ignore interrupt and termination signals. Used as a pre-execution
function (preexec_fn) for subprocess.Popen calls that pypiper will
control over (i.e., manually clean up).
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN) | [
"def",
"_ignore_interrupts",
"(",
"self",
")",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIG_IGN",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"signal",
".",
"SIG_IGN",
")"
] | 43.75 | 13.75 |
def _find_family_class(dev):
    """! @brief Search the families list for a matching entry."""
    # Only consider family infos from the same vendor as the device.
    vendor_families = (info for info in FAMILIES if info.vendor == dev.vendor)
    for family_info in vendor_families:
        for family_name in dev.families:
            for regex in family_info.matches:
                result = regex.match(family_name)
                # Only accept when the regex consumed the entire name.
                if result is not None and result.span() == (0, len(family_name)):
                    return family_info.klass
    # Nothing matched: use the default target superclass.
    return CoreSightTarget
"def",
"_find_family_class",
"(",
"dev",
")",
":",
"for",
"familyInfo",
"in",
"FAMILIES",
":",
"# Skip if wrong vendor.",
"if",
"dev",
".",
"vendor",
"!=",
"familyInfo",
".",
"vendor",
":",
"continue",
"# Scan each level of families",
"for",
"familyName",
"in",
"d... | 40.882353 | 12.352941 |
def urljoin(domain, path=None, scheme=None):
    """
    Join a domain, path and scheme part together, returning a full URL.

    :param domain: the domain, e.g. ``example.com``
    :param path: the path part of the URL, e.g. ``/example/``
    :param scheme: the scheme part of the URL, e.g. ``http``, defaulting to
        the value of ``settings.DEFAULT_URL_SCHEME``
    :returns: a full URL
    """
    effective_scheme = scheme
    if effective_scheme is None:
        effective_scheme = getattr(settings, 'DEFAULT_URL_SCHEME', 'http')
    parts = (effective_scheme, domain, path or '', None, None, None)
    return urlunparse(parts)
"def",
"urljoin",
"(",
"domain",
",",
"path",
"=",
"None",
",",
"scheme",
"=",
"None",
")",
":",
"if",
"scheme",
"is",
"None",
":",
"scheme",
"=",
"getattr",
"(",
"settings",
",",
"'DEFAULT_URL_SCHEME'",
",",
"'http'",
")",
"return",
"urlunparse",
"(",
... | 39.142857 | 20.142857 |
def like_cosi(cosi, vsini_dist, veq_dist, vgrid=None):
    """Likelihood of the data (vsini_dist, veq_dist) given cos(i).

    Integrates vsini_dist(v*sini) * veq_dist(v) over v, either on the
    supplied grid (trapezoidal rule) or from 0 to infinity (quadrature).
    """
    sini = np.sqrt(1 - cosi ** 2)

    def _integrand(v):
        # Equivalent to integrating vsini_dist(v) * veq_dist(v / sini)
        # after the change of variable.
        return vsini_dist(v * sini) * veq_dist(v)

    if vgrid is not None:
        return np.trapz(_integrand(vgrid), vgrid)
    return quad(_integrand, 0, np.inf)[0]
"def",
"like_cosi",
"(",
"cosi",
",",
"vsini_dist",
",",
"veq_dist",
",",
"vgrid",
"=",
"None",
")",
":",
"sini",
"=",
"np",
".",
"sqrt",
"(",
"1",
"-",
"cosi",
"**",
"2",
")",
"def",
"integrand",
"(",
"v",
")",
":",
"#return vsini_dist(v)*veq_dist(v/s... | 34.272727 | 10.090909 |
def is_unknown(input, model_file=None, model_proto=None, name=None):
    """Returns true if input id is unknown piece.

    Args:
      input: An arbitrary tensor of int32.
      model_file: The sentencepiece model file path.
      model_proto: The sentencepiece model serialized proto.
        Either `model_file` or `model_proto` must be set.
      name: The name argument that is passed to the op function.

    Returns:
      A tensor of bool with the same shape as input.
    """
    # Piece type 0 selects the "unknown" piece category.
    piece_type_op = _gen_sentencepiece_processor_op.sentencepiece_get_piece_type
    return piece_type_op(
        input,
        model_file=model_file,
        model_proto=model_proto,
        name=name,
        piece_type=0)
"def",
"is_unknown",
"(",
"input",
",",
"model_file",
"=",
"None",
",",
"model_proto",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"return",
"_gen_sentencepiece_processor_op",
".",
"sentencepiece_get_piece_type",
"(",
"input",
",",
"model_file",
"=",
"model... | 38.9375 | 21.1875 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.