text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def dump(node, annotate_fields=True, include_attributes=False, indent=" "):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation
is wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node, level=0):
if isinstance(node, ast.AST):
fields = [(a, _format(b, level)) for a, b in ast.iter_fields(node)]
if include_attributes and node._attributes:
fields.extend(
[
(a, _format(getattr(node, a), level))
for a in node._attributes
]
)
return "".join(
[
node.__class__.__name__,
"(",
", ".join(
("%s=%s" % field for field in fields)
if annotate_fields
else (b for a, b in fields)
),
")",
]
)
elif isinstance(node, list):
lines = ["["]
lines.extend(
(
indent * (level + 2) + _format(x, level + 2) + ","
for x in node
)
)
if len(lines) > 1:
lines.append(indent * (level + 1) + "]")
else:
lines[-1] += "]"
return "\n".join(lines)
return repr(node)
if not isinstance(node, ast.AST):
raise TypeError("expected AST, got %r" % node.__class__.__name__)
return _format(node) | [
"def",
"dump",
"(",
"node",
",",
"annotate_fields",
"=",
"True",
",",
"include_attributes",
"=",
"False",
",",
"indent",
"=",
"\" \"",
")",
":",
"def",
"_format",
"(",
"node",
",",
"level",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"... | 37.18 | 18.62 |
def diskusage(path):
'''
Recursively calculate disk usage of path and return it
in bytes
CLI Example:
.. code-block:: bash
salt '*' file.diskusage /path/to/check
'''
total_size = 0
seen = set()
if os.path.isfile(path):
stat_structure = os.stat(path)
ret = stat_structure.st_size
return ret
for dirpath, dirnames, filenames in salt.utils.path.os_walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
try:
stat_structure = os.stat(fp)
except OSError:
continue
if stat_structure.st_ino in seen:
continue
seen.add(stat_structure.st_ino)
total_size += stat_structure.st_size
ret = total_size
return ret | [
"def",
"diskusage",
"(",
"path",
")",
":",
"total_size",
"=",
"0",
"seen",
"=",
"set",
"(",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"stat_structure",
"=",
"os",
".",
"stat",
"(",
"path",
")",
"ret",
"=",
"stat_structure"... | 21.27027 | 22.783784 |
def ckinv(self,oo):
""" check the value is date or not
檢查是否為日期格式
"""
pattern = re.compile(r"[0-9]{2}/[0-9]{2}/[0-9]{2}")
b = re.search(pattern, oo[0])
try:
b.group()
return True
except:
return False | [
"def",
"ckinv",
"(",
"self",
",",
"oo",
")",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r\"[0-9]{2}/[0-9]{2}/[0-9]{2}\"",
")",
"b",
"=",
"re",
".",
"search",
"(",
"pattern",
",",
"oo",
"[",
"0",
"]",
")",
"try",
":",
"b",
".",
"group",
"(",
... | 21.636364 | 17.909091 |
def _evolve(self, state, qargs=None):
"""Evolve a quantum state by the QuantumChannel.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions.
"""
# If subsystem evolution we use the SuperOp representation
if qargs is not None:
return SuperOp(self)._evolve(state, qargs)
# Otherwise we compute full evolution directly
state = self._format_state(state)
if state.shape[0] != self._input_dim:
raise QiskitError(
"QuantumChannel input dimension is not equal to state dimension."
)
if state.ndim == 1 and self._data[1] is None and \
self._data[0].shape[0] // self._output_dim == 1:
# If the shape of the stinespring operator is equal to the output_dim
# evolution of a state vector psi -> stine.psi
return np.dot(self._data[0], state)
# Otherwise we always return a density matrix
state = self._format_state(state, density_matrix=True)
stine_l, stine_r = self._data
if stine_r is None:
stine_r = stine_l
din, dout = self.dim
dtr = stine_l.shape[0] // dout
shape = (dout, dtr, din)
return np.einsum('iAB,BC,jAC->ij', np.reshape(stine_l, shape), state,
np.reshape(np.conjugate(stine_r), shape)) | [
"def",
"_evolve",
"(",
"self",
",",
"state",
",",
"qargs",
"=",
"None",
")",
":",
"# If subsystem evolution we use the SuperOp representation",
"if",
"qargs",
"is",
"not",
"None",
":",
"return",
"SuperOp",
"(",
"self",
")",
".",
"_evolve",
"(",
"state",
",",
... | 42.075 | 19.2 |
def guess_base_branch():
# type: (str) -> Optional[str, None]
""" Try to guess the base branch for the current branch.
Do not trust this guess. git makes it pretty much impossible to guess
the base branch reliably so this function implements few heuristics that
will work on most common use cases but anything a bit crazy will probably
trip this function.
Returns:
Optional[str]: The name of the base branch for the current branch if
guessable or **None** if can't guess.
"""
my_branch = current_branch(refresh=True).name
curr = latest_commit()
if len(curr.branches) > 1:
# We're possibly at the beginning of the new branch (currently both
# on base and new branch).
other = [x for x in curr.branches if x != my_branch]
if len(other) == 1:
return other[0]
return None
else:
# We're on one branch
parent = curr
while parent and my_branch in parent.branches:
curr = parent
if len(curr.branches) > 1:
other = [x for x in curr.branches if x != my_branch]
if len(other) == 1:
return other[0]
return None
parents = [p for p in curr.parents if my_branch in p.branches]
num_parents = len(parents)
if num_parents > 2:
# More than two parent, give up
return None
if num_parents == 2:
# This is a merge commit.
for p in parents:
if p.branches == [my_branch]:
parent = p
break
elif num_parents == 1:
parent = parents[0]
elif num_parents == 0:
parent = None
return None | [
"def",
"guess_base_branch",
"(",
")",
":",
"# type: (str) -> Optional[str, None]",
"my_branch",
"=",
"current_branch",
"(",
"refresh",
"=",
"True",
")",
".",
"name",
"curr",
"=",
"latest_commit",
"(",
")",
"if",
"len",
"(",
"curr",
".",
"branches",
")",
">",
... | 33.092593 | 17.925926 |
def fetch_events_async(self, issues, tag_name):
"""
Fetch events for all issues and add them to self.events
:param list issues: all issues
:param str tag_name: name of the tag to fetch events for
:returns: Nothing
"""
if not issues:
return issues
max_simultaneous_requests = self.options.max_simultaneous_requests
verbose = self.options.verbose
gh = self.github
user = self.options.user
repo = self.options.project
self.events_cnt = 0
if verbose:
print("fetching events for {} {}... ".format(
len(issues), tag_name)
)
def worker(issue):
page = 1
issue['events'] = []
while page > 0:
rc, data = gh.repos[user][repo].issues[
issue['number']].events.get(
page=page, per_page=PER_PAGE_NUMBER)
if rc == 200:
issue['events'].extend(data)
self.events_cnt += len(data)
else:
self.raise_GitHubError(rc, data, gh.getheaders())
page = NextPage(gh)
threads = []
cnt = len(issues)
for i in range(0, (cnt // max_simultaneous_requests) + 1):
for j in range(max_simultaneous_requests):
idx = i * max_simultaneous_requests + j
if idx == cnt:
break
t = threading.Thread(target=worker, args=(issues[idx],))
threads.append(t)
t.start()
if verbose > 2:
print(".", end="")
if not idx % PER_PAGE_NUMBER:
print("")
for t in threads:
t.join()
if verbose > 2:
print(".") | [
"def",
"fetch_events_async",
"(",
"self",
",",
"issues",
",",
"tag_name",
")",
":",
"if",
"not",
"issues",
":",
"return",
"issues",
"max_simultaneous_requests",
"=",
"self",
".",
"options",
".",
"max_simultaneous_requests",
"verbose",
"=",
"self",
".",
"options"... | 33.290909 | 16.018182 |
def enable(app_id, enabled=True):
'''
Enable or disable an existing assistive access application.
app_id
The bundle ID or command to set assistive access status.
enabled
Sets enabled or disabled status. Default is ``True``.
CLI Example:
.. code-block:: bash
salt '*' assistive.enable /usr/bin/osascript
salt '*' assistive.enable com.smileonmymac.textexpander enabled=False
'''
enable_str = '1' if enabled else '0'
for a in _get_assistive_access():
if app_id == a[0]:
cmd = 'sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" ' \
'"UPDATE access SET allowed=\'{0}\' WHERE client=\'{1}\'"'.format(enable_str, app_id)
call = __salt__['cmd.run_all'](
cmd,
output_loglevel='debug',
python_shell=False
)
if call['retcode'] != 0:
comment = ''
if 'stderr' in call:
comment += call['stderr']
if 'stdout' in call:
comment += call['stdout']
raise CommandExecutionError('Error enabling app: {0}'.format(comment))
return True
return False | [
"def",
"enable",
"(",
"app_id",
",",
"enabled",
"=",
"True",
")",
":",
"enable_str",
"=",
"'1'",
"if",
"enabled",
"else",
"'0'",
"for",
"a",
"in",
"_get_assistive_access",
"(",
")",
":",
"if",
"app_id",
"==",
"a",
"[",
"0",
"]",
":",
"cmd",
"=",
"'... | 29.609756 | 24.097561 |
def load_extra_vi_page_navigation_bindings():
"""
Key bindings, for scrolling up and down through pages.
This are separate bindings, because GNU readline doesn't have them.
"""
registry = ConditionalRegistry(Registry(), ViMode())
handle = registry.add_binding
handle(Keys.ControlF)(scroll_forward)
handle(Keys.ControlB)(scroll_backward)
handle(Keys.ControlD)(scroll_half_page_down)
handle(Keys.ControlU)(scroll_half_page_up)
handle(Keys.ControlE)(scroll_one_line_down)
handle(Keys.ControlY)(scroll_one_line_up)
handle(Keys.PageDown)(scroll_page_down)
handle(Keys.PageUp)(scroll_page_up)
return registry | [
"def",
"load_extra_vi_page_navigation_bindings",
"(",
")",
":",
"registry",
"=",
"ConditionalRegistry",
"(",
"Registry",
"(",
")",
",",
"ViMode",
"(",
")",
")",
"handle",
"=",
"registry",
".",
"add_binding",
"handle",
"(",
"Keys",
".",
"ControlF",
")",
"(",
... | 35.944444 | 11.722222 |
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional,
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the obersvations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim >= 3:
msg = "describe is not implemented on Panel objects."
raise NotImplementedError(msg)
elif self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
self._check_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (['count', 'mean', 'std', 'min'] +
formatted_percentiles + ['max'])
d = ([series.count(), series.mean(), series.std(), series.min()] +
series.quantile(percentiles).tolist() + [series.max()])
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ['count', 'unique']
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
if is_datetime64_any_dtype(data):
tz = data.dt.tz
asint = data.dropna().values.view('i8')
top = Timestamp(top)
if top.tzinfo is not None and tz is not None:
# Don't tz_localize(None) if key is already tz-aware
top = top.tz_convert(tz)
else:
top = top.tz_localize(tz)
names += ['top', 'freq', 'first', 'last']
result += [top, freq,
Timestamp(asint.min(), tz=tz),
Timestamp(asint.max(), tz=tz)]
else:
names += ['top', 'freq']
result += [top, freq]
return pd.Series(result, index=names, name=data.name)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == 'all':
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.iteritems()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
d.columns = data.columns.copy()
return d | [
"def",
"describe",
"(",
"self",
",",
"percentiles",
"=",
"None",
",",
"include",
"=",
"None",
",",
"exclude",
"=",
"None",
")",
":",
"if",
"self",
".",
"ndim",
">=",
"3",
":",
"msg",
"=",
"\"describe is not implemented on Panel objects.\"",
"raise",
"NotImpl... | 36.924699 | 19.28012 |
def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv:
""" Create vectorized environments """
envs = DummyVecEnv([self._creation_function(i, seed, preset) for i in range(parallel_envs)])
if self.frame_history is not None:
envs = VecFrameStack(envs, self.frame_history)
return envs | [
"def",
"instantiate",
"(",
"self",
",",
"parallel_envs",
",",
"seed",
"=",
"0",
",",
"preset",
"=",
"'default'",
")",
"->",
"VecEnv",
":",
"envs",
"=",
"DummyVecEnv",
"(",
"[",
"self",
".",
"_creation_function",
"(",
"i",
",",
"seed",
",",
"preset",
")... | 42.25 | 26.75 |
def _rollback_handle(cls, connection):
"""On snowflake, rolling back the handle of an aborted session raises
an exception.
"""
try:
connection.handle.rollback()
except snowflake.connector.errors.ProgrammingError as e:
msg = dbt.compat.to_string(e)
if 'Session no longer exists' not in msg:
raise | [
"def",
"_rollback_handle",
"(",
"cls",
",",
"connection",
")",
":",
"try",
":",
"connection",
".",
"handle",
".",
"rollback",
"(",
")",
"except",
"snowflake",
".",
"connector",
".",
"errors",
".",
"ProgrammingError",
"as",
"e",
":",
"msg",
"=",
"dbt",
".... | 37.8 | 10.6 |
def _makedirs(path):
"""
Create a base directory of the provided path and return None.
:param path: A string containing a path to be deconstructed and basedir
created.
:return: None
"""
dirname, _ = os.path.split(path)
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise | [
"def",
"_makedirs",
"(",
"path",
")",
":",
"dirname",
",",
"_",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"dirname",
")",
"except",
"OSError",
"as",
"exc",
":",
"if",
"exc",
".",
"errno",
"=="... | 24.0625 | 19.3125 |
def hist2d(self, da, **kwargs):
"""Make the two dimensional histogram
Parameters
----------
da: xarray.DataArray
The data source"""
if self.value is None or self.value == 'counts':
normed = False
else:
normed = True
y = da.values
x = da.coords[da.dims[0]].values
counts, xedges, yedges = np.histogram2d(
x, y, normed=normed, **kwargs)
if self.value == 'counts':
counts = counts / counts.sum().astype(float)
return counts, xedges, yedges | [
"def",
"hist2d",
"(",
"self",
",",
"da",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"value",
"is",
"None",
"or",
"self",
".",
"value",
"==",
"'counts'",
":",
"normed",
"=",
"False",
"else",
":",
"normed",
"=",
"True",
"y",
"=",
"da",
... | 31.555556 | 12.833333 |
def remove_members_in_score_range(self, min_score, max_score):
'''
Remove members from the leaderboard in a given score range.
@param min_score [float] Minimum score.
@param max_score [float] Maximum score.
'''
self.remove_members_in_score_range_in(
self.leaderboard_name,
min_score,
max_score) | [
"def",
"remove_members_in_score_range",
"(",
"self",
",",
"min_score",
",",
"max_score",
")",
":",
"self",
".",
"remove_members_in_score_range_in",
"(",
"self",
".",
"leaderboard_name",
",",
"min_score",
",",
"max_score",
")"
] | 33.545455 | 19 |
def get_buffer(self, *args):
'''
all args-->_cffi_backend.CDataOwn
Must be a pointer or an array
Returns-->buffer (if a SINGLE argument was provided)
LIST of buffer (if a args was a tuple or list)
'''
res = tuple([
self.buffer(x) for x in args
])
if len(res) == 0:
return None
elif len(res) == 1:
return res[0]
else:
return res | [
"def",
"get_buffer",
"(",
"self",
",",
"*",
"args",
")",
":",
"res",
"=",
"tuple",
"(",
"[",
"self",
".",
"buffer",
"(",
"x",
")",
"for",
"x",
"in",
"args",
"]",
")",
"if",
"len",
"(",
"res",
")",
"==",
"0",
":",
"return",
"None",
"elif",
"le... | 26.941176 | 18.352941 |
def cov_error(self, comp_cov, score_metric="frobenius"):
"""Computes the covariance error vs. comp_cov.
May require self.path_
Parameters
----------
comp_cov : array-like, shape = (n_features, n_features)
The precision to compare with.
This should normally be the test sample covariance/precision.
scaling : bool
If True, the squared error norm is divided by n_features.
If False (default), the squared error norm is not rescaled.
score_metric : str
The type of norm used to compute the error between the estimated
self.precision, self.covariance and the reference `comp_cov`.
Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
- 'kl': kl-divergence
- 'quadratic': quadratic loss
- 'log_likelihood': negative log likelihood
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The min error between `self.covariance_` and `comp_cov`.
If self.precision_ is a list, returns errors for each matrix, otherwise
returns a scalar.
"""
if not isinstance(self.precision_, list):
return _compute_error(
comp_cov, self.covariance_, self.precision_, score_metric
)
path_errors = []
for lidx, lam in enumerate(self.path_):
path_errors.append(
_compute_error(
comp_cov,
self.covariance_[lidx],
self.precision_[lidx],
score_metric,
)
)
return np.array(path_errors) | [
"def",
"cov_error",
"(",
"self",
",",
"comp_cov",
",",
"score_metric",
"=",
"\"frobenius\"",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"precision_",
",",
"list",
")",
":",
"return",
"_compute_error",
"(",
"comp_cov",
",",
"self",
".",
"covaria... | 34.418182 | 20.909091 |
def get_home(self, home_id=None):
"""
Get the data about a home
"""
now = datetime.datetime.utcnow()
if self.home and now < self.home_refresh_at:
return self.home
if not self._do_auth():
raise RuntimeError("Unable to login")
if home_id is None:
home_id = self.home_id
url = self.api_base_url + "Home/GetHomeById"
params = {
"homeId": home_id
}
headers = {
"Accept": "application/json",
'Authorization':
'bearer ' + self.login_data['token']['accessToken']
}
response = requests.get(
url, params=params, headers=headers, timeout=10)
if response.status_code != 200:
raise RuntimeError(
"{} response code when getting home".format(
response.status_code))
home = response.json()
if self.cache_home:
self.home = home
self.home_refresh_at = (datetime.datetime.utcnow()
+ datetime.timedelta(minutes=5))
return home | [
"def",
"get_home",
"(",
"self",
",",
"home_id",
"=",
"None",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"self",
".",
"home",
"and",
"now",
"<",
"self",
".",
"home_refresh_at",
":",
"return",
"self",
".",
"home",... | 26.642857 | 19.261905 |
def generate_single_simulation(self, x):
"""
Generate a single SSA simulation
:param x: an integer to reset the random seed. If None, the initial random number generator is used
:return: a list of :class:`~means.simulation.Trajectory` one per species in the problem
:rtype: list[:class:`~means.simulation.Trajectory`]
"""
#reset random seed
if x:
self.__rng = np.random.RandomState(x)
# perform one stochastic simulation
time_points, species_over_time = self._gssa(self.__initial_conditions, self.__t_max)
# build descriptors for first order raw moments aka expectations (e.g. [1, 0, 0], [0, 1, 0] and [0, 0, 1])
descriptors = []
for i, s in enumerate(self.__species):
row = [0] * len(self.__species)
row[i] = 1
descriptors.append(Moment(row, s))
# build trajectories
trajectories = [Trajectory(time_points, spot, desc) for
spot, desc in zip(species_over_time, descriptors)]
return trajectories | [
"def",
"generate_single_simulation",
"(",
"self",
",",
"x",
")",
":",
"#reset random seed",
"if",
"x",
":",
"self",
".",
"__rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"x",
")",
"# perform one stochastic simulation",
"time_points",
",",
"species_ove... | 41.269231 | 23.5 |
def _premium(fn):
"""Premium decorator for APIs that require premium access level."""
@_functools.wraps(fn)
def _fn(self, *args, **kwargs):
if self._lite:
raise RuntimeError('Premium API not available in lite access.')
return fn(self, *args, **kwargs)
return _fn | [
"def",
"_premium",
"(",
"fn",
")",
":",
"@",
"_functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"_fn",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_lite",
":",
"raise",
"RuntimeError",
"(",
"'Premium API no... | 40.875 | 14.25 |
def _call_timeout_handlers(self):
"""Call the timeout handlers due.
:Return: (next_event_timeout, sources_handled) tuple.
next_event_timeout is number of seconds until the next timeout
event, sources_handled is number of handlers called.
"""
sources_handled = 0
now = time.time()
schedule = None
while self._timeout_handlers:
schedule, handler = self._timeout_handlers[0]
if schedule <= now:
# pylint: disable-msg=W0212
logger.debug("About to call a timeout handler: {0!r}"
.format(handler))
self._timeout_handlers = self._timeout_handlers[1:]
result = handler()
logger.debug(" handler result: {0!r}".format(result))
rec = handler._pyxmpp_recurring
if rec:
logger.debug(" recurring, restarting in {0} s"
.format(handler._pyxmpp_timeout))
self._timeout_handlers.append(
(now + handler._pyxmpp_timeout, handler))
self._timeout_handlers.sort(key = lambda x: x[0])
elif rec is None and result is not None:
logger.debug(" auto-recurring, restarting in {0} s"
.format(result))
self._timeout_handlers.append((now + result, handler))
self._timeout_handlers.sort(key = lambda x: x[0])
sources_handled += 1
else:
break
if self.check_events():
return 0, sources_handled
if self._timeout_handlers and schedule:
timeout = schedule - now
else:
timeout = None
return timeout, sources_handled | [
"def",
"_call_timeout_handlers",
"(",
"self",
")",
":",
"sources_handled",
"=",
"0",
"now",
"=",
"time",
".",
"time",
"(",
")",
"schedule",
"=",
"None",
"while",
"self",
".",
"_timeout_handlers",
":",
"schedule",
",",
"handler",
"=",
"self",
".",
"_timeout... | 46.365854 | 17.97561 |
def query(self):
"""
Returns the query instance for this widget.
:return <orb.Query> || <orb.QueryCompound>
"""
queryWidget = self.queryWidget()
# check to see if there is an active container for this widget
container = queryWidget.containerFor(self)
if container:
return container.query()
elif QueryCompound.typecheck(self._query):
return self._query
# generate a new query from the editor
column = self.uiColumnDDL.currentSchemaPath()
plugin = self.currentPlugin()
editor = self.editor()
op = self.uiOperatorDDL.currentText()
if column and plugin:
query = Query(column)
plugin.setupQuery(query, op, editor)
return query
else:
return Query() | [
"def",
"query",
"(",
"self",
")",
":",
"queryWidget",
"=",
"self",
".",
"queryWidget",
"(",
")",
"# check to see if there is an active container for this widget\r",
"container",
"=",
"queryWidget",
".",
"containerFor",
"(",
"self",
")",
"if",
"container",
":",
"retu... | 31.964286 | 14.678571 |
def _Ep(self):
""" Proton energy array in GeV
"""
return np.logspace(
np.log10(self.Epmin.to("GeV").value),
np.log10(self.Epmax.to("GeV").value),
int(self.nEpd * (np.log10(self.Epmax / self.Epmin))),
) | [
"def",
"_Ep",
"(",
"self",
")",
":",
"return",
"np",
".",
"logspace",
"(",
"np",
".",
"log10",
"(",
"self",
".",
"Epmin",
".",
"to",
"(",
"\"GeV\"",
")",
".",
"value",
")",
",",
"np",
".",
"log10",
"(",
"self",
".",
"Epmax",
".",
"to",
"(",
"... | 32.75 | 14.125 |
def convert(self, argument):
"""Converts the argument to a boolean; raise ValueError on errors."""
if isinstance(argument, str):
if argument.lower() in ['true', 't', '1']:
return True
elif argument.lower() in ['false', 'f', '0']:
return False
bool_argument = bool(argument)
if argument == bool_argument:
# The argument is a valid boolean (True, False, 0, or 1), and not just
# something that always converts to bool (list, string, int, etc.).
return bool_argument
raise ValueError('Non-boolean argument to boolean flag', argument) | [
"def",
"convert",
"(",
"self",
",",
"argument",
")",
":",
"if",
"isinstance",
"(",
"argument",
",",
"str",
")",
":",
"if",
"argument",
".",
"lower",
"(",
")",
"in",
"[",
"'true'",
",",
"'t'",
",",
"'1'",
"]",
":",
"return",
"True",
"elif",
"argumen... | 38.933333 | 19 |
def get_system_name(self, oid_system_name):
"""Get the short os name from the OS name OID string."""
short_system_name = None
if oid_system_name == '':
return short_system_name
# Find the short name in the oid_to_short_os_name dict
for r, v in iteritems(oid_to_short_system_name):
if re.search(r, oid_system_name):
short_system_name = v
break
return short_system_name | [
"def",
"get_system_name",
"(",
"self",
",",
"oid_system_name",
")",
":",
"short_system_name",
"=",
"None",
"if",
"oid_system_name",
"==",
"''",
":",
"return",
"short_system_name",
"# Find the short name in the oid_to_short_os_name dict",
"for",
"r",
",",
"v",
"in",
"i... | 32.928571 | 15.357143 |
def maximum_syscall_number(self, abi):
"""
:param abi: The abi to evaluate
:return: The largest syscall number known for the given abi
"""
if abi not in self.syscall_number_mapping or \
not self.syscall_number_mapping[abi]:
return 0
return max(self.syscall_number_mapping[abi]) | [
"def",
"maximum_syscall_number",
"(",
"self",
",",
"abi",
")",
":",
"if",
"abi",
"not",
"in",
"self",
".",
"syscall_number_mapping",
"or",
"not",
"self",
".",
"syscall_number_mapping",
"[",
"abi",
"]",
":",
"return",
"0",
"return",
"max",
"(",
"self",
".",... | 38.666667 | 10.222222 |
def _topLevelObjectGenerator(self, request, numObjects, getByIndexMethod):
"""
Returns a generator over the results for the specified request, which
is over a set of objects of the specified size. The objects are
returned by call to the specified method, which must take a single
integer as an argument. The returned generator yields a sequence of
(object, nextPageToken) pairs, which allows this iteration to be picked
up at any point.
"""
currentIndex = 0
if request.page_token:
currentIndex, = paging._parsePageToken(
request.page_token, 1)
while currentIndex < numObjects:
object_ = getByIndexMethod(currentIndex)
currentIndex += 1
nextPageToken = None
if currentIndex < numObjects:
nextPageToken = str(currentIndex)
yield object_.toProtocolElement(), nextPageToken | [
"def",
"_topLevelObjectGenerator",
"(",
"self",
",",
"request",
",",
"numObjects",
",",
"getByIndexMethod",
")",
":",
"currentIndex",
"=",
"0",
"if",
"request",
".",
"page_token",
":",
"currentIndex",
",",
"=",
"paging",
".",
"_parsePageToken",
"(",
"request",
... | 47.1 | 16.3 |
def load_file(filename, out=sys.stdout):
"""
load a Python source file and compile it to byte-code
_load_file(filename: string): code_object
filename: name of file containing Python source code
(normally a .py)
code_object: code_object compiled from this source code
This function does NOT write any file!
"""
fp = open(filename, 'rb')
try:
source = fp.read()
try:
if PYTHON_VERSION < 2.6:
co = compile(source, filename, 'exec')
else:
co = compile(source, filename, 'exec', dont_inherit=True)
except SyntaxError:
out.write('>>Syntax error in %s\n' % filename)
raise
finally:
fp.close()
return co | [
"def",
"load_file",
"(",
"filename",
",",
"out",
"=",
"sys",
".",
"stdout",
")",
":",
"fp",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
"try",
":",
"source",
"=",
"fp",
".",
"read",
"(",
")",
"try",
":",
"if",
"PYTHON_VERSION",
"<",
"2.6",
":... | 31.521739 | 15.869565 |
async def get_cred_def_id(self):
"""
Get the ledger ID of the object
Example:
source_id = 'foobar123'
schema_name = 'Schema Name'
payment_handle = 0
credential_def1 = await CredentialDef.create(source_id, name, schema_id, payment_handle)
assert await credential_def.get_cred_def_id() == '2hoqvcwupRTUNkXn6ArYzs:3:CL:2471'
:return: ID string
"""
cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_handle = c_uint32(self.handle)
cred_def_id = await do_call('vcx_credentialdef_get_cred_def_id', c_handle, cb)
return cred_def_id .decode() | [
"async",
"def",
"get_cred_def_id",
"(",
"self",
")",
":",
"cb",
"=",
"create_cb",
"(",
"CFUNCTYPE",
"(",
"None",
",",
"c_uint32",
",",
"c_uint32",
",",
"c_char_p",
")",
")",
"c_handle",
"=",
"c_uint32",
"(",
"self",
".",
"handle",
")",
"cred_def_id",
"="... | 40.3125 | 18.8125 |
def get_expiration_time(self, app: 'Quart', session: SessionMixin) -> Optional[datetime]:
"""Helper method to return the Session expiration time.
If the session is not 'permanent' it will expire as and when
the browser stops accessing the app.
"""
if session.permanent:
return datetime.utcnow() + app.permanent_session_lifetime
else:
return None | [
"def",
"get_expiration_time",
"(",
"self",
",",
"app",
":",
"'Quart'",
",",
"session",
":",
"SessionMixin",
")",
"->",
"Optional",
"[",
"datetime",
"]",
":",
"if",
"session",
".",
"permanent",
":",
"return",
"datetime",
".",
"utcnow",
"(",
")",
"+",
"app... | 40.9 | 20.5 |
def pixel_to_geo(pixel, level):
"""Transform from pixel to geo coordinates"""
pixel_x = pixel[0]
pixel_y = pixel[1]
map_size = float(TileSystem.map_size(level))
x = (TileSystem.clip(pixel_x, (0, map_size - 1)) / map_size) - 0.5
y = 0.5 - (TileSystem.clip(pixel_y, (0, map_size - 1)) / map_size)
lat = 90 - 360 * atan(exp(-y * 2 * pi)) / pi
lon = 360 * x
return round(lat, 6), round(lon, 6) | [
"def",
"pixel_to_geo",
"(",
"pixel",
",",
"level",
")",
":",
"pixel_x",
"=",
"pixel",
"[",
"0",
"]",
"pixel_y",
"=",
"pixel",
"[",
"1",
"]",
"map_size",
"=",
"float",
"(",
"TileSystem",
".",
"map_size",
"(",
"level",
")",
")",
"x",
"=",
"(",
"TileS... | 45.2 | 15.1 |
def _keep_analyses( analyses, keep_forms, target_forms ):
''' Filters the given list of *analyses* by morphological forms:
deletes analyses that are listed in *target_forms*, but not in
*keep_forms*. '''
to_delete = []
for aid, analysis in enumerate(analyses):
delete = False
for target in target_forms:
if (target == analysis[FORM] and not analysis[FORM] in keep_forms):
delete = True
if delete:
to_delete.append( aid )
if to_delete:
to_delete.reverse()
for aid in to_delete:
del analyses[aid] | [
"def",
"_keep_analyses",
"(",
"analyses",
",",
"keep_forms",
",",
"target_forms",
")",
":",
"to_delete",
"=",
"[",
"]",
"for",
"aid",
",",
"analysis",
"in",
"enumerate",
"(",
"analyses",
")",
":",
"delete",
"=",
"False",
"for",
"target",
"in",
"target_form... | 37.5625 | 17.5625 |
def _find_impl(cls, registry):
"""Returns the best matching implementation from *registry* for type *cls*.
Where there is no registered implementation for a specific type, its method
resolution order is used to find a more generic implementation.
Note: if *registry* does not contain an implementation for the base
*object* type, this function may return None.
"""
mro = _compose_mro(cls, registry.keys())
match = None
for t in mro:
if match is not None:
# If *match* is an implicit ABC but there is another unrelated,
# equally matching implicit ABC, refuse the temptation to guess.
if ( # :off
t in registry and
t not in cls.__mro__ and
match not in cls.__mro__ and
not issubclass(match, t)
): # :on
raise RuntimeError("Ambiguous dispatch: {0} or {1}".format(match, t))
break
if t in registry:
match = t
return registry.get(match) | [
"def",
"_find_impl",
"(",
"cls",
",",
"registry",
")",
":",
"mro",
"=",
"_compose_mro",
"(",
"cls",
",",
"registry",
".",
"keys",
"(",
")",
")",
"match",
"=",
"None",
"for",
"t",
"in",
"mro",
":",
"if",
"match",
"is",
"not",
"None",
":",
"# If *mat... | 37.740741 | 19.518519 |
def fix_e112(self, result):
"""Fix under-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
if not target.lstrip().startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = self.indent_word + target | [
"def",
"fix_e112",
"(",
"self",
",",
"result",
")",
":",
"line_index",
"=",
"result",
"[",
"'line'",
"]",
"-",
"1",
"target",
"=",
"self",
".",
"source",
"[",
"line_index",
"]",
"if",
"not",
"target",
".",
"lstrip",
"(",
")",
".",
"startswith",
"(",
... | 32.1 | 14.5 |
def to_file(data, filename_or_file_object):
"""
Write ``data`` to a file specified by either filename of the file or an opened :class:`file` buffer.
:param data: Object to write to file
:param filename_or_file_object: filename/or opened file buffer to write to
:type filename_or_file_object: basestring|file
"""
if isinstance(filename_or_file_object, basestring):
file_ = open(filename_or_file_object, 'w')
we_opened = True
else:
file_ = filename_or_file_object
we_opened = False
try:
file_.write(dump(data))
finally:
if we_opened:
file_.close() | [
"def",
"to_file",
"(",
"data",
",",
"filename_or_file_object",
")",
":",
"if",
"isinstance",
"(",
"filename_or_file_object",
",",
"basestring",
")",
":",
"file_",
"=",
"open",
"(",
"filename_or_file_object",
",",
"'w'",
")",
"we_opened",
"=",
"True",
"else",
"... | 31.4 | 19.3 |
def swo_disable(self, port_mask):
"""Disables ITM & Stimulus ports.
Args:
self (JLink): the ``JLink`` instance
port_mask (int): mask specifying which ports to disable
Returns:
``None``
Raises:
JLinkException: on error
"""
res = self._dll.JLINKARM_SWO_DisableTarget(port_mask)
if res != 0:
raise errors.JLinkException(res)
return None | [
"def",
"swo_disable",
"(",
"self",
",",
"port_mask",
")",
":",
"res",
"=",
"self",
".",
"_dll",
".",
"JLINKARM_SWO_DisableTarget",
"(",
"port_mask",
")",
"if",
"res",
"!=",
"0",
":",
"raise",
"errors",
".",
"JLinkException",
"(",
"res",
")",
"return",
"N... | 25.647059 | 19.294118 |
def multisig_validate_deserialize(rawmsg, requrl=None, check_expiration=True,
decode_payload=True,
algorithm_name=DEFAULT_ALGO):
"""
Validate a general JSON serialization and return the headers and
payload if all the signatures are good.
If check_expiration is False, the payload will be accepted even if
expired.
If decode_payload is True then this function will attempt to decode
it as JSON, otherwise the raw payload will be returned. Note that
it is always decoded from base64url.
"""
assert algorithm_name in ALGORITHM_AVAILABLE
algo = ALGORITHM_AVAILABLE[algorithm_name]
data = json.loads(rawmsg)
payload64 = data.get('payload', None)
signatures = data.get('signatures', None)
if payload64 is None or not isinstance(signatures, list):
raise InvalidMessage('must contain "payload" and "signatures"')
if not len(signatures):
raise InvalidMessage('no signatures')
try:
payload, sigs = _multisig_decode(payload64, signatures, decode_payload)
except Exception as err:
raise InvalidMessage(str(err))
all_valid = True
try:
for entry in sigs:
valid = _verify_signature(algorithm=algo, **entry)
all_valid = all_valid and valid
except Exception as err:
raise InvalidMessage('failed to verify signature: {}'.format(err))
if not all_valid:
return None, None
if decode_payload:
_verify_payload(payload, check_expiration, requrl)
return [entry['header'] for entry in sigs], payload | [
"def",
"multisig_validate_deserialize",
"(",
"rawmsg",
",",
"requrl",
"=",
"None",
",",
"check_expiration",
"=",
"True",
",",
"decode_payload",
"=",
"True",
",",
"algorithm_name",
"=",
"DEFAULT_ALGO",
")",
":",
"assert",
"algorithm_name",
"in",
"ALGORITHM_AVAILABLE"... | 35.311111 | 21.133333 |
def multi_path_generator(pathnames):
"""
yields (name,chunkgen) for all of the files found under the list
of pathnames given. This is recursive, so directories will have
their contents emitted. chunkgen is a function that can called and
iterated over to obtain the contents of the file in multiple
reads.
"""
for pathname in pathnames:
if isdir(pathname):
for entry in directory_generator(pathname):
yield entry
else:
yield pathname, file_chunk(pathname) | [
"def",
"multi_path_generator",
"(",
"pathnames",
")",
":",
"for",
"pathname",
"in",
"pathnames",
":",
"if",
"isdir",
"(",
"pathname",
")",
":",
"for",
"entry",
"in",
"directory_generator",
"(",
"pathname",
")",
":",
"yield",
"entry",
"else",
":",
"yield",
... | 35.266667 | 17.933333 |
def create_database(self, database_name):
"""
Creates a new database in CosmosDB.
"""
if database_name is None:
raise AirflowBadRequest("Database name cannot be None.")
# We need to check to see if this database already exists so we don't try
# to create it twice
existing_database = list(self.get_conn().QueryDatabases({
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{"name": "@id", "value": database_name}
]
}))
# Only create if we did not find it already existing
if len(existing_database) == 0:
self.get_conn().CreateDatabase({"id": database_name}) | [
"def",
"create_database",
"(",
"self",
",",
"database_name",
")",
":",
"if",
"database_name",
"is",
"None",
":",
"raise",
"AirflowBadRequest",
"(",
"\"Database name cannot be None.\"",
")",
"# We need to check to see if this database already exists so we don't try",
"# to creat... | 37.105263 | 17.947368 |
def _set_line_speed(self, v, load=False):
"""
Setter method for line_speed, mapped from YANG variable /interface/management/line_speed (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_line_speed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_line_speed() directly.
YANG Description: The line-speed characteristics for this management
interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=line_speed.line_speed, is_container='container', presence=False, yang_name="line-speed", rest_name="line-speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The line-speed characteristics for this management \ninterface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """line_speed must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=line_speed.line_speed, is_container='container', presence=False, yang_name="line-speed", rest_name="line-speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The line-speed characteristics for this management \ninterface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__line_speed = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_line_speed",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"bas... | 71.04 | 35.56 |
def condition_yaw(heading, relative=False):
"""
Send MAV_CMD_CONDITION_YAW message to point vehicle at a specified heading (in degrees).
This method sets an absolute heading by default, but you can set the `relative` parameter
to `True` to set yaw relative to the current yaw heading.
By default the yaw of the vehicle will follow the direction of travel. After setting
the yaw using this function there is no way to return to the default yaw "follow direction
of travel" behaviour (https://github.com/diydrones/ardupilot/issues/2427)
For more information see:
http://copter.ardupilot.com/wiki/common-mavlink-mission-command-messages-mav_cmd/#mav_cmd_condition_yaw
"""
if relative:
is_relative = 1 #yaw relative to direction of travel
else:
is_relative = 0 #yaw is an absolute angle
# create the CONDITION_YAW command using command_long_encode()
msg = vehicle.message_factory.command_long_encode(
0, 0, # target system, target component
mavutil.mavlink.MAV_CMD_CONDITION_YAW, #command
0, #confirmation
heading, # param 1, yaw in degrees
0, # param 2, yaw speed deg/s
1, # param 3, direction -1 ccw, 1 cw
is_relative, # param 4, relative offset 1, absolute angle 0
0, 0, 0) # param 5 ~ 7 not used
# send command to vehicle
vehicle.send_mavlink(msg) | [
"def",
"condition_yaw",
"(",
"heading",
",",
"relative",
"=",
"False",
")",
":",
"if",
"relative",
":",
"is_relative",
"=",
"1",
"#yaw relative to direction of travel",
"else",
":",
"is_relative",
"=",
"0",
"#yaw is an absolute angle",
"# create the CONDITION_YAW comman... | 46.5 | 23.566667 |
def idPlayerResults(cfg, rawResult):
"""interpret standard rawResult for all players with known IDs"""
result = {}
knownPlayers = []
dictResult = {plyrRes.player_id : plyrRes.result for plyrRes in rawResult}
for p in cfg.players:
if p.playerID and p.playerID in dictResult: # identified player w/ result
knownPlayers.append(p)
result[p.name] = dictResult[p.playerID]
#if len(knownPlayers) == len(dictResult) - 1: # identified all but one player
# for p in cfg.players: # search for the not identified player
# if p in knownPlayers: continue # already found
# result.append( [p.name, p.playerID, dictResult[p.playerID]] )
# break # found missing player; stop searching
#for r in result:
# print("result:>", r)
return result | [
"def",
"idPlayerResults",
"(",
"cfg",
",",
"rawResult",
")",
":",
"result",
"=",
"{",
"}",
"knownPlayers",
"=",
"[",
"]",
"dictResult",
"=",
"{",
"plyrRes",
".",
"player_id",
":",
"plyrRes",
".",
"result",
"for",
"plyrRes",
"in",
"rawResult",
"}",
"for",... | 48.058824 | 20.705882 |
def search(self, **kwargs):
"""
Method to search neighbors based on extends search.
:param search: Dict containing QuerySets to find neighbors.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing neighbors
"""
return super(ApiV4Neighbor, self).get(self.prepare_url(
'api/v4/neighbor/', kwargs)) | [
"def",
"search",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
"ApiV4Neighbor",
",",
"self",
")",
".",
"get",
"(",
"self",
".",
"prepare_url",
"(",
"'api/v4/neighbor/'",
",",
"kwargs",
")",
")"
] | 44.642857 | 22.071429 |
def validate_pluginid(value):
'''Returns True if the provided value is a valid pluglin id'''
valid = string.ascii_letters + string.digits + '.'
return all(c in valid for c in value) | [
"def",
"validate_pluginid",
"(",
"value",
")",
":",
"valid",
"=",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"digits",
"+",
"'.'",
"return",
"all",
"(",
"c",
"in",
"valid",
"for",
"c",
"in",
"value",
")"
] | 47.5 | 13 |
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename | [
"def",
"_make_zipfile",
"(",
"base_name",
",",
"base_dir",
",",
"verbose",
"=",
"0",
",",
"dry_run",
"=",
"0",
",",
"logger",
"=",
"None",
")",
":",
"zip_filename",
"=",
"base_name",
"+",
"\".zip\"",
"archive_dir",
"=",
"os",
".",
"path",
".",
"dirname",... | 36.108696 | 21.065217 |
def __touch_and_multi(self, *args, **kwargs):
"""
Runs each tuple tuple of (redis_cmd, args) in provided inside of a Redis
MULTI block, plus an increment of the last_updated value, then executes
the MULTI block. If ``returns`` is specified, it returns that index
from the results list. If ``returns`` is None, returns all values.
"""
with self.connection.pipeline() as pipe:
pipe.incr(self.__last_update_key)
[getattr(pipe, function)(*a) for function, a in args]
results = pipe.execute()
if kwargs.get('returns'):
return results[kwargs.get('returns')]
else:
return results | [
"def",
"__touch_and_multi",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"self",
".",
"connection",
".",
"pipeline",
"(",
")",
"as",
"pipe",
":",
"pipe",
".",
"incr",
"(",
"self",
".",
"__last_update_key",
")",
"[",
"geta... | 41.647059 | 19.176471 |
def create_document(
self,
parent,
collection_id,
document_id,
document,
mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new document.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreClient()
>>>
>>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')
>>>
>>> # TODO: Initialize `collection_id`:
>>> collection_id = ''
>>>
>>> # TODO: Initialize `document_id`:
>>> document_id = ''
>>>
>>> # TODO: Initialize `document`:
>>> document = {}
>>>
>>> response = client.create_document(parent, collection_id, document_id, document)
Args:
parent (str): The parent resource. For example:
``projects/{project_id}/databases/{database_id}/documents`` or
``projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}``
collection_id (str): The collection ID, relative to ``parent``, to list. For example:
``chatrooms``.
document_id (str): The client-assigned document ID to use for this document.
Optional. If not specified, an ID will be assigned by the service.
document (Union[dict, ~google.cloud.firestore_v1beta1.types.Document]): The document to create. ``name`` must not be set.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.Document`
mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields.
If the document has a field that is not present in this mask, that field
will not be returned in the response.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.firestore_v1beta1.types.Document` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_document" not in self._inner_api_calls:
self._inner_api_calls[
"create_document"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_document,
default_retry=self._method_configs["CreateDocument"].retry,
default_timeout=self._method_configs["CreateDocument"].timeout,
client_info=self._client_info,
)
request = firestore_pb2.CreateDocumentRequest(
parent=parent,
collection_id=collection_id,
document_id=document_id,
document=document,
mask=mask,
)
return self._inner_api_calls["create_document"](
request, retry=retry, timeout=timeout, metadata=metadata
) | [
"def",
"create_document",
"(",
"self",
",",
"parent",
",",
"collection_id",
",",
"document_id",
",",
"document",
",",
"mask",
"=",
"None",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
... | 44.293478 | 27.141304 |
def mode_reader(self):
"""MODE READER command.
Instructs a mode-switching server to switch modes.
See <http://tools.ietf.org/html/rfc3977#section-5.3>
Returns:
Boolean value indicating whether posting is allowed or not.
"""
code, message = self.command("MODE READER")
if not code in [200, 201]:
raise NNTPReplyError(code, message)
return code == 200 | [
"def",
"mode_reader",
"(",
"self",
")",
":",
"code",
",",
"message",
"=",
"self",
".",
"command",
"(",
"\"MODE READER\"",
")",
"if",
"not",
"code",
"in",
"[",
"200",
",",
"201",
"]",
":",
"raise",
"NNTPReplyError",
"(",
"code",
",",
"message",
")",
"... | 28.466667 | 20.6 |
def get_all(self, security):
"""
Get all available quote data for the given ticker security.
Returns a dictionary.
"""
url = 'http://www.google.com/finance?q=%s' % security
page = self._request(url)
soup = BeautifulSoup(page)
snapData = soup.find("table", {"class": "snap-data"})
if snapData is None:
raise UfException(Errors.STOCK_SYMBOL_ERROR, "Can find data for stock %s, security error?" % security)
data = {}
for row in snapData.findAll('tr'):
keyTd, valTd = row.findAll('td')
data[keyTd.getText()] = valTd.getText()
return data | [
"def",
"get_all",
"(",
"self",
",",
"security",
")",
":",
"url",
"=",
"'http://www.google.com/finance?q=%s'",
"%",
"security",
"page",
"=",
"self",
".",
"_request",
"(",
"url",
")",
"soup",
"=",
"BeautifulSoup",
"(",
"page",
")",
"snapData",
"=",
"soup",
"... | 37.055556 | 18.388889 |
def install_default_formatters(self):
"""
Installs default formatters for the following tags:
b, i, u, s, list (and \*), quote, code, center, color, url
"""
self.add_simple_formatter('b', '<strong>%(value)s</strong>')
self.add_simple_formatter('i', '<em>%(value)s</em>')
self.add_simple_formatter('u', '<u>%(value)s</u>')
self.add_simple_formatter('s', '<strike>%(value)s</strike>')
self.add_simple_formatter('hr', '<hr />', standalone=True)
self.add_simple_formatter('sub', '<sub>%(value)s</sub>')
self.add_simple_formatter('sup', '<sup>%(value)s</sup>')
def _render_list(name, value, options, parent, context):
list_type = options['list'] if (options and 'list' in options) else '*'
css_opts = {
'1': 'decimal', '01': 'decimal-leading-zero',
'a': 'lower-alpha', 'A': 'upper-alpha',
'i': 'lower-roman', 'I': 'upper-roman',
}
tag = 'ol' if list_type in css_opts else 'ul'
css = ' style="list-style-type:%s;"' % css_opts[list_type] if list_type in css_opts else ''
return '<%s%s>%s</%s>' % (tag, css, value, tag)
self.add_formatter('list', _render_list, transform_newlines=False, strip=True, swallow_trailing_newline=True)
# Make sure transform_newlines = False for [*], so [code] tags can be embedded without transformation.
def _render_list_item(name, value, options, parent, context):
if not parent or parent.tag_name != 'list':
return '[*]%s<br />' % value
return '<li>%s</li>' % value
self.add_formatter('*', _render_list_item, newline_closes=True, transform_newlines=False,
same_tag_closes=True, strip=True)
self.add_simple_formatter('quote', '<blockquote>%(value)s</blockquote>', strip=True,
swallow_trailing_newline=True)
self.add_simple_formatter('code', '<code>%(value)s</code>', render_embedded=False, transform_newlines=False,
swallow_trailing_newline=True, replace_cosmetic=False)
self.add_simple_formatter('center', '<div style="text-align:center;">%(value)s</div>')
def _render_color(name, value, options, parent, context):
if 'color' in options:
color = options['color'].strip()
elif options:
color = list(options.keys())[0].strip()
else:
return value
match = re.match(r'^([a-z]+)|^(#[a-f0-9]{3,6})', color, re.I)
color = match.group() if match else 'inherit'
return '<span style="color:%(color)s;">%(value)s</span>' % {
'color': color,
'value': value,
}
self.add_formatter('color', _render_color)
def _render_url(name, value, options, parent, context):
if options and 'url' in options:
# Option values are not escaped for HTML output.
href = self._replace(options['url'], self.REPLACE_ESCAPE)
else:
href = value
# Completely ignore javascript: and data: "links".
if re.sub(r'[^a-z0-9+]', '', href.lower().split(':', 1)[0]) in ('javascript', 'data', 'vbscript'):
return ''
# Only add the missing http:// if it looks like it starts with a domain name.
if '://' not in href and _domain_re.match(href):
href = 'http://' + href
return self.url_template.format(href=href.replace('"', '%22'), text=value)
self.add_formatter('url', _render_url, replace_links=False, replace_cosmetic=False) | [
"def",
"install_default_formatters",
"(",
"self",
")",
":",
"self",
".",
"add_simple_formatter",
"(",
"'b'",
",",
"'<strong>%(value)s</strong>'",
")",
"self",
".",
"add_simple_formatter",
"(",
"'i'",
",",
"'<em>%(value)s</em>'",
")",
"self",
".",
"add_simple_formatter... | 52.1 | 26.585714 |
def render_none(self, context, result):
"""Render empty responses."""
context.response.body = b''
del context.response.content_length
return True | [
"def",
"render_none",
"(",
"self",
",",
"context",
",",
"result",
")",
":",
"context",
".",
"response",
".",
"body",
"=",
"b''",
"del",
"context",
".",
"response",
".",
"content_length",
"return",
"True"
] | 29.8 | 8.4 |
def parse_schedule(schedule, action):
""" parses the given schedule and validates at """
error = None
scheduled_at = None
try:
scheduled_at = dateutil.parser.parse(schedule)
if scheduled_at.tzinfo is None:
error = 'Timezone information is mandatory for the scheduled {0}'.format(action)
status_code = 400
elif scheduled_at < datetime.datetime.now(tzutc):
error = 'Cannot schedule {0} in the past'.format(action)
status_code = 422
else:
status_code = None
except (ValueError, TypeError):
logger.exception('Invalid scheduled %s time: %s', action, schedule)
error = 'Unable to parse scheduled timestamp. It should be in an unambiguous format, e.g. ISO 8601'
status_code = 422
return (status_code, error, scheduled_at) | [
"def",
"parse_schedule",
"(",
"schedule",
",",
"action",
")",
":",
"error",
"=",
"None",
"scheduled_at",
"=",
"None",
"try",
":",
"scheduled_at",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"schedule",
")",
"if",
"scheduled_at",
".",
"tzinfo",
"is",... | 47.789474 | 19.368421 |
def chi_a(mass1, mass2, spin1z, spin2z):
""" Returns the aligned mass-weighted spin difference from mass1, mass2,
spin1z, and spin2z.
"""
return (spin2z * mass2 - spin1z * mass1) / (mass2 + mass1) | [
"def",
"chi_a",
"(",
"mass1",
",",
"mass2",
",",
"spin1z",
",",
"spin2z",
")",
":",
"return",
"(",
"spin2z",
"*",
"mass2",
"-",
"spin1z",
"*",
"mass1",
")",
"/",
"(",
"mass2",
"+",
"mass1",
")"
] | 41.6 | 7.8 |
def set(self, key, value):
"""Only set if purview caching is enabled"""
if config.CACHE_POTENTIAL_PURVIEWS:
self.cache[key] = value | [
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"config",
".",
"CACHE_POTENTIAL_PURVIEWS",
":",
"self",
".",
"cache",
"[",
"key",
"]",
"=",
"value"
] | 39 | 5.5 |
def cli(file1, file2, comments) -> int:
""" Compare file1 to file2 using a filter """
sys.exit(compare_files(file1, file2, comments)) | [
"def",
"cli",
"(",
"file1",
",",
"file2",
",",
"comments",
")",
"->",
"int",
":",
"sys",
".",
"exit",
"(",
"compare_files",
"(",
"file1",
",",
"file2",
",",
"comments",
")",
")"
] | 46.333333 | 4 |
def summarize_provenance_per_cache(self):
"""Utility function to summarize provenance files for cached items used by a Cohort,
for each cache_dir that exists. Only existing cache_dirs are summarized.
This is a summary of provenance files because the function checks to see whether all
patients data have the same provenance within the cache dir. The function assumes
that it will be desireable to have all patients data generated using the same
environment, for each cache type.
At the moment, most PROVENANCE files contain details about packages used to generat
e the cached data file. However, this function is generic & so it summarizes the
contents of those files irrespective of their contents.
Returns
----------
Dict containing summarized provenance for each existing cache_dir, after checking
to see that provenance files are identical among all patients in the data frame for
that cache_dir.
If conflicting PROVENANCE files are discovered within a cache-dir:
- a warning is generated, describing the conflict
- and, a value of `None` is returned in the dictionary for that cache-dir
See also
-----------
* `?cohorts.Cohort.summarize_provenance` which summarizes provenance files among
cache_dirs.
* `?cohorts.Cohort.summarize_dataframe` which hashes/summarizes contents of the data
frame for this cohort.
"""
provenance_summary = {}
df = self.as_dataframe()
for cache in self.cache_names:
cache_name = self.cache_names[cache]
cache_provenance = None
num_discrepant = 0
this_cache_dir = path.join(self.cache_dir, cache_name)
if path.exists(this_cache_dir):
for patient_id in self._list_patient_ids():
patient_cache_dir = path.join(this_cache_dir, patient_id)
try:
this_provenance = self.load_provenance(patient_cache_dir = patient_cache_dir)
except:
this_provenance = None
if this_provenance:
if not(cache_provenance):
cache_provenance = this_provenance
else:
num_discrepant += compare_provenance(this_provenance, cache_provenance)
if num_discrepant == 0:
provenance_summary[cache_name] = cache_provenance
else:
provenance_summary[cache_name] = None
return(provenance_summary) | [
"def",
"summarize_provenance_per_cache",
"(",
"self",
")",
":",
"provenance_summary",
"=",
"{",
"}",
"df",
"=",
"self",
".",
"as_dataframe",
"(",
")",
"for",
"cache",
"in",
"self",
".",
"cache_names",
":",
"cache_name",
"=",
"self",
".",
"cache_names",
"[",
... | 48.925926 | 24.833333 |
def create(self, path: str, k: int = 20):
"""
Create from a scored lexicon file (fast_align format) using vocab from a trained Sockeye model.
:param path: Path to lexicon file.
:param k: Number of target entries per source to keep.
"""
self.lex = np.zeros((len(self.vocab_source), k), dtype=np.int)
src_unk_id = self.vocab_source[C.UNK_SYMBOL]
trg_unk_id = self.vocab_target[C.UNK_SYMBOL]
num_insufficient = 0 # number of source tokens with insufficient number of translations given k
for src_id, group in groupby(lexicon_iterator(path, self.vocab_source, self.vocab_target), key=itemgetter(0)):
# Unk token will always be part of target vocab, so no need to track it here
if src_id == src_unk_id:
continue
# filter trg_unk_id
filtered_group = ((trg_id, prob) for src_id, trg_id, prob in group if trg_id != trg_unk_id)
# sort by prob and take top k
top_k = [trg_id for trg_id, prob in sorted(filtered_group, key=itemgetter(1), reverse=True)[:k]]
if len(top_k) < k:
num_insufficient += 1
self.lex[src_id, :len(top_k)] = top_k
logger.info("Created top-k lexicon from \"%s\", k=%d. %d source tokens with fewer than %d translations",
path, k, num_insufficient, k) | [
"def",
"create",
"(",
"self",
",",
"path",
":",
"str",
",",
"k",
":",
"int",
"=",
"20",
")",
":",
"self",
".",
"lex",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"self",
".",
"vocab_source",
")",
",",
"k",
")",
",",
"dtype",
"=",
"np",
"... | 50.888889 | 28 |
    def tabbedPane(self, req, tag):
        """
        Render a tabbed pane tab for each top-level
        L{xmantissa.ixmantissa.IPreferenceCollection} tab

        @param req: the HTTP request being rendered (unused here).
        @param tag: the template tag being filled (unused here).
        @return: a C{TabbedPaneFragment} parented to this fragment.
        """
        navigation = webnav.getTabs(self.aggregator.getPreferenceCollections())
        pages = list()
        for tab in navigation:
            # Adapt the store item behind each tab to IRenderer so it can be
            # embedded as a page of the tabbed pane.
            f = inevow.IRenderer(
                self.aggregator.store.getItemByID(tab.storeID))
            f.tab = tab
            # Not every adapted renderer is a Fragment; only parent those
            # that support it.
            if hasattr(f, 'setFragmentParent'):
                f.setFragmentParent(self)
            pages.append((tab.name, f))
        f = tabbedPane.TabbedPaneFragment(pages, name='preference-editor')
        f.setFragmentParent(self)
        return f
"def",
"tabbedPane",
"(",
"self",
",",
"req",
",",
"tag",
")",
":",
"navigation",
"=",
"webnav",
".",
"getTabs",
"(",
"self",
".",
"aggregator",
".",
"getPreferenceCollections",
"(",
")",
")",
"pages",
"=",
"list",
"(",
")",
"for",
"tab",
"in",
"naviga... | 36.944444 | 14.944444 |
def reload(*command, ignore_patterns=None):
    """Run *command* as a child process and restart it on file changes.

    Watches the current directory recursively, forwarding the child's
    output to stdout, and restarts the command whenever the event handler
    reports a (non-ignored) modification.

    :param command: the command and its arguments to run.
    :param ignore_patterns: patterns of paths to ignore; when falsy they are
        loaded from the ``.reloadignore`` file.
    """
    path = "."
    sig = signal.SIGTERM
    delay = 0.25
    ignorefile = ".reloadignore"
    # FIX: default was a mutable list literal; None keeps identical behavior
    # (both fall through the `or` below) without the shared-default pitfall.
    ignore_patterns = ignore_patterns or load_ignore_patterns(ignorefile)
    event_handler = ReloadEventHandler(ignore_patterns)
    # NOTE(review): the `signal` *module* is passed here while `sig`
    # (SIGTERM above) is never used -- looks like a latent bug; confirm
    # against Reloader's expected second argument before changing it.
    reloader = Reloader(command, signal)
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    reloader.start_command()
    try:
        while True:
            time.sleep(delay)
            sys.stdout.write(reloader.read())
            sys.stdout.flush()
            if event_handler.modified:
                reloader.restart_command()
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
    reloader.stop_command()
    # Drain any remaining child output before returning.
    sys.stdout.write(reloader.read())
    sys.stdout.flush()
"def",
"reload",
"(",
"*",
"command",
",",
"ignore_patterns",
"=",
"[",
"]",
")",
":",
"path",
"=",
"\".\"",
"sig",
"=",
"signal",
".",
"SIGTERM",
"delay",
"=",
"0.25",
"ignorefile",
"=",
"\".reloadignore\"",
"ignore_patterns",
"=",
"ignore_patterns",
"or",
... | 25.75 | 18.5625 |
def main(reraise_exceptions=False, **kwargs):
    """Main program. Catches several common errors and displays them nicely.

    :param reraise_exceptions: when True, unexpected exceptions propagate to
        the caller instead of being reported (useful in tests/debugging).
    :param kwargs: forwarded verbatim to ``cli.main``.
    """
    exit_status = 0
    try:
        cli.main(**kwargs)
    # NOTE: SoftLayerAPIError must be caught before its SoftLayerError base
    # class so the credential hint below can fire.
    except SoftLayer.SoftLayerAPIError as ex:
        if 'invalid api token' in ex.faultString.lower():
            print("Authentication Failed: To update your credentials, use 'slcli config setup'")
            exit_status = 1
        else:
            print(str(ex))
            exit_status = 1
    except SoftLayer.SoftLayerError as ex:
        print(str(ex))
        exit_status = 1
    except exceptions.CLIAbort as ex:
        print(str(ex.message))
        # CLIAbort carries its own exit code.
        exit_status = ex.code
    except Exception:
        if reraise_exceptions:
            raise
        # Imported lazily: only needed on the unexpected-error path.
        import traceback
        print("An unexpected error has occured:")
        print(str(traceback.format_exc()))
        print("Feel free to report this error as it is likely a bug:")
        print("    https://github.com/softlayer/softlayer-python/issues")
        print("The following snippet should be able to reproduce the error")
        exit_status = 1
    sys.exit(exit_status)
"def",
"main",
"(",
"reraise_exceptions",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"exit_status",
"=",
"0",
"try",
":",
"cli",
".",
"main",
"(",
"*",
"*",
"kwargs",
")",
"except",
"SoftLayer",
".",
"SoftLayerAPIError",
"as",
"ex",
":",
"if",
... | 34.40625 | 19 |
def split_qname(self, cybox_id):
"""
Separate the namespace from the identifier in a qualified name and lookup the namespace URI associated
with the given namespace.
"""
if ':' in cybox_id:
(namespace, uid) = cybox_id.split(':', 1)
else:
namespace = None
uid = cybox_id
if namespace and namespace in self.namespace_dict:
namespace_uri = self.namespace_dict[namespace]
else:
logger.warning("Could not retrieve namespace for identifier %s" % (cybox_id))
# TODO: Introduce configurable URI
namespace_uri = None
if not namespace_uri:
if self.default_identifier_ns_uri:
namespace_uri = self.default_identifier_ns_uri
else:
namespace_uri = "%s/%s" % (DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX, namespace)
return (namespace, namespace_uri, uid) | [
"def",
"split_qname",
"(",
"self",
",",
"cybox_id",
")",
":",
"if",
"':'",
"in",
"cybox_id",
":",
"(",
"namespace",
",",
"uid",
")",
"=",
"cybox_id",
".",
"split",
"(",
"':'",
",",
"1",
")",
"else",
":",
"namespace",
"=",
"None",
"uid",
"=",
"cybox... | 37.32 | 21.24 |
    def getMaxPacketSize(self, endpoint):
        """
        Get device's max packet size for given endpoint.

        :param endpoint: endpoint address to query.
        :return: the max packet size reported by libusb.

        Warning: this function will not always give you the expected result.
        See https://libusb.org/ticket/77 . You should instead consult the
        endpoint descriptor of current configuration and alternate setting.
        """
        result = libusb1.libusb_get_max_packet_size(self.device_p, endpoint)
        # A negative libusb status code is converted into a USBError here.
        mayRaiseUSBError(result)
        return result
"def",
"getMaxPacketSize",
"(",
"self",
",",
"endpoint",
")",
":",
"result",
"=",
"libusb1",
".",
"libusb_get_max_packet_size",
"(",
"self",
".",
"device_p",
",",
"endpoint",
")",
"mayRaiseUSBError",
"(",
"result",
")",
"return",
"result"
] | 42.545455 | 20.545455 |
def _check_FITS_extvers(img, extname, extvers):
    """Return True when every requested extension version of type
    ``extname`` is present in the FITS file ``img``.

    ``None`` entries in ``extvers`` stand for a default version: 1 when
    ``extname`` is a named (string) extension, 0 otherwise. Returns False
    as soon as any requested version is missing.
    """
    default_extn = 1 if isinstance(extname, str) else 0
    requested = extvers if isinstance(extvers, list) else [extvers]
    wanted = {default_extn if ver is None else ver for ver in requested}
    available = set(get_extver_list(img, extname))
    return wanted <= available
"def",
"_check_FITS_extvers",
"(",
"img",
",",
"extname",
",",
"extvers",
")",
":",
"default_extn",
"=",
"1",
"if",
"isinstance",
"(",
"extname",
",",
"str",
")",
"else",
"0",
"if",
"isinstance",
"(",
"extvers",
",",
"list",
")",
":",
"extv",
"=",
"[",... | 42.75 | 22.3125 |
    async def connect(
        self,
        host: str,
        port: int,
        af: socket.AddressFamily = socket.AF_UNSPEC,
        ssl_options: Union[Dict[str, Any], ssl.SSLContext] = None,
        max_buffer_size: int = None,
        source_ip: str = None,
        source_port: int = None,
        timeout: Union[float, datetime.timedelta] = None,
    ) -> IOStream:
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection.
        In case the user needs to resolve and
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

        Raises `TimeoutError` if the input future does not complete before
        ``timeout``, which may be specified in any form allowed by
        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
        relative to `.IOLoop.time`)

        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.

        .. versionchanged:: 4.5
           Added the ``source_ip`` and ``source_port`` arguments.

        .. versionchanged:: 5.0
           Added the ``timeout`` argument.
        """
        # Normalize the timeout into an absolute IOLoop deadline so the same
        # deadline covers resolution, connection, and TLS below.
        if timeout is not None:
            if isinstance(timeout, numbers.Real):
                timeout = IOLoop.current().time() + timeout
            elif isinstance(timeout, datetime.timedelta):
                timeout = IOLoop.current().time() + timeout.total_seconds()
            else:
                raise TypeError("Unsupported timeout %r" % timeout)
        if timeout is not None:
            addrinfo = await gen.with_timeout(
                timeout, self.resolver.resolve(host, port, af)
            )
        else:
            addrinfo = await self.resolver.resolve(host, port, af)
        # _Connector implements the "happy eyeballs" attempt over the
        # resolved address list, creating a stream per attempted address.
        connector = _Connector(
            addrinfo,
            functools.partial(
                self._create_stream,
                max_buffer_size,
                source_ip=source_ip,
                source_port=source_port,
            ),
        )
        af, addr, stream = await connector.start(connect_timeout=timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            # Wrap the established stream with TLS, still bounded by the
            # same absolute deadline when one was given.
            if timeout is not None:
                stream = await gen.with_timeout(
                    timeout,
                    stream.start_tls(
                        False, ssl_options=ssl_options, server_hostname=host
                    ),
                )
            else:
                stream = await stream.start_tls(
                    False, ssl_options=ssl_options, server_hostname=host
                )
        return stream
"async",
"def",
"connect",
"(",
"self",
",",
"host",
":",
"str",
",",
"port",
":",
"int",
",",
"af",
":",
"socket",
".",
"AddressFamily",
"=",
"socket",
".",
"AF_UNSPEC",
",",
"ssl_options",
":",
"Union",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"... | 39.28 | 19.32 |
    def seek(self, offset, whence=0):
        """Change the file position.

        The new position is specified by offset, relative to the
        position indicated by whence. Possible values for whence are:

            0: start of stream (default): offset must not be negative
            1: current stream position
            2: end of stream; offset must not be positive

        Returns the new file position.

        Note that seeking is emulated, so depending on the parameters,
        this operation may be extremely slow.
        """
        self._check_can_seek()
        # Recalculate offset as an absolute file position.
        if whence == 0:
            pass
        elif whence == 1:
            offset = self._pos + offset
        elif whence == 2:
            # Seeking relative to EOF - we need to know the file's size.
            if self._size < 0:
                # Decompress to the end once so self._size becomes known.
                self._read_all(return_data=False)
            offset = self._size + offset
        else:
            raise ValueError("Invalid value for whence: {}".format(whence))
        # Make it so that offset is the number of bytes to skip forward.
        if offset is None:
            #This is not needed on Python 3 where the comparison to self._pos
            #will fail with a TypeError.
            raise TypeError("Seek offset should be an integer, not None")
        if offset < self._pos:
            # Target lies behind the current position: restart decompression
            # from the beginning of the stream.
            self._rewind()
        else:
            offset -= self._pos
        # Read and discard data until we reach the desired position.
        if self._mode != _MODE_READ_EOF:
            self._read_block(offset, return_data=False)
        return self._pos
"def",
"seek",
"(",
"self",
",",
"offset",
",",
"whence",
"=",
"0",
")",
":",
"self",
".",
"_check_can_seek",
"(",
")",
"# Recalculate offset as an absolute file position.",
"if",
"whence",
"==",
"0",
":",
"pass",
"elif",
"whence",
"==",
"1",
":",
"offset",
... | 35.555556 | 20.288889 |
    def get(self, request, path):
        """Return HTML (or other related content) for Meteor.

        The special path ``meteor_runtime_config.js`` is synthesized from the
        request and configured settings; any other path is looked up in the
        static ``url_map``, falling back to the main HTML shell when unknown.
        """
        if path == 'meteor_runtime_config.js':
            config = {
                'DDP_DEFAULT_CONNECTION_URL': request.build_absolute_uri('/'),
                'PUBLIC_SETTINGS': self.meteor_settings.get('public', {}),
                'ROOT_URL': request.build_absolute_uri(
                    '%s/' % (
                        self.runtime_config.get('ROOT_URL_PATH_PREFIX', ''),
                    ),
                ),
                'ROOT_URL_PATH_PREFIX': '',
            }
            # Use HTTPS instead of HTTP if SECURE_SSL_REDIRECT is set
            if config['DDP_DEFAULT_CONNECTION_URL'].startswith('http:') \
                    and settings.SECURE_SSL_REDIRECT:
                config['DDP_DEFAULT_CONNECTION_URL'] = 'https:%s' % (
                    config['DDP_DEFAULT_CONNECTION_URL'].split(':', 1)[1],
                )
            # Explicit runtime_config entries win over the computed defaults.
            config.update(self.runtime_config)
            return HttpResponse(
                '__meteor_runtime_config__ = %s;' % dumps(config),
                content_type='text/javascript',
            )
        try:
            file_path, content_type = self.url_map[path]
            with open(file_path, 'r') as content:
                return HttpResponse(
                    content.read(),
                    content_type=content_type,
                )
        except KeyError:
            # Unknown path: serve the single-page application HTML.
            return HttpResponse(self.html)
"def",
"get",
"(",
"self",
",",
"request",
",",
"path",
")",
":",
"if",
"path",
"==",
"'meteor_runtime_config.js'",
":",
"config",
"=",
"{",
"'DDP_DEFAULT_CONNECTION_URL'",
":",
"request",
".",
"build_absolute_uri",
"(",
"'/'",
")",
",",
"'PUBLIC_SETTINGS'",
"... | 43.787879 | 17.666667 |
def save_to_store(self):
"""Save index to store.
:raise AttributeError: If no datastore is defined
"""
if not self._store:
raise AttributeError('No datastore defined!')
saved_data = self.save_to_data(in_place=True)
data = Serializer.serialize(saved_data)
self._store.store_blob(data, 'all_keys_with_undefined') | [
"def",
"save_to_store",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_store",
":",
"raise",
"AttributeError",
"(",
"'No datastore defined!'",
")",
"saved_data",
"=",
"self",
".",
"save_to_data",
"(",
"in_place",
"=",
"True",
")",
"data",
"=",
"Serialize... | 33.636364 | 16.909091 |
    def year(self, value=None):
        """
        We do *NOT* know for what year we are converting so lets assume the
        year has 365 days.

        With ``value=None`` acts as a getter (duration in years); otherwise
        acts as a setter, storing ``value`` years via the day/millisecond
        converters.
        """
        if value is None:
            return self.day() / 365
        else:
            # NOTE(review): unlike the getter branch this does not return the
            # result of self.millisecond(...) -- presumably deliberate for a
            # setter, but confirm against the sibling unit methods.
            self.millisecond(self.day(value * 365))
"def",
"year",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"self",
".",
"day",
"(",
")",
"/",
"365",
"else",
":",
"self",
".",
"millisecond",
"(",
"self",
".",
"day",
"(",
"value",
"*",
"365",
"... | 30.444444 | 13.333333 |
def haversine(lon1, lat1, lon2, lat2, earth_radius=6357000):
    """Calculate the great circle distance between two points
    on the earth (specified in decimal degrees).

    .. seealso:: :func:`distance_points`

    :param float lon1: longitude of first place (decimal degrees)
    :param float lat1: latitude of first place (decimal degrees)
    :param float lon2: longitude of second place (decimal degrees)
    :param float lat2: latitude of second place (decimal degrees)
    :param earth_radius: radius of the earth expressed in the desired output
        unit (use 6367 for kilometres, 6367000 for metres, 3956 for miles);
        the default of 6357000 therefore yields a distance in *metres*.
        See http://stackoverflow.com/questions/5283900/what-earth-radius-should-i-use-to-calculate-distances-near-the-poles

    :Example:

    >>> London_long=-0.126 ; London_lat=51.50; Paris_long = 2.350; Paris_lat = 48.856
    >>> haversine(London_long, London_lat, Paris_long, Paris_lat, earth_radius=6367)
    342.55375272454864

    :returns: float distance in the same unit as ``earth_radius``
    """
    # convert decimal degrees to radians
    lon1, lat1, lon2, lat2 = list(map(math.radians, [lon1, lat1, lon2, lat2]))
    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
    c = 2 * math.asin(math.sqrt(a))
    distance = earth_radius * c  # central angle times radius
    return distance
"def",
"haversine",
"(",
"lon1",
",",
"lat1",
",",
"lon2",
",",
"lat2",
",",
"earth_radius",
"=",
"6357000",
")",
":",
"# convert decimal degrees to radiant",
"lon1",
",",
"lat1",
",",
"lon2",
",",
"lat2",
"=",
"list",
"(",
"map",
"(",
"math",
".",
"radi... | 45.3 | 26.266667 |
def build_area_source_geometry(area_source):
    """
    Returns the area source geometry as a Node

    :param area_source:
        Area source model as an instance of the :class:
        `openquake.hazardlib.source.area.AreaSource`
    :returns:
        Instance of :class:`openquake.baselib.node.Node`
    """
    # Flatten the polygon boundary into an interleaved lon/lat list.
    coords = []
    for lon, lat in zip(area_source.polygon.lons, area_source.polygon.lats):
        coords.append(lon)
        coords.append(lat)
    # Build the gml:Polygon node tree from the inside out.
    poslist = Node("gml:posList", text=coords)
    ring = Node("gml:LinearRing", nodes=[poslist])
    exterior = Node("gml:exterior", nodes=[ring])
    polygon = Node("gml:Polygon", nodes=[exterior])
    upper = Node(
        "upperSeismoDepth", text=area_source.upper_seismogenic_depth)
    lower = Node(
        "lowerSeismoDepth", text=area_source.lower_seismogenic_depth)
    return Node(
        "areaGeometry", {'discretization': area_source.area_discretization},
        nodes=[polygon, upper, lower])
"def",
"build_area_source_geometry",
"(",
"area_source",
")",
":",
"geom",
"=",
"[",
"]",
"for",
"lon_lat",
"in",
"zip",
"(",
"area_source",
".",
"polygon",
".",
"lons",
",",
"area_source",
".",
"polygon",
".",
"lats",
")",
":",
"geom",
".",
"extend",
"(... | 42.208333 | 19.208333 |
def construct_url(self):
"""Construct a full trakt request URI, with `params` and `query`."""
path = [self.path]
path.extend(self.params)
# Build URL
url = self.client.base_url + '/'.join(
str(value) for value in path
if value
)
# Append query parameters (if defined)
query = self.encode_query(self.query)
if query:
url += '?' + query
return url | [
"def",
"construct_url",
"(",
"self",
")",
":",
"path",
"=",
"[",
"self",
".",
"path",
"]",
"path",
".",
"extend",
"(",
"self",
".",
"params",
")",
"# Build URL",
"url",
"=",
"self",
".",
"client",
".",
"base_url",
"+",
"'/'",
".",
"join",
"(",
"str... | 24.888889 | 19 |
def post(self, query_continue=None, upload_file=None, auth=None,
continuation=False, **params):
"""Makes an API request with the POST method
:Parameters:
query_continue : `dict`
Optionally, the value of a query continuation 'continue' field.
upload_file : `bytes`
The bytes of a file to upload.
auth : mixed
Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
continuation : `bool`
If true, a continuation will be attempted and a generator of
JSON response documents will be returned.
params :
Keyword parameters to be sent in the POST message body.
:Returns:
A response JSON documents (or a generator of documents if
`continuation == True`)
:Raises:
:class:`mwapi.errors.APIError` : if the API responds with an error
"""
if upload_file is not None:
files = {'file': upload_file}
else:
files = None
return self.request('POST', params=params, auth=auth,
query_continue=query_continue, files=files,
continuation=continuation) | [
"def",
"post",
"(",
"self",
",",
"query_continue",
"=",
"None",
",",
"upload_file",
"=",
"None",
",",
"auth",
"=",
"None",
",",
"continuation",
"=",
"False",
",",
"*",
"*",
"params",
")",
":",
"if",
"upload_file",
"is",
"not",
"None",
":",
"files",
"... | 39.28125 | 20.75 |
def get_sigma(database_file_name='', e_min=np.nan, e_max=np.nan, e_step=np.nan, t_kelvin=None):
    """retrieve the Energy and sigma axis for the given isotope

    :param database_file_name: path/to/file with extension
    :type database_file_name: string
    :param e_min: left energy range in eV of new interpolated data
    :type e_min: float
    :param e_max: right energy range in eV of new interpolated data
    :type e_max: float
    :param e_step: energy step in eV for interpolation
    :type e_step: float
    :param t_kelvin: temperature in Kelvin
    :type t_kelvin: float
    :return: {'energy': np.array, 'sigma': np.array}
    :rtype: dict
    :raises ValueError: when a temperature is given (not supported yet)
    :raises IOError: when the database file is not a '.csv' file
    """
    # FIX: np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling
    # (they were the same float object in earlier releases).
    file_extension = os.path.splitext(database_file_name)[1]
    if t_kelvin is not None:
        raise ValueError("Doppler broadened cross-section in not yet supported in current version.")
    # Only '.csv' databases are supported at room temperature.
    if file_extension != '.csv':
        raise IOError("Cross-section File type must be '.csv'")
    _df = get_database_data(file_name=database_file_name)
    _dict = get_interpolated_data(df=_df, e_min=e_min, e_max=e_max,
                                  e_step=e_step)
    return {'energy_eV': _dict['x_axis'],
            'sigma_b': _dict['y_axis']}
"def",
"get_sigma",
"(",
"database_file_name",
"=",
"''",
",",
"e_min",
"=",
"np",
".",
"NaN",
",",
"e_max",
"=",
"np",
".",
"NaN",
",",
"e_step",
"=",
"np",
".",
"NaN",
",",
"t_kelvin",
"=",
"None",
")",
":",
"file_extension",
"=",
"os",
".",
"pat... | 39.71875 | 22.03125 |
def to_excel(self, *args):
"""
Dump all the data to excel, fname and path can be passed as args
"""
path = os.getcwd()
fname = self.fname.replace(".tpl", "_tpl") + ".xlsx"
idxs = self.filter_trends("")
for idx in idxs:
self.extract(idx)
data_df = pd.DataFrame(self.data)
data_df.columns = self.label.values()
data_df.insert(0, "Time [s]", self.time)
if len(args) > 0 and args[0] != "":
path = args[0]
if os.path.exists(path) == False:
os.mkdir(path)
data_df.to_excel(path + os.sep + fname)
else:
data_df.to_excel(self.path + os.sep + fname) | [
"def",
"to_excel",
"(",
"self",
",",
"*",
"args",
")",
":",
"path",
"=",
"os",
".",
"getcwd",
"(",
")",
"fname",
"=",
"self",
".",
"fname",
".",
"replace",
"(",
"\".tpl\"",
",",
"\"_tpl\"",
")",
"+",
"\".xlsx\"",
"idxs",
"=",
"self",
".",
"filter_t... | 37.473684 | 11.052632 |
    def serve(self, handler):
        """Serve calls over this connection using the given RequestHandler.

        :param handler:
            RequestHandler to process the requests through
        :return:
            A Future that resolves (to None) once the loop is done running --
            which happens once this connection is closed.
        """
        assert handler, "handler is required"
        # NOTE(review): `await` became a reserved keyword in Python 3.7, so
        # this generator-coroutine only runs on older interpreters unless the
        # `await` method is renamed upstream.
        while not self.closed:
            message = yield self.await()
            try:
                handler(message, self)
            except Exception:
                # A handler failure is logged but must not kill the serve
                # loop for subsequent messages.
                # TODO Send error frame back
                log.exception("Failed to process %s", repr(message))
"def",
"serve",
"(",
"self",
",",
"handler",
")",
":",
"assert",
"handler",
",",
"\"handler is required\"",
"while",
"not",
"self",
".",
"closed",
":",
"message",
"=",
"yield",
"self",
".",
"await",
"(",
")",
"try",
":",
"handler",
"(",
"message",
",",
... | 34.315789 | 17.473684 |
def pin_auth(self, request):
"""Authenticates with the pin."""
exhausted = False
auth = False
trust = self.check_pin_trust(request.environ)
# If the trust return value is `None` it means that the cookie is
# set but the stored pin hash value is bad. This means that the
# pin was changed. In this case we count a bad auth and unset the
# cookie. This way it becomes harder to guess the cookie name
# instead of the pin as we still count up failures.
bad_cookie = False
if trust is None:
self._fail_pin_auth()
bad_cookie = True
# If we're trusted, we're authenticated.
elif trust:
auth = True
# If we failed too many times, then we're locked out.
elif self._failed_pin_auth > 10:
exhausted = True
# Otherwise go through pin based authentication
else:
entered_pin = request.args.get("pin")
if entered_pin.strip().replace("-", "") == self.pin.replace("-", ""):
self._failed_pin_auth = 0
auth = True
else:
self._fail_pin_auth()
rv = Response(
json.dumps({"auth": auth, "exhausted": exhausted}),
mimetype="application/json",
)
if auth:
rv.set_cookie(
self.pin_cookie_name,
"%s|%s" % (int(time.time()), hash_pin(self.pin)),
httponly=True,
)
elif bad_cookie:
rv.delete_cookie(self.pin_cookie_name)
return rv | [
"def",
"pin_auth",
"(",
"self",
",",
"request",
")",
":",
"exhausted",
"=",
"False",
"auth",
"=",
"False",
"trust",
"=",
"self",
".",
"check_pin_trust",
"(",
"request",
".",
"environ",
")",
"# If the trust return value is `None` it means that the cookie is",
"# set ... | 34.326087 | 19.347826 |
def citedby_pid(self, pid, metaonly=False, from_heap=True):
"""
Retrieve citedby documents from a given PID number.
pid: SciELO PID number
metaonly: will retrieve only the metadata of the requested article citations including the number of citations it has received.
from_heap: will retrieve the number of citations from a preproduced report, it will not fetch the api. Much faster results but not extremelly updated.
"""
if from_heap is True:
result = citations.raw_data(pid)
if result and 'cited_by' in result and metaonly is True:
del(result['cited_by'])
return result
if result:
return result
result = self.client.citedby_pid(pid, metaonly=metaonly)
try:
return json.loads(result)
except:
return None | [
"def",
"citedby_pid",
"(",
"self",
",",
"pid",
",",
"metaonly",
"=",
"False",
",",
"from_heap",
"=",
"True",
")",
":",
"if",
"from_heap",
"is",
"True",
":",
"result",
"=",
"citations",
".",
"raw_data",
"(",
"pid",
")",
"if",
"result",
"and",
"'cited_by... | 34.96 | 27.28 |
def check_online(stream):
    """
    Used to check user's online opponents and show their online/offline status on page on init

    :param stream: an asyncio-style queue of packets; each packet is expected
        to carry a 'session_key' and a 'username'.
    """
    while True:
        packet = yield from stream.get()
        session_id = packet.get('session_key')
        opponent_username = packet.get('username')
        if session_id and opponent_username:
            user_owner = get_user_from_session(session_id)
            if user_owner:
                # Find all connections including user_owner as opponent
                online_opponents = list(filter(lambda x: x[1] == user_owner.username, ws_connections))
                logger.debug('User ' + user_owner.username + ' has ' + str(len(online_opponents)) + ' opponents online')
                # Send user online statuses of his opponents
                socket = ws_connections.get((user_owner.username, opponent_username))
                if socket:
                    online_opponents_usernames = [i[0] for i in online_opponents]
                    yield from target_message(socket,
                                              {'type': 'gone-online', 'usernames': online_opponents_usernames})
                else:
                    pass  # socket for the pair user_owner.username, opponent_username not found
                    # this can be in case the user has already gone offline
            else:
                pass  # invalid session id
        else:
            pass  # malformed packet: missing session key or username
"def",
"check_online",
"(",
"stream",
")",
":",
"while",
"True",
":",
"packet",
"=",
"yield",
"from",
"stream",
".",
"get",
"(",
")",
"session_id",
"=",
"packet",
".",
"get",
"(",
"'session_key'",
")",
"opponent_username",
"=",
"packet",
".",
"get",
"(",... | 52.851852 | 26.62963 |
def create_ellipse_mesh(points, **kwargs):
    """Visualize the ellipse by using the mesh of the points.

    Returns a 2-tuple of plotly traces: the filled Mesh3d surface and a thin
    black Scatter3d outline over the same points.
    """
    import plotly.graph_objs as go
    xs, ys, zs = points.T
    surface = go.Mesh3d(x=xs, y=ys, z=zs, **kwargs)
    outline = go.Scatter3d(
        x=xs, y=ys, z=zs,
        marker=dict(size=0.01),
        line=dict(width=2, color='#000000'),
        showlegend=False,
        hoverinfo='none',
    )
    return (surface, outline)
"def",
"create_ellipse_mesh",
"(",
"points",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"plotly",
".",
"graph_objs",
"as",
"go",
"x",
",",
"y",
",",
"z",
"=",
"points",
".",
"T",
"return",
"(",
"go",
".",
"Mesh3d",
"(",
"x",
"=",
"x",
",",
"y",... | 37.583333 | 10.416667 |
def patch_datasette():
    """
    Monkey patching for original Datasette

    Replaces ``Datasette.inspect`` and ``Datasette.execute`` with versions
    that fall back to the pluggable ``connectors`` machinery for files that
    sqlite3 cannot open, keeping the originals reachable as
    ``original_inspect`` / ``original_execute``.
    """
    def inspect(self):
        " Inspect the database and return a dictionary of table metadata "
        if self._inspect:
            return self._inspect
        _inspect = {}
        files = self.files
        for filename in files:
            # Temporarily narrow self.files so original_inspect only sees
            # the current file.
            self.files = (filename,)
            path = Path(filename)
            name = path.stem
            if name in _inspect:
                raise Exception("Multiple files with the same stem %s" % name)
            try:
                _inspect[name] = self.original_inspect()[name]
            except sqlite3.DatabaseError:
                # Not a sqlite3 file: delegate to the connector plugins.
                tables, views, dbtype = connectors.inspect(path)
                _inspect[name] = {
                    "hash": inspect_hash(path),
                    "file": str(path),
                    "dbtype": dbtype,
                    "tables": tables,
                    "views": views,
                }
        self.files = files
        self._inspect = _inspect
        return self._inspect
    datasette.app.Datasette.original_inspect = datasette.app.Datasette.inspect
    datasette.app.Datasette.inspect = inspect
    async def execute(self, db_name, sql, params=None, truncate=False, custom_time_limit=None, page_size=None):
        """Executes sql against db_name in a thread"""
        page_size = page_size or self.page_size
        def is_sqlite3_conn():
            conn = getattr(connections, db_name, None)
            if not conn:
                info = self.inspect()[db_name]
                return info.get('dbtype', 'sqlite3') == 'sqlite3'
            else:
                return isinstance(conn, sqlite3.Connection)
        def sql_operation_in_thread():
            conn = getattr(connections, db_name, None)
            if not conn:
                # Lazily open (and cache) a connector-backed connection.
                info = self.inspect()[db_name]
                conn = connectors.connect(info['file'], info['dbtype'])
                setattr(connections, db_name, conn)
            rows, truncated, description = conn.execute(
                sql,
                params or {},
                truncate=truncate,
                page_size=page_size,
                max_returned_rows=self.max_returned_rows,
            )
            return Results(rows, truncated, description)
        if is_sqlite3_conn():
            return await self.original_execute(db_name, sql, params=params, truncate=truncate, custom_time_limit=custom_time_limit, page_size=page_size)
        else:
            return await asyncio.get_event_loop().run_in_executor(
                self.executor, sql_operation_in_thread
            )
    datasette.app.Datasette.original_execute = datasette.app.Datasette.execute
    datasette.app.Datasette.execute = execute
"def",
"patch_datasette",
"(",
")",
":",
"def",
"inspect",
"(",
"self",
")",
":",
"\" Inspect the database and return a dictionary of table metadata \"",
"if",
"self",
".",
"_inspect",
":",
"return",
"self",
".",
"_inspect",
"_inspect",
"=",
"{",
"}",
"files",
"="... | 35.447368 | 20.157895 |
    def GetPixelColor(self, x: int, y: int) -> int:
        """
        Get color value of a pixel.
        x: int.
        y: int.
        Return int, argb color.
        b = argb & 0x0000FF
        g = (argb & 0x00FF00) >> 8
        r = (argb & 0xFF0000) >> 16
        a = (argb & 0xFF000000) >> 24
        """
        return _DllClient.instance().dll.BitmapGetPixel(self._bitmap, x, y)
"def",
"GetPixelColor",
"(",
"self",
",",
"x",
":",
"int",
",",
"y",
":",
"int",
")",
"->",
"int",
":",
"return",
"_DllClient",
".",
"instance",
"(",
")",
".",
"dll",
".",
"BitmapGetPixel",
"(",
"self",
".",
"_bitmap",
",",
"x",
",",
"y",
")"
] | 30.916667 | 11.25 |
def atc(jobid):
    '''
    Print the at(1) script that will run for the passed job
    id. This is mostly for debugging so the output will
    just be text.

    CLI Example:

    .. code-block:: bash

        salt '*' at.atc <jobid>
    '''
    # __virtual__() has no access to __salt__, so availability is emulated
    # here instead: a None result means at(1) is missing.
    output = _cmd('at', '-c', six.text_type(jobid))
    if output is None:
        return '\'at.atc\' is not available.'
    if output == '':
        return {'error': 'invalid job id \'{0}\''.format(jobid)}
    return output
"def",
"atc",
"(",
"jobid",
")",
":",
"# Shim to produce output similar to what __virtual__() should do",
"# but __salt__ isn't available in __virtual__()",
"output",
"=",
"_cmd",
"(",
"'at'",
",",
"'-c'",
",",
"six",
".",
"text_type",
"(",
"jobid",
")",
")",
"if",
"o... | 25.954545 | 24.318182 |
def _make_reversed_operation_costs(self):
"""
Заполняет массив _reversed_operation_costs
на основе имеющегося массива operation_costs
"""
_reversed_operation_costs = dict()
for up, costs in self.operation_costs.items():
for low, cost in costs.items():
if low not in _reversed_operation_costs:
_reversed_operation_costs[low] = dict()
_reversed_operation_costs[low][up] = cost
self._reversed_operation_costs = _reversed_operation_costs | [
"def",
"_make_reversed_operation_costs",
"(",
"self",
")",
":",
"_reversed_operation_costs",
"=",
"dict",
"(",
")",
"for",
"up",
",",
"costs",
"in",
"self",
".",
"operation_costs",
".",
"items",
"(",
")",
":",
"for",
"low",
",",
"cost",
"in",
"costs",
".",... | 45.166667 | 10 |
def patches(self, dwn, install, comp_sum, uncomp_sum):
"""Seperates packages from patches/ directory
"""
dwnp, installp, comp_sump, uncomp_sump = ([] for i in range(4))
for d, i, c, u in zip(dwn, install, comp_sum, uncomp_sum):
if "_slack" + slack_ver() in i:
dwnp.append(d)
dwn.remove(d)
installp.append(i)
install.remove(i)
comp_sump.append(c)
comp_sum.remove(c)
uncomp_sump.append(u)
uncomp_sum.remove(u)
if "--patches" in self.flag:
return dwnp, installp, comp_sump, uncomp_sump
return dwn, install, comp_sum, uncomp_sum | [
"def",
"patches",
"(",
"self",
",",
"dwn",
",",
"install",
",",
"comp_sum",
",",
"uncomp_sum",
")",
":",
"dwnp",
",",
"installp",
",",
"comp_sump",
",",
"uncomp_sump",
"=",
"(",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"4",
")",
")",
"for",
"d",
... | 41.647059 | 9.176471 |
def _is_valid_dkim(self, value):
"""Check if value is a valid DKIM"""
validator_dict = {'h': lambda val: val in ['sha1', 'sha256'],
's': lambda val: val in ['*', 'email'],
't': lambda val: val in ['y', 's'],
'v': lambda val: val == 'DKIM1',
'k': lambda val: val == 'rsa',
'n': lambda _: True,
'g': lambda _: True}
splitted = value.split('\\;')
found_key = False
for splitted_value in splitted:
sub_split = map(lambda x: x.strip(), splitted_value.split("=", 1))
if len(sub_split) < 2:
return False
key, value = sub_split[0], sub_split[1]
if key == "p":
is_valid_key = self._is_valid_dkim_key(value)
if not is_valid_key:
return False
found_key = True
else:
is_valid_key = validator_dict.get(key, lambda _: False)(value)
if not is_valid_key:
return False
return found_key | [
"def",
"_is_valid_dkim",
"(",
"self",
",",
"value",
")",
":",
"validator_dict",
"=",
"{",
"'h'",
":",
"lambda",
"val",
":",
"val",
"in",
"[",
"'sha1'",
",",
"'sha256'",
"]",
",",
"'s'",
":",
"lambda",
"val",
":",
"val",
"in",
"[",
"'*'",
",",
"'ema... | 42.333333 | 14.777778 |
def split_code_and_text_blocks(source_file):
    """Return list with source file separated into code and text blocks.
    Returns
    -------
    blocks : list of (label, content)
        List where each element is a tuple with the label ('text' or 'code'),
        and content string of block.
    """
    docstring, remainder = get_docstring_and_rest(source_file)
    blocks = [('text', docstring)]
    # A "header" is a line of 20+ '#' characters, optionally followed by
    # consecutive comment lines that form a text block.
    header_re = re.compile(
        r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
        flags=re.M)
    strip_hash = re.compile('^#', flags=re.M)
    cursor = 0
    for match in header_re.finditer(remainder):
        start, end = match.span()
        code_chunk = remainder[cursor:start]
        # Drop the leading '#' from every comment line and dedent.
        text_chunk = dedent(strip_hash.sub('', match.group('text_content')))
        if code_chunk.strip():
            blocks.append(('code', code_chunk))
        if text_chunk.strip():
            blocks.append(('text', text_chunk))
        cursor = end
    tail = remainder[cursor:]
    if tail.strip():
        blocks.append(('code', tail))
    return blocks
"def",
"split_code_and_text_blocks",
"(",
"source_file",
")",
":",
"docstring",
",",
"rest_of_content",
"=",
"get_docstring_and_rest",
"(",
"source_file",
")",
"blocks",
"=",
"[",
"(",
"'text'",
",",
"docstring",
")",
"]",
"pattern",
"=",
"re",
".",
"compile",
... | 35.6 | 19 |
def _get_calibration_for_hits(hits, lookup):
"""Append the position, direction and t0 columns and add t0 to time"""
n = len(hits)
cal = np.empty((n, 9))
for i in range(n):
calib = lookup[hits['dom_id'][i]][hits['channel_id'][i]]
cal[i] = calib
dir_x = cal[:, 3]
dir_y = cal[:, 4]
dir_z = cal[:, 5]
du = cal[:, 7]
floor = cal[:, 8]
pos_x = cal[:, 0]
pos_y = cal[:, 1]
pos_z = cal[:, 2]
t0 = cal[:, 6]
return [dir_x, dir_y, dir_z, du, floor, pos_x, pos_y, pos_z, t0] | [
"def",
"_get_calibration_for_hits",
"(",
"hits",
",",
"lookup",
")",
":",
"n",
"=",
"len",
"(",
"hits",
")",
"cal",
"=",
"np",
".",
"empty",
"(",
"(",
"n",
",",
"9",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"calib",
"=",
"lookup... | 27.368421 | 20.315789 |
def get_line_matches(input_file: str,
                     pattern: str,
                     max_occurrencies: int = 0,
                     loose_matching: bool = True) -> dict:
    r"""Get the line numbers of matched patterns.
    :parameter input_file: the file that needs to be read.
    :parameter pattern: the pattern that needs to be searched.
    :parameter max_occurrencies: the maximum number of expected occurrencies.
        Defaults to ``0`` which means that all occurrencies will be matched.
    :parameter loose_matching: ignore leading and trailing whitespace
        characters for both pattern and matched strings. Defaults to ``True``.
    :type input_file: str
    :type pattern: str
    :type max_occurrencies: int
    :type loose_matching: bool
    :returns: occurrency_matches, A dictionary where each key corresponds
        to the number of occurrencies and each value to the matched line number.
        If no match was found for that particular occurrency, the key is not
        set. This means for example: if the first occurrency of
        pattern is at line y then: x[1] = y.
    :rtype: dict
    :raises: a built-in exception.
    .. note::
        Line numbers start from ``1``.
    """
    assert max_occurrencies >= 0
    # Idiom fix: the original tracked occurrencies with a float counter
    # (0.0, += 1.0, int() casts) and a float('inf') sentinel; a plain int
    # counter with an explicit "unlimited" flag is equivalent and clearer.
    unlimited = max_occurrencies == 0
    if loose_matching:
        pattern = pattern.strip()
    occurrency_matches: dict = dict()
    occurrencies = 0
    with open(input_file, 'r') as f:
        for line_number, line in enumerate(f, start=1):
            if not unlimited and occurrencies >= max_occurrencies:
                break
            if loose_matching:
                line = line.strip()
            # Note: without loose matching the line keeps its trailing
            # newline, so the pattern must include it to match.
            if line == pattern:
                occurrencies += 1
                occurrency_matches[occurrencies] = line_number
    return occurrency_matches
"def",
"get_line_matches",
"(",
"input_file",
":",
"str",
",",
"pattern",
":",
"str",
",",
"max_occurrencies",
":",
"int",
"=",
"0",
",",
"loose_matching",
":",
"bool",
"=",
"True",
")",
"->",
"dict",
":",
"assert",
"max_occurrencies",
">=",
"0",
"occurren... | 37.36 | 18.52 |
    def adjust_weight(self, stock_code, weight):
        """Rebalance a Xueqiu (Snowball) portfolio position.

        :param stock_code: str, code of the stock to adjust
        :param weight: float, target position weight after the adjustment,
            a percentage between 0 and 100
        :return: ``None`` on success or when the HTTP request itself fails;
            a one-element list with an error dict when the rebalance API
            responds with an error
        :raises exceptions.TradeError: if the stock cannot be found or is
            not tradable (unlisted, suspended, limit-up/down, delisted)
        """
        stock = self._search_stock_info(stock_code)
        if stock is None:
            raise exceptions.TradeError(u"没有查询要操作的股票信息")
        if stock["flag"] != 1:
            raise exceptions.TradeError(u"未上市、停牌、涨跌停、退市的股票无法操作。")
        # keep the target weight to two decimal places
        weight = round(weight, 2)
        # fetch the current positions of the portfolio
        position_list = self._get_position()
        # update the weight of an already-held position in place
        for position in position_list:
            if position["stock_id"] == stock["stock_id"]:
                position["proactive"] = True
                position["weight"] = weight
        # if the stock is not held yet and the target weight is non-zero,
        # append it as a brand-new position entry
        if weight != 0 and stock["stock_id"] not in [
            k["stock_id"] for k in position_list
        ]:
            position_list.append(
                {
                    "code": stock["code"],
                    "name": stock["name"],
                    "enName": stock["enName"],
                    "hasexist": stock["hasexist"],
                    "flag": stock["flag"],
                    "type": stock["type"],
                    "current": stock["current"],
                    "chg": stock["chg"],
                    "percent": str(stock["percent"]),
                    "stock_id": stock["stock_id"],
                    "ind_id": stock["ind_id"],
                    "ind_name": stock["ind_name"],
                    "ind_color": stock["ind_color"],
                    "textname": stock["name"],
                    "segment_name": stock["ind_name"],
                    "weight": weight,
                    "url": "/S/" + stock["code"],
                    "proactive": True,
                    "price": str(stock["current"]),
                }
            )
        # whatever weight is not allocated to positions remains as cash
        remain_weight = 100 - sum(i.get("weight") for i in position_list)
        cash = round(remain_weight, 2)
        log.debug("调仓比例:%f, 剩余持仓 :%f", weight, remain_weight)
        data = {
            "cash": cash,
            "holdings": str(json.dumps(position_list)),
            "cube_symbol": str(self.account_config["portfolio_code"]),
            "segment": "true",
            "comment": "",
        }
        try:
            resp = self.s.post(self.config["rebalance_url"], data=data)
        # pylint: disable=broad-except
        except Exception as e:
            # best-effort: a failed HTTP request is logged, not raised
            log.warning("调仓失败: %s ", e)
            return None
        log.debug("调仓 %s: 持仓比例%d", stock["name"], weight)
        resp_json = json.loads(resp.text)
        if "error_description" in resp_json and resp.status_code != 200:
            log.error("调仓错误: %s", resp_json["error_description"])
            return [
                {
                    "error_no": resp_json["error_code"],
                    "error_info": resp_json["error_description"],
                }
            ]
        log.debug("调仓成功 %s: 持仓比例%d", stock["name"], weight)
        return None
"def",
"adjust_weight",
"(",
"self",
",",
"stock_code",
",",
"weight",
")",
":",
"stock",
"=",
"self",
".",
"_search_stock_info",
"(",
"stock_code",
")",
"if",
"stock",
"is",
"None",
":",
"raise",
"exceptions",
".",
"TradeError",
"(",
"u\"没有查询要操作的股票信息\")",
"... | 36.425 | 15.55 |
def goal_delete(self, goal_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/goals#delete-goal"
api_path = "/api/v2/goals/{goal_id}"
api_path = api_path.format(goal_id=goal_id)
return self.call(api_path, method="DELETE", **kwargs) | [
"def",
"goal_delete",
"(",
"self",
",",
"goal_id",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/goals/{goal_id}\"",
"api_path",
"=",
"api_path",
".",
"format",
"(",
"goal_id",
"=",
"goal_id",
")",
"return",
"self",
".",
"call",
"(",
"api_... | 54.6 | 14.6 |
    def reduce(self, dimensions=None, function=None, **reduce_map):
        """
        Reduces the Raster using functions provided via the
        kwargs, where the keyword is the dimension to be reduced.
        Optionally a label_prefix can be provided to prepend to
        the result Element label.
        """
        # Normalize the requested dimensions/function into a canonical
        # (function, dimension-names) pair.
        function, dims = self._reduce_map(dimensions, function, reduce_map)
        if len(dims) == self.ndims:
            # Reducing over all key dimensions collapses to a scalar.
            # np.ufunc.reduce with axis=None flattens the array; any other
            # callable receives the raw data array directly.
            if isinstance(function, np.ufunc):
                return function.reduce(self.data, axis=None)
            else:
                return function(self.data)
        else:
            # Partial reduction: collapse one dimension, keeping the other
            # key dimension as the index of the resulting Table.
            dimension = dims[0]
            other_dimension = [d for d in self.kdims if d.name != dimension]
            oidx = self.get_dimension_index(other_dimension[0])
            x_vals = self.dimension_values(other_dimension[0].name, False)
            reduced = function(self._zdata, axis=oidx)
            # NOTE(review): for bounded elements reduced along the first
            # axis the row order is reversed, presumably so values align
            # with ascending sample coordinates -- confirm against the
            # bounded-element (Image) semantics.
            if oidx and hasattr(self, 'bounds'):
                reduced = reduced[::-1]
            data = zip(x_vals, reduced)
            # bounds/extents only apply to the 2D element, not the reduced
            # Table, so drop them from the inherited parameters.
            params = dict(dict(self.get_param_values(onlychanged=True)),
                          kdims=other_dimension, vdims=self.vdims)
            params.pop('bounds', None)
            params.pop('extents', None)
            return Table(data, **params)
"def",
"reduce",
"(",
"self",
",",
"dimensions",
"=",
"None",
",",
"function",
"=",
"None",
",",
"*",
"*",
"reduce_map",
")",
":",
"function",
",",
"dims",
"=",
"self",
".",
"_reduce_map",
"(",
"dimensions",
",",
"function",
",",
"reduce_map",
")",
"if... | 47.111111 | 14.888889 |
def destroy(self):
""" A reimplemented destructor.
This destructor will clear the reference to the toolkit widget
and set its parent to None.
"""
widget = self.widget
if widget is not None:
parent = widget.getparent()
if parent is not None:
parent.remove(widget)
del self.widget
d = self.declaration
try:
del CACHE[d.ref]
except KeyError:
pass
super(WebComponent, self).destroy() | [
"def",
"destroy",
"(",
"self",
")",
":",
"widget",
"=",
"self",
".",
"widget",
"if",
"widget",
"is",
"not",
"None",
":",
"parent",
"=",
"widget",
".",
"getparent",
"(",
")",
"if",
"parent",
"is",
"not",
"None",
":",
"parent",
".",
"remove",
"(",
"w... | 26.95 | 14.85 |
def _fill_array_from_list(the_list, the_array):
"""Fill an `array` from a `list`"""
for i, val in enumerate(the_list):
the_array[i] = val
return the_array | [
"def",
"_fill_array_from_list",
"(",
"the_list",
",",
"the_array",
")",
":",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"the_list",
")",
":",
"the_array",
"[",
"i",
"]",
"=",
"val",
"return",
"the_array"
] | 37.2 | 7 |
    def connect(self, inbox):
        """
        Connects the ``Piper`` instance to its upstream ``Pipers`` that should
        be given as a sequence. This connects this ``Piper.inbox`` with the
        upstream ``Piper.outbox`` respecting any "consume", "spawn" and
        "produce" arguments.
        Arguments:
        - inbox(sequence) sequence of ``Piper`` instances.
        """
        # Connecting is only legal before the piper (and its NuMap, if
        # any) has started, and only once.
        if self.started:
            self.log.error('Piper %s is started and cannot connect to %s.' % \
                            (self, inbox))
            raise PiperError('Piper %s is started and cannot connect to %s.' % \
                              (self, inbox))
        elif self.connected:
            self.log.error('Piper %s is connected and cannot connect to %s.' % \
                            (self, inbox))
            raise PiperError('Piper %s is connected and cannot connect to %s.' % \
                              (self, inbox))
        elif hasattr(self.imap, '_started') and self.imap._started.isSet():
            self.log.error('Piper %s cannot connect (NuMap is started).' % \
                            self)
            raise PiperError('Piper %s cannot connect (NuMap is started).' % \
                              self)
        else:
            # not started and not connected and NuMap not started
            self.log.debug('Piper %s connects to %s' % (self, inbox))
            # determine the stride with which results will be consumed from
            # the input (NuMap instances define one; plain imap has none).
            stride = self.imap.stride if hasattr(self.imap, 'stride') else 1
            # Tee input iterators. The idea is to create a promise object for a
            # tee. The actual teed iterator will be created on start. Each tee
            # is protected with a separate lock; the reasons for this are:
            # - tee objects are, as a collection, not thread safe
            # - tee objects might be next'ed from different threads; a single
            #   lock would not guarantee that a thread is allowed to finish
            #   its stride (a thread releases the next thread only after it
            #   has finished a stride).
            teed = []
            for piper in inbox:
                if hasattr(piper, '_iter'): # isinstance Piper?
                    piper.tee_num += 1
                    tee_lock = Lock()
                    tee_lock.acquire()
                    piper.tee_locks.append(tee_lock)
                    piper = _TeePiper(piper, piper.tee_num - 1, stride)
                teed.append(_InputIterator(piper, self))
            # set how much to consume from input iterators.
            self.inbox = _Zip(*teed) if self.consume == 1 else\
                _Consume(_Zip(*teed), n=self.consume, stride=stride)
            # spawn the requested number of parallel imap tasks over the
            # shared inbox; timeout/track apply only to NuMap instances.
            for i in xrange(self.spawn):
                self.imap_tasks.append(\
                    self.imap(self.worker, self.inbox) \
                    if self.imap is imap else \
                    self.imap(self.worker, self.inbox, timeout=self.timeout, \
                              track=self.track))
            # chain the results of all spawned tasks together.
            outbox = _Chain(self.imap_tasks, stride=stride)
            # wrap the output according to the "produce"/"repeat" settings.
            if self.produce == 1:
                self.outbox = outbox
            elif self.repeat:
                self.outbox = _Repeat(outbox, n=self.produce, stride=stride)
            else:
                self.outbox = _Produce(outbox, n=self.produce, stride=stride)
            self.connected = True
        return self
"def",
"connect",
"(",
"self",
",",
"inbox",
")",
":",
"if",
"self",
".",
"started",
":",
"self",
".",
"log",
".",
"error",
"(",
"'Piper %s is started and cannot connect to %s.'",
"%",
"(",
"self",
",",
"inbox",
")",
")",
"raise",
"PiperError",
"(",
"'Pipe... | 47.368421 | 22.026316 |
def get_instance(cls, device):
"""
This is only a slot to store and get already initialized poco instance rather than initializing again. You can
simply pass the ``current device instance`` provided by ``airtest`` to get the AndroidUiautomationPoco instance.
If no such AndroidUiautomationPoco instance, a new instance will be created and stored.
Args:
device (:py:obj:`airtest.core.device.Device`): more details refer to ``airtest doc``
Returns:
poco instance
"""
if cls._nuis.get(device) is None:
cls._nuis[device] = AndroidUiautomationPoco(device)
return cls._nuis[device] | [
"def",
"get_instance",
"(",
"cls",
",",
"device",
")",
":",
"if",
"cls",
".",
"_nuis",
".",
"get",
"(",
"device",
")",
"is",
"None",
":",
"cls",
".",
"_nuis",
"[",
"device",
"]",
"=",
"AndroidUiautomationPoco",
"(",
"device",
")",
"return",
"cls",
".... | 42 | 31.125 |
def listen(self, log, noprint=True):
"""
Return a dictionary representation of the Log instance.
Note:
This function won't work with anonymous events.
Args:
log (processblock.Log): The Log instance that needs to be parsed.
noprint (bool): Flag to turn off priting of the decoded log instance.
"""
try:
result = self.decode_event(log.topics, log.data)
except ValueError:
return # api compatibility
if not noprint:
print(result)
return result | [
"def",
"listen",
"(",
"self",
",",
"log",
",",
"noprint",
"=",
"True",
")",
":",
"try",
":",
"result",
"=",
"self",
".",
"decode_event",
"(",
"log",
".",
"topics",
",",
"log",
".",
"data",
")",
"except",
"ValueError",
":",
"return",
"# api compatibilit... | 28.5 | 22.6 |
def array_shift(a, n, fill="average"):
    """
    Return a copy of *a* with all elements shifted forward in index by n.
    a is the array
    n is the amount by which to shift (can be positive or negative)
    fill="average"  fill the new empty elements with the average of the array
    fill="wrap"     fill the new empty elements with the lopped-off elements
    fill=37.2       fill the new empty elements with the value 37.2
    """
    new_a = _n.array(a)
    if n == 0:
        return new_a
    # Bug fix: the original used ``fill is "average"`` / ``fill is "wrap"``,
    # comparing strings by identity.  That only works by accident of
    # CPython string interning; use ``==`` for value comparison.
    m = int(_n.abs(n))
    fill_array = _n.zeros(m)
    # populate the fill values before shifting
    if fill == "average":
        fill_array += _n.average(a)
    elif fill == "wrap" and n >= 0:
        # wrap the last n elements around to the front
        for i in range(0, n):
            fill_array[i] = a[i - n]
    elif fill == "wrap" and n < 0:
        # wrap the first |n| elements around to the back
        for i in range(0, -n):
            fill_array[i] = a[i]
    else:
        fill_array += fill
    # shift and fill
    if n > 0:
        for i in range(n, len(a)):
            new_a[i] = a[i - n]
        for i in range(0, n):
            new_a[i] = fill_array[i]
    else:
        for i in range(0, len(a) + n):
            new_a[i] = a[i - n]
        for i in range(0, -n):
            new_a[-i - 1] = fill_array[-i - 1]
    return new_a
"def",
"array_shift",
"(",
"a",
",",
"n",
",",
"fill",
"=",
"\"average\"",
")",
":",
"new_a",
"=",
"_n",
".",
"array",
"(",
"a",
")",
"if",
"n",
"==",
"0",
":",
"return",
"new_a",
"fill_array",
"=",
"_n",
".",
"array",
"(",
"[",
"]",
")",
"fill... | 33 | 23.055556 |
def safe_call(func, *args, **kwargs):
    """Invoke *func* with the given arguments without letting an exception
    propagate.

    On failure the exception is logged and returned instead of raised, so
    callers can detect errors via ``isinstance(result, BaseException)``.
    """
    try:
        return func(*args, **kwargs)
    except Exception as exc:
        logger.error('exc occur. e: %s, func: %s', exc, func, exc_info=True)
        return exc
"def",
"safe_call",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'exc occur. e: ... | 27.6 | 16 |
def _len_tube(Flow, Diam, HeadLoss, conc_chem, temp, en_chem, KMinor):
    """Length of tube required to get desired head loss at maximum flow based on
    the Hagen-Poiseuille equation, with a correction term for minor losses.

    Returns the bare magnitude (units stripped) of the computed length.
    """
    # Bug fix: the original bound the result to a local named ``len``,
    # shadowing the builtin.  Also hoist the duplicated viscosity call.
    nu = viscosity_kinematic_chem(conc_chem, temp, en_chem)
    num1 = pc.gravity.magnitude * HeadLoss * np.pi * (Diam**4)
    denom1 = 128 * nu * Flow
    num2 = Flow * KMinor
    denom2 = 16 * np.pi * nu
    tube_len = (num1 / denom1) - (num2 / denom2)
    return tube_len.magnitude
"def",
"_len_tube",
"(",
"Flow",
",",
"Diam",
",",
"HeadLoss",
",",
"conc_chem",
",",
"temp",
",",
"en_chem",
",",
"KMinor",
")",
":",
"num1",
"=",
"pc",
".",
"gravity",
".",
"magnitude",
"*",
"HeadLoss",
"*",
"np",
".",
"pi",
"*",
"(",
"Diam",
"**... | 54.444444 | 17.444444 |
    def sign(self, privkey):
        """Sign this with a private key"""
        # Refuse to double-sign: ``v`` is non-zero only once a signature
        # has been applied.
        if self.v:
            raise InvalidSignature("already signed")
        if privkey in (0, '', '\x00' * 32):
            raise InvalidSignature("Zero privkey cannot sign")
        # Hash the RLP encoding of this object *excluding* the signature
        # fields themselves.
        rawhash = sha3(rlp.encode(self, self.__class__.exclude(['v', 'r', 's'])))
        # A 64-character key is assumed hex-encoded; normalize to raw bytes.
        if len(privkey) == 64:
            privkey = encode_privkey(privkey, 'bin')
        pk = PrivateKey(privkey, raw=True)
        signature = pk.ecdsa_recoverable_serialize(pk.ecdsa_sign_recoverable(rawhash, raw=True))
        # serialize() yields (64-byte r||s, recovery id); append the id as
        # a single byte.
        # NOTE(review): the chr()/ord() round-trip on signature bytes
        # implies Python 2 string semantics -- confirm before running
        # under Python 3.
        signature = signature[0] + chr(signature[1])
        # Ethereum-style encoding: v = recovery_id + 27.
        self.v = ord(signature[64]) + 27
        self.r = big_endian_to_int(signature[0:32])
        self.s = big_endian_to_int(signature[32:64])
        # Invalidate any cached sender; it must be re-derived from the new
        # signature.
        self._sender = None
        return self
"def",
"sign",
"(",
"self",
",",
"privkey",
")",
":",
"if",
"self",
".",
"v",
":",
"raise",
"InvalidSignature",
"(",
"\"already signed\"",
")",
"if",
"privkey",
"in",
"(",
"0",
",",
"''",
",",
"'\\x00'",
"*",
"32",
")",
":",
"raise",
"InvalidSignature"... | 34.043478 | 21.956522 |
def fade_to_color(self, fade_milliseconds, color):
"""
Fade the light to a known colour in a
:param fade_milliseconds: Duration of the fade in milliseconds
:param color: Named color to fade to
:return: None
"""
red, green, blue = self.color_to_rgb(color)
return self.fade_to_rgb(fade_milliseconds, red, green, blue) | [
"def",
"fade_to_color",
"(",
"self",
",",
"fade_milliseconds",
",",
"color",
")",
":",
"red",
",",
"green",
",",
"blue",
"=",
"self",
".",
"color_to_rgb",
"(",
"color",
")",
"return",
"self",
".",
"fade_to_rgb",
"(",
"fade_milliseconds",
",",
"red",
",",
... | 37.1 | 14.7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.