text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def _mark_html_fields_as_safe(self, page):
    """
    Wrap the page's ``title`` and ``content`` in ``mark_safe`` so CMS
    templates can render them without needing the ``safe`` template tag.
    """
    for field_name in ('title', 'content'):
        setattr(page, field_name, mark_safe(getattr(page, field_name)))
    return page
"def",
"_mark_html_fields_as_safe",
"(",
"self",
",",
"page",
")",
":",
"page",
".",
"title",
"=",
"mark_safe",
"(",
"page",
".",
"title",
")",
"page",
".",
"content",
"=",
"mark_safe",
"(",
"page",
".",
"content",
")",
"return",
"page"
] | 35.375 | 7.875 |
def _consolidate_slices(slices):
    """Consolidate adjacent slices in a list of slices.

    Two neighbours merge when the first one's ``stop`` equals the second
    one's ``start`` and both have a step of 1 (or None).

    Raises:
        ValueError: if any element of ``slices`` is not a ``slice``.
    """
    merged = []
    previous = slice(None)
    for current in slices:
        if not isinstance(current, slice):
            raise ValueError('list element is not a slice: %r' % current)
        can_merge = (
            merged
            and previous.stop == current.start
            and _is_one_or_none(previous.step)
            and _is_one_or_none(current.step)
        )
        if can_merge:
            # Extend the previously emitted slice in place.
            previous = slice(previous.start, current.stop, current.step)
            merged[-1] = previous
        else:
            merged.append(current)
            previous = current
    return merged
"def",
"_consolidate_slices",
"(",
"slices",
")",
":",
"result",
"=",
"[",
"]",
"last_slice",
"=",
"slice",
"(",
"None",
")",
"for",
"slice_",
"in",
"slices",
":",
"if",
"not",
"isinstance",
"(",
"slice_",
",",
"slice",
")",
":",
"raise",
"ValueError",
... | 37.294118 | 13.764706 |
def fixUTF8(cls, data):  # Ensure proper encoding for UA's servers...
    """Encode every ``str`` value in *data* to UTF-8 bytes, in place.

    Non-string values are left untouched; the (mutated) dict is returned.
    """
    for key, value in data.items():
        if isinstance(value, str):
            data[key] = value.encode('utf-8')
    return data
"def",
"fixUTF8",
"(",
"cls",
",",
"data",
")",
":",
"# Ensure proper encoding for UA's servers...",
"for",
"key",
"in",
"data",
":",
"if",
"isinstance",
"(",
"data",
"[",
"key",
"]",
",",
"str",
")",
":",
"data",
"[",
"key",
"]",
"=",
"data",
"[",
"ke... | 41.833333 | 13.5 |
def _spin(coordinates, theta, around):
    """Rotate a set of coordinates in place around an arbitrary vector.

    Parameters
    ----------
    coordinates : np.ndarray, shape=(n,3), dtype=float
        The coordinates being spun.
    theta : float
        The angle by which to spin the coordinates, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The axis about which to spin the coordinates.

    Raises
    ------
    ValueError
        If `around` is the zero vector (no rotation axis).
    """
    around = np.asarray(around).reshape(3)
    if np.array_equal(around, np.zeros(3)):
        raise ValueError('Cannot spin around a zero vector')
    # Translate so the rotation happens about the centroid, not the origin.
    center_pos = np.mean(coordinates, axis=0)
    coordinates -= center_pos
    # NOTE(review): `-=` above mutates the caller's array, but `_rotate` may
    # return a new array; whether the caller's buffer actually ends up rotated
    # depends on `_rotate` operating in place — confirm against its contract.
    coordinates = _rotate(coordinates, theta, around)
    coordinates += center_pos
    return coordinates
"def",
"_spin",
"(",
"coordinates",
",",
"theta",
",",
"around",
")",
":",
"around",
"=",
"np",
".",
"asarray",
"(",
"around",
")",
".",
"reshape",
"(",
"3",
")",
"if",
"np",
".",
"array_equal",
"(",
"around",
",",
"np",
".",
"zeros",
"(",
"3",
"... | 35.095238 | 14.428571 |
def handle_xmlrpc(self, request_text):
    """Handle a single XML-RPC request"""
    body = self._marshaled_dispatch(request_text)
    # CGI-style headers on stdout, terminated by a blank line.
    print('Content-Type: text/xml')
    print('Content-Length: %d' % len(body))
    print()
    sys.stdout.flush()
    # The marshalled response is bytes, so it goes through the binary buffer.
    raw = sys.stdout.buffer
    raw.write(body)
    raw.flush()
"def",
"handle_xmlrpc",
"(",
"self",
",",
"request_text",
")",
":",
"response",
"=",
"self",
".",
"_marshaled_dispatch",
"(",
"request_text",
")",
"print",
"(",
"'Content-Type: text/xml'",
")",
"print",
"(",
"'Content-Length: %d'",
"%",
"len",
"(",
"response",
"... | 31.363636 | 14.363636 |
async def document(
        self, file, title=None, *, description=None, type=None,
        mime_type=None, attributes=None, force_document=False,
        voice_note=False, video_note=False, use_cache=True, id=None,
        text=None, parse_mode=(), link_preview=True,
        geo=None, period=60, contact=None, game=False, buttons=None
):
    """
    Creates a new inline result of document type.

    `use_cache`, `mime_type`, `attributes`, `force_document`,
    `voice_note` and `video_note` are described in `client.send_file
    <telethon.client.uploads.UploadMethods.send_file>`.

    Args:
        file (`obj`):
            Same as ``file`` for `client.send_file
            <telethon.client.uploads.UploadMethods.send_file>`.

        title (`str`, optional):
            The title to be shown for this result.

        description (`str`, optional):
            Further explanation of what this result means.

        type (`str`, optional):
            The type of the document. May be one of: photo, gif,
            mpeg4_gif, video, audio, voice, document, sticker.

            See "Type of the result" in https://core.telegram.org/bots/api.
    """
    # Infer the result type when not given explicitly.
    if type is None:
        if voice_note:
            type = 'voice'
        else:
            type = 'document'
    try:
        # Fast path: `file` is already something Telegram knows about.
        fh = utils.get_input_document(file)
    except TypeError:
        # Not an existing document reference; upload it first.
        use_cache = types.InputDocument if use_cache else None
        fh = await self._client.upload_file(file, use_cache=use_cache)
    if not isinstance(fh, types.InputDocument):
        # Freshly uploaded file: derive its attributes and register the
        # media with Telegram so we get a reusable InputDocument handle.
        attributes, mime_type = utils.get_attributes(
            file,
            mime_type=mime_type,
            attributes=attributes,
            force_document=force_document,
            voice_note=voice_note,
            video_note=video_note
        )
        r = await self._client(functions.messages.UploadMediaRequest(
            types.InputPeerSelf(), media=types.InputMediaUploadedDocument(
                fh,
                mime_type=mime_type,
                attributes=attributes,
                nosound_video=None,
                thumb=None
            )))
        fh = utils.get_input_document(r.document)
    result = types.InputBotInlineResultDocument(
        id=id or '',
        type=type,
        document=fh,
        send_message=await self._message(
            # Empty string for text if there's media but text is None.
            # We may want to display a document but send text; however
            # default to sending the media (without text, i.e. stickers).
            text=text or '',
            parse_mode=parse_mode,
            link_preview=link_preview,
            geo=geo,
            period=period,
            contact=contact,
            game=game,
            buttons=buttons
        ),
        title=title,
        description=description
    )
    if id is None:
        # No caller-supplied ID: derive a stable one from the serialized result.
        result.id = hashlib.sha256(bytes(result)).hexdigest()
    return result
"async",
"def",
"document",
"(",
"self",
",",
"file",
",",
"title",
"=",
"None",
",",
"*",
",",
"description",
"=",
"None",
",",
"type",
"=",
"None",
",",
"mime_type",
"=",
"None",
",",
"attributes",
"=",
"None",
",",
"force_document",
"=",
"False",
... | 36.627907 | 19.651163 |
def _init_browser(self):
    """
    Override this method with the appropriate way to prepare a logged in
    browser.

    Opens the login page, fills in the credentials (falling back to the
    alternate field names used by some login forms) and submits the form.
    """
    self.browser = mechanize.Browser()
    self.browser.set_handle_robots(False)
    self.browser.open(self.server_url + "/youraccount/login")
    self.browser.select_form(nr=0)
    try:
        self.browser['nickname'] = self.user
        self.browser['password'] = self.password
    except Exception:
        # Some deployments name the credential fields differently.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.)
        self.browser['p_un'] = self.user
        self.browser['p_pw'] = self.password
    # Set login_method to be writable
    self.browser.form.find_control('login_method').readonly = False
    self.browser['login_method'] = self.login_method
    self.browser.submit()
"def",
"_init_browser",
"(",
"self",
")",
":",
"self",
".",
"browser",
"=",
"mechanize",
".",
"Browser",
"(",
")",
"self",
".",
"browser",
".",
"set_handle_robots",
"(",
"False",
")",
"self",
".",
"browser",
".",
"open",
"(",
"self",
".",
"server_url",
... | 39.157895 | 13.368421 |
def enforce_timezone(self, value):
    """
    When `self.default_timezone` is `None`, always return naive datetimes.
    When `self.default_timezone` is not `None`, always return aware datetimes.
    """
    field_timezone = getattr(self, 'timezone', self.default_timezone())
    aware = timezone.is_aware(value)
    if field_timezone is not None and not aware:
        return timezone.make_aware(value, field_timezone)
    if field_timezone is None and aware:
        return timezone.make_naive(value, timezone.UTC())
    return value
"def",
"enforce_timezone",
"(",
"self",
",",
"value",
")",
":",
"field_timezone",
"=",
"getattr",
"(",
"self",
",",
"'timezone'",
",",
"self",
".",
"default_timezone",
"(",
")",
")",
"if",
"(",
"field_timezone",
"is",
"not",
"None",
")",
"and",
"not",
"t... | 47.75 | 23.583333 |
def union(self, *others: 'Substitution') -> 'Substitution':
    """Try to merge the substitutions.

    If a variable occurs in multiple substitutions, its replacements are
    merged via :meth:`try_add_variable` (see :meth:`union_with_variable`
    for the merge semantics). None of the original substitutions is
    modified.

    Args:
        others:
            The other substitutions to merge with this one.

    Returns:
        A new substitution containing all merged variable bindings.

    Raises:
        ValueError:
            if a variable occurs in multiple substitutions but cannot be
            merged because the substitutions conflict.
    """
    combined = Substitution(self)
    for substitution in others:
        for name, replacement in substitution.items():
            combined.try_add_variable(name, replacement)
    return combined
"def",
"union",
"(",
"self",
",",
"*",
"others",
":",
"'Substitution'",
")",
"->",
"'Substitution'",
":",
"new_subst",
"=",
"Substitution",
"(",
"self",
")",
"for",
"other",
"in",
"others",
":",
"for",
"variable_name",
",",
"replacement",
"in",
"other",
".... | 35.3125 | 24.53125 |
def create(buffer_capacity: int, buffer_initial_size: int, frame_stack_compensation: bool = False,
           frame_history: int = 1):
    """ Vel factory function """
    # Collect the settings first so the factory call stays on one line.
    settings = dict(
        buffer_capacity=buffer_capacity,
        buffer_initial_size=buffer_initial_size,
        frame_stack_compensation=frame_stack_compensation,
        frame_history=frame_history,
    )
    return CircularReplayBufferFactory(**settings)
"def",
"create",
"(",
"buffer_capacity",
":",
"int",
",",
"buffer_initial_size",
":",
"int",
",",
"frame_stack_compensation",
":",
"bool",
"=",
"False",
",",
"frame_history",
":",
"int",
"=",
"1",
")",
":",
"return",
"CircularReplayBufferFactory",
"(",
"buffer_c... | 43.333333 | 14.444444 |
def cast(self, val: str):
    """Convert *val* to the type requested by ``self.cast_as``.

    Prefers a specialised ``cast_as_<typename>`` method when one exists;
    otherwise calls ``self.cast_as`` directly.
    """
    specialised_name = 'cast_as_{}'.format(self.cast_as.__name__.lower())
    try:
        return getattr(self, specialised_name)(val)
    except AttributeError:
        return self.cast_as(val)
"def",
"cast",
"(",
"self",
",",
"val",
":",
"str",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"self",
",",
"'cast_as_{}'",
".",
"format",
"(",
"self",
".",
"cast_as",
".",
"__name__",
".",
"lower",
"(",
")",
")",
")",
"(",
"val",
")",
"exce... | 38.285714 | 11.714286 |
def landweber(op, x, rhs, niter, omega=None, projection=None, callback=None):
    r"""Optimized implementation of Landweber's method.

    Solves the inverse problem::

        A(x) = rhs

    Parameters
    ----------
    op : `Operator`
        Operator in the inverse problem. ``op.derivative(x).adjoint`` must be
        well-defined for ``x`` in the operator domain.
    x : ``op.domain`` element
        Element to which the result is written. Its initial value is
        used as starting point of the iteration, and its values are
        updated in each iteration step.
    rhs : ``op.range`` element
        Right-hand side of the equation defining the inverse problem.
    niter : int
        Number of iterations.
    omega : positive float, optional
        Relaxation parameter in the iteration.
        Default: ``1 / op.norm(estimate=True) ** 2``
    projection : callable, optional
        Function that can be used to modify the iterates in each iteration,
        for example enforcing positivity. The function should take one
        argument and modify it in-place.
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate.

    Notes
    -----
    This method calculates an approximate least-squares solution of
    the inverse problem of the first kind

    .. math::
        \mathcal{A} (x) = y,

    for a given :math:`y\in \mathcal{Y}`, i.e. an approximate
    solution :math:`x^*` to

    .. math::
        \min_{x\in \mathcal{X}} \| \mathcal{A}(x) - y \|_{\mathcal{Y}}^2

    for a (Frechet-) differentiable operator
    :math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert
    spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method
    starts from an initial guess :math:`x_0` and uses the
    iteration

    .. math::
        x_{k+1} = x_k -
        \omega \ \partial \mathcal{A}(x)^* (\mathcal{A}(x_k) - y),

    where :math:`\partial \mathcal{A}(x)` is the Frechet derivative
    of :math:`\mathcal{A}` at :math:`x` and :math:`\omega` is a
    relaxation parameter. For linear problems, a choice
    :math:`0 < \omega < 2/\lVert \mathcal{A}^2\rVert` guarantees
    convergence, where :math:`\lVert\mathcal{A}\rVert` stands for the
    operator norm of :math:`\mathcal{A}`.

    Users may also optionally provide a projection to project each
    iterate onto some subset. For example enforcing positivity.

    This implementation uses a minimum amount of memory copies by
    applying re-usable temporaries and in-place evaluation.

    The method is also described in a
    `Wikipedia article
    <https://en.wikipedia.org/wiki/Landweber_iteration>`_.
    """
    # TODO: add a book reference
    if x not in op.domain:
        raise TypeError('`x` {!r} is not in the domain of `op` {!r}'
                        ''.format(x, op.domain))
    if omega is None:
        # Default relaxation: 1 / ||A||^2, which lies in the convergent
        # range (0, 2/||A||^2) for linear operators.
        omega = 1 / op.norm(estimate=True) ** 2
    # Reusable temporaries
    tmp_ran = op.range.element()
    tmp_dom = op.domain.element()
    for _ in range(niter):
        # tmp_ran <- A(x) - rhs, the current residual, computed in place.
        op(x, out=tmp_ran)
        tmp_ran -= rhs
        # tmp_dom <- dA(x)^* (residual)
        op.derivative(x).adjoint(tmp_ran, out=tmp_dom)
        # x <- x - omega * tmp_dom (in-place linear combination).
        x.lincomb(1, x, -omega, tmp_dom)
        if projection is not None:
            projection(x)
        if callback is not None:
            callback(x)
"def",
"landweber",
"(",
"op",
",",
"x",
",",
"rhs",
",",
"niter",
",",
"omega",
"=",
"None",
",",
"projection",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"# TODO: add a book reference",
"if",
"x",
"not",
"in",
"op",
".",
"domain",
":",
"r... | 33.852632 | 22.084211 |
def create_template(self, name, subject, html, text='', timeout=None):
    """API call to create a template.

    Posts the template fields to the templates endpoint and returns the
    API response.
    """
    payload = dict(name=name, subject=subject, html=html, text=text)
    return self._api_request(
        self.TEMPLATES_ENDPOINT,
        self.HTTP_POST,
        payload=payload,
        timeout=timeout,
    )
"def",
"create_template",
"(",
"self",
",",
"name",
",",
"subject",
",",
"html",
",",
"text",
"=",
"''",
",",
"timeout",
"=",
"None",
")",
":",
"payload",
"=",
"{",
"'name'",
":",
"name",
",",
"'subject'",
":",
"subject",
",",
"'html'",
":",
"html",
... | 20.681818 | 19.545455 |
def check(self, src_tgt, actual_deps):
    """Check for missing deps.

    See docstring for _compute_missing_deps for details.

    Logs (or raises TaskError, when the corresponding check is 'fatal')
    for missing direct BUILD dependencies and for unnecessary deps.
    """
    # Skip all work unless at least one of the two checks is enabled.
    if self._check_missing_direct_deps or self._check_unnecessary_deps:
        missing_file_deps, missing_direct_tgt_deps = \
            self._compute_missing_deps(src_tgt, actual_deps)
        buildroot = get_buildroot()

        def shorten(path):  # Make the output easier to read.
            if path.startswith(buildroot):
                return os.path.relpath(path, buildroot)
            return path

        def filter_whitelisted(missing_deps):
            # Removing any targets that exist in the whitelist from the list of dependency issues.
            return [(tgt_pair, evidence) for (tgt_pair, evidence) in missing_deps
                    if tgt_pair[0].address not in self._target_whitelist]

        missing_direct_tgt_deps = filter_whitelisted(missing_direct_tgt_deps)
        if self._check_missing_direct_deps and missing_direct_tgt_deps:
            # 'fatal' escalates from a warning to an error log plus TaskError.
            log_fn = (self.context.log.error if self._check_missing_direct_deps == 'fatal'
                      else self.context.log.warn)
            for (tgt_pair, evidence) in missing_direct_tgt_deps:
                evidence_str = '\n'.join([' {} uses {}'.format(shorten(e[0]), shorten(e[1]))
                                          for e in evidence])
                log_fn('Missing direct BUILD dependency {} -> {} because:\n{}'
                       .format(tgt_pair[0].address.spec, tgt_pair[1].address.spec, evidence_str))
            if self._check_missing_direct_deps == 'fatal':
                raise TaskError('Missing direct deps.')
        if self._check_unnecessary_deps:
            log_fn = (self.context.log.error if self._check_unnecessary_deps == 'fatal'
                      else self.context.log.warn)
            had_unused = self._do_check_unnecessary_deps(src_tgt, actual_deps, log_fn)
            if had_unused and self._check_unnecessary_deps == 'fatal':
                raise TaskError('Unnecessary deps.')
"def",
"check",
"(",
"self",
",",
"src_tgt",
",",
"actual_deps",
")",
":",
"if",
"self",
".",
"_check_missing_direct_deps",
"or",
"self",
".",
"_check_unnecessary_deps",
":",
"missing_file_deps",
",",
"missing_direct_tgt_deps",
"=",
"self",
".",
"_compute_missing_de... | 47.65 | 24.425 |
def add_request_handlers_object(self, rh_obj):
    """Add fake request handlers from an object with request_* method(s)

    See :meth:`FakeInspectingClientManager.add_request_handlers_dict` for more detail.
    """
    handlers = {}
    for attr_name in dir(rh_obj):
        attr = getattr(rh_obj, attr_name)
        if not callable(attr):
            continue
        if attr_name.startswith("request_"):
            handlers[convert_method_name("request_", attr_name)] = attr
    self.add_request_handlers_dict(handlers)
"def",
"add_request_handlers_object",
"(",
"self",
",",
"rh_obj",
")",
":",
"rh_dict",
"=",
"{",
"}",
"for",
"name",
"in",
"dir",
"(",
"rh_obj",
")",
":",
"if",
"not",
"callable",
"(",
"getattr",
"(",
"rh_obj",
",",
"name",
")",
")",
":",
"continue",
... | 37.75 | 17.8125 |
def create(cls, pid_type=None, pid_value=None, object_type=None,
           object_uuid=None, status=None, **kwargs):
    """Create a new instance for the given type and pid.

    :param pid_type: Persistent identifier type. (Default: None).
    :param pid_value: Persistent identifier value. (Default: None).
    :param status: Current PID status.
        (Default: :attr:`invenio_pidstore.models.PIDStatus.NEW`)
    :param object_type: The object type is a string that identify its type.
        (Default: None).
    :param object_uuid: The object UUID. (Default: None).
    :returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
        instance.
    """
    assert pid_value
    assert pid_type or cls.pid_type
    # Fall back to the class-level defaults for type and status.
    resolved_type = pid_type or cls.pid_type
    resolved_status = status or cls.default_status
    pid = PersistentIdentifier.create(
        resolved_type,
        pid_value,
        pid_provider=cls.pid_provider,
        object_type=object_type,
        object_uuid=object_uuid,
        status=resolved_status,
    )
    return cls(pid, **kwargs)
"def",
"create",
"(",
"cls",
",",
"pid_type",
"=",
"None",
",",
"pid_value",
"=",
"None",
",",
"object_type",
"=",
"None",
",",
"object_uuid",
"=",
"None",
",",
"status",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"pid_value",
"assert",
... | 41.192308 | 16.538462 |
def connect(url):
    """Connect to UNIX or TCP socket.

    url can be either tcp://<host>:<port> or ipc://<path>

    Returns:
        tuple: ``(sock, hostname)`` — the connected socket and the host
        name to report for the connection.

    Raises:
        ValueError: if the URL scheme is neither ``tcp`` nor ``ipc``.
    """
    # `socket` in this namespace is the socket *class*; use the module for
    # gethostname() (the original called `socket.gethostname()` on the class,
    # which raises AttributeError).
    import socket as _socket_module

    url = urlparse(url)
    if url.scheme == 'tcp':
        sock = socket()
        host, _, port = url.netloc.rpartition(':')
        # connect() needs (host, int-port); the URL yields strings.
        netloc = (host, int(port))
        hostname = _socket_module.gethostname()
    elif url.scheme == 'ipc':
        sock = socket(AF_UNIX)
        netloc = url.path
        hostname = 'localhost'
    else:
        raise ValueError('unknown socket type: %s' % url.scheme)
    sock.connect(netloc)
    return sock, hostname
"def",
"connect",
"(",
"url",
")",
":",
"url",
"=",
"urlparse",
"(",
"url",
")",
"if",
"url",
".",
"scheme",
"==",
"'tcp'",
":",
"sock",
"=",
"socket",
"(",
")",
"netloc",
"=",
"tuple",
"(",
"url",
".",
"netloc",
".",
"rsplit",
"(",
"':'",
",",
... | 27.210526 | 16.368421 |
def download(url, file=None):
    """
    Pass file as a filename, open file object, or None to return the request bytes

    Args:
        url (str): URL of file to download
        file (Union[str, io, None]): One of the following:

            - Filename of output file
            - File opened in binary write mode
            - None: Return raw bytes instead

    Returns:
        Union[bytes, None]: Bytes of file if file is None
    """
    import urllib.request
    import shutil

    # Only close the file if *we* opened it from a filename; a caller-provided
    # file object stays open (the previous version closed it, taking ownership
    # of a handle it didn't create).
    opened_here = isinstance(file, str)
    if opened_here:
        file = open(file, 'wb')
    try:
        with urllib.request.urlopen(url) as response:
            if file:
                shutil.copyfileobj(response, file)
            else:
                return response.read()
    finally:
        if opened_here and file:
            file.close()
"def",
"download",
"(",
"url",
",",
"file",
"=",
"None",
")",
":",
"import",
"urllib",
".",
"request",
"import",
"shutil",
"if",
"isinstance",
"(",
"file",
",",
"str",
")",
":",
"file",
"=",
"open",
"(",
"file",
",",
"'wb'",
")",
"try",
":",
"with"... | 28.62963 | 17.37037 |
def get_resources(self, ids, cache=True):
    """Support server side filtering on arns or names
    """
    # ARNs and names use different server-side filter parameters.
    param_key = 'LoadBalancerArns' if ids[0].startswith('arn:') else 'Names'
    return self.query.filter(self.manager, **{param_key: ids})
"def",
"get_resources",
"(",
"self",
",",
"ids",
",",
"cache",
"=",
"True",
")",
":",
"if",
"ids",
"[",
"0",
"]",
".",
"startswith",
"(",
"'arn:'",
")",
":",
"params",
"=",
"{",
"'LoadBalancerArns'",
":",
"ids",
"}",
"else",
":",
"params",
"=",
"{"... | 37 | 7.25 |
def is_panel_visible_for_user(panel, user):
    """
    Checks if the user is allowed to see the panel

    :param panel: panel ID as string
    :param user: a MemberData object
    :return: Boolean
    """
    user_roles = user.getRoles()
    visibility = get_dashboard_panels_visibility_by_section(panel)
    # Visible as soon as any (role, 'yes') pair matches one of the user's roles.
    return any(pair[0] in user_roles and pair[1] == 'yes'
               for pair in visibility)
"def",
"is_panel_visible_for_user",
"(",
"panel",
",",
"user",
")",
":",
"roles",
"=",
"user",
".",
"getRoles",
"(",
")",
"visibility",
"=",
"get_dashboard_panels_visibility_by_section",
"(",
"panel",
")",
"for",
"pair",
"in",
"visibility",
":",
"if",
"pair",
... | 31.307692 | 11 |
def content_type(self, request=None, response=None):
    """Returns the content type that should be used by default for this endpoint"""
    declared = self.outputs.content_type
    # A callable content type is resolved per request/response pair.
    if callable(declared):
        return declared(request=request, response=response)
    return declared
"def",
"content_type",
"(",
"self",
",",
"request",
"=",
"None",
",",
"response",
"=",
"None",
")",
":",
"if",
"callable",
"(",
"self",
".",
"outputs",
".",
"content_type",
")",
":",
"return",
"self",
".",
"outputs",
".",
"content_type",
"(",
"request",
... | 53.833333 | 15 |
def _brownian_eigs(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc):
    """Analytic eigenvalues/eigenvectors for 1D Brownian dynamics
    """
    transmat = _brownian_transmat(n_grid, lag_time, grad_potential,
                                  xmin, xmax, reflect_bc)
    # Solve for all but one eigenpair; only the right eigenvectors are returned.
    eigenvalues, _left, right = _solve_msm_eigensystem(
        transmat, k=len(transmat) - 1)
    return eigenvalues, right
"def",
"_brownian_eigs",
"(",
"n_grid",
",",
"lag_time",
",",
"grad_potential",
",",
"xmin",
",",
"xmax",
",",
"reflect_bc",
")",
":",
"transmat",
"=",
"_brownian_transmat",
"(",
"n_grid",
",",
"lag_time",
",",
"grad_potential",
",",
"xmin",
",",
"xmax",
","... | 54.166667 | 23.5 |
def _wait_for_exec_ready(self):
    """
    Wait for response.

    :return: CliResponse object coming in
    :raises: TestStepTimeout, TestStepError
    """
    # Poll in 1 s slices so we can check the timeout and DUT liveness
    # between waits; query_timeout == 0 means "no timeout armed".
    while not self.response_received.wait(1) and self.query_timeout != 0:
        if self.query_timeout != 0 and self.query_timeout < self.get_time():
            if self.prev:
                cmd = self.prev.cmd
            else:
                cmd = "???"
            self.logger.error("CMD timeout: "+ cmd)
            # Disarm the timeout before raising so later waits don't re-fire.
            self.query_timeout = 0
            raise TestStepTimeout(self.name + " CMD timeout: " + cmd)
        self.logger.debug("Waiting for response... "
                          "timeout=%d", self.query_timeout - self.get_time())
        # Raises if the DUT process has died while we were waiting.
        self._dut_is_alive()
    # -1 is the sentinel value for "connection lost / DUT died".
    if self.response_coming_in == -1:
        if self.query_async_response is not None:
            # fullfill the async response with a dummy response and clean the state
            self.query_async_response.set_response(CliResponse())
            self.query_async_response = None
        # raise and log the error
        self.logger.error("No response received, DUT died")
        raise TestStepError("No response received, DUT "+self.name+" died")
    # if an async response is pending, fullfill it with the result
    if self.query_async_response is not None:
        self.query_async_response.set_response(self.response_coming_in)
        self.query_async_response = None
    self.query_timeout = 0
    return self.response_coming_in
"def",
"_wait_for_exec_ready",
"(",
"self",
")",
":",
"while",
"not",
"self",
".",
"response_received",
".",
"wait",
"(",
"1",
")",
"and",
"self",
".",
"query_timeout",
"!=",
"0",
":",
"if",
"self",
".",
"query_timeout",
"!=",
"0",
"and",
"self",
".",
... | 43.5 | 18.888889 |
def replace_first_key_in_makefile(buf, key, replacement, outfile=None):
    '''
    Replaces first line in 'buf' matching 'key' with 'replacement'.
    Optionally, writes out this new buffer into 'outfile'.

    Returns: Buffer after replacement has been done

    Raises: ValueError if 'key' does not match anything in 'buf'.
    '''
    # NOTE: `key` is deliberately interpolated unescaped so callers may pass a
    # regex fragment (e.g. '\\S+' to match every key).
    regexp = re.compile(r'''
        \n\s* # there might be some leading spaces
        ( # start group to return
        (?:{0}\s*) # placeholder for tags to detect '\S+' == all
        \s*:*=\s* # optional spaces, optional colon, = , optional spaces
        .* # the value
        ) # end group to return
        '''.format(key), re.VERBOSE)
    matches = regexp.findall(buf)
    # Bug fix: re.findall() returns an empty list (never None) when nothing
    # matches, so the previous `matches is None` check could never trigger.
    if not matches:
        msg = "Could not find key = {0} in the provided buffer. "\
              "Pattern used = {1}".format(key, regexp.pattern)
        raise ValueError(msg)
    # Only replace the first occurence
    newbuf = regexp.sub(replacement, buf, count=1)
    if outfile is not None:
        write_text_file(outfile, newbuf)
    return newbuf
"def",
"replace_first_key_in_makefile",
"(",
"buf",
",",
"key",
",",
"replacement",
",",
"outfile",
"=",
"None",
")",
":",
"regexp",
"=",
"re",
".",
"compile",
"(",
"r'''\n \\n\\s* # there might be some leading spaces\n ( # start group to return\... | 36.285714 | 19.714286 |
def _init_repo(self):
    """Create and initialize a new Git repo at ``self._engine_path``.

    Makes an initial commit recording the creation date and returns the
    :class:`git.Repo` object.

    Raises:
        RuntimeError: if the target path already exists.
    """
    log.debug("initializing new Git Repo: {0}".format(self._engine_path))
    if os.path.exists(self._engine_path):
        log.error("Path already exists! Aborting!")
        # Include the offending path so the failure is actionable
        # (previously raised a bare RuntimeError with no message).
        raise RuntimeError(
            "Cannot initialize Git repo, path already exists: {0}".format(
                self._engine_path))
    # create the repo if it doesn't already exist
    _logg_repo = git.Repo.init(path=self._engine_path, mkdir=True)
    record = "idid Logg repo initialized on {0}".format(today())
    c = _logg_repo.index.commit(record)
    assert c.type == 'commit'
    log.info('Created git repo [{0}]'.format(self._engine_path))
    return _logg_repo
"def",
"_init_repo",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"initializing new Git Repo: {0}\"",
".",
"format",
"(",
"self",
".",
"_engine_path",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_engine_path",
")",
":",
"lo... | 48.357143 | 18.071429 |
def __wait_for_one_server_connection(self):
    """Wait until at least one server is connected. Since quitting relies
    on a bunch of loops terminating, attempting to quit [cleanly]
    immediately will still have to wait for the connections to finish
    starting.

    :raises IOError: if a connection gave up or its gthread crashed.
    :raises SystemError: if a connection gthread died silently.
    """
    _logger.info("Waiting for first connection.")
    while 1:
        is_connected_to_one = False
        # self.__connections holds (node, connection, gthread) triples.
        for (n, c, g) in self.__connections:
            if c.is_connected is True:
                is_connected_to_one = True
                break
            # NOTE(review): comparing the exception *instance* to the class
            # with == looks off — presumably isinstance was intended; confirm.
            elif g.exception == nsq.exceptions.NsqConnectGiveUpError:
                raise IOError("One of the servers could not be connected "
                              "during startup: [%s]" % (c))
            elif g.exception is not None:
                # The connection gthread crashed for some other reason.
                raise IOError("One of the connection gthreads had an "
                              "uncaught exception during startup: [%s] "
                              "[%s]" %
                              (g.exception.__class__.__name__,
                               str(g.exception)))
            elif g.dead is True:
                raise SystemError("One of the connection gthreads died "
                                  "during startup: [%s]" % (c,))
        if is_connected_to_one is True:
            break
        # Back off briefly before re-auditing the connection states.
        gevent.sleep(nsq.config.client.CONNECT_AUDIT_WAIT_INTERVAL_S)
"def",
"__wait_for_one_server_connection",
"(",
"self",
")",
":",
"_logger",
".",
"info",
"(",
"\"Waiting for first connection.\"",
")",
"while",
"1",
":",
"is_connected_to_one",
"=",
"False",
"for",
"(",
"n",
",",
"c",
",",
"g",
")",
"in",
"self",
".",
"__c... | 45.34375 | 20.4375 |
def add_triple(self, sub, pred=None, obj=None, **kwargs):
    """ Adds a triple to the dataset

    args:
        sub: The subject of the triple or dictionary contaning a
             triple
        pred: Optional if supplied in sub, predicate of the triple
        obj: Optional if supplied in sub, object of the triple

    kwargs:
        map: Optional, a ditionary mapping for a supplied dictionary
        strip_orphans: Optional, remove triples that have an orphan
                       blanknode as the object
        obj_method: if "list" than the object will be returned in the
                    form of a list
    """
    # Refresh self.smap/pmap/omap from kwargs before they are read below.
    self.__set_map__(**kwargs)
    strip_orphans = kwargs.get("strip_orphans", False)
    # NOTE(review): obj_method is read but never used in this method —
    # possibly consumed indirectly via **kwargs elsewhere; confirm.
    obj_method = kwargs.get("obj_method")
    # A dict-like subject carries the whole triple; unpack it via the maps.
    if isinstance(sub, DictClass) or isinstance(sub, dict):
        pred = sub[self.pmap]
        obj = sub[self.omap]
        sub = sub[self.smap]
    # Normalize all three terms to rdf terms.
    pred = pyrdf(pred)
    obj = pyrdf(obj)
    sub = pyrdf(sub)
    # reference existing attr for bnodes and uris
    if obj.type in self.relate_obj_types :
        if strip_orphans and not self.get(obj):
            # Drop the triple entirely: its object is an orphaned blank node.
            return
        obj = self.get(obj,obj)
    try:
        self[sub].add_property(pred, obj)
    except KeyError:
        # First triple for this subject: create its class entry, then attach.
        self[sub] = RdfClassBase(sub, self, **kwargs)
        self[sub].add_property(pred, obj)
"def",
"add_triple",
"(",
"self",
",",
"sub",
",",
"pred",
"=",
"None",
",",
"obj",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"__set_map__",
"(",
"*",
"*",
"kwargs",
")",
"strip_orphans",
"=",
"kwargs",
".",
"get",
"(",
"\"strip_... | 39.837838 | 17.864865 |
def msvc_output_flag(target, source, env, for_signature):
    """
    Returns the correct /Fo flag for batching.

    If batching is disabled or there's only one source file, then we
    return an /Fo string that specifies the target explicitly. Otherwise,
    we return an /Fo string that just specifies the first target's
    directory (where the Visual C/C++ compiler will put the .obj files).
    """
    # Batch mode is on only when MSVC_BATCH is set AND substitutes to a
    # truthy value (handles MSVC_BATCH=False/0/'' correctly).
    batching = ('MSVC_BATCH' in env
                and env.subst('$MSVC_BATCH') not in ('0', 'False', '', None))
    if not batching:
        return '/Fo$TARGET'
    # The trailing separator tells cl.exe that /Fo names an output
    # *directory* rather than a file. Using os.sep (instead of a hard-coded
    # backslash) keeps command-line parsing tests runnable on non-Windows
    # hosts.
    return '/Fo${TARGET.dir}' + os.sep
"def",
"msvc_output_flag",
"(",
"target",
",",
"source",
",",
"env",
",",
"for_signature",
")",
":",
"# Fixing MSVC_BATCH mode. Previous if did not work when MSVC_BATCH",
"# was set to False. This new version should work better. Removed",
"# len(source)==1 as batch mode can compile only ... | 48.541667 | 23.541667 |
def _output_from_file(self, entry='git_describe'):
"""
Read the version from a .version file that may exist alongside __init__.py.
This file can be generated by piping the following output to file:
git describe --long --match v*.*
"""
try:
vfile = os.path.join(os.path.dirname(self.fpath), '.version')
with open(vfile, 'r') as f:
return json.loads(f.read()).get(entry, None)
except: # File may be missing if using pip + git archive
return None | [
"def",
"_output_from_file",
"(",
"self",
",",
"entry",
"=",
"'git_describe'",
")",
":",
"try",
":",
"vfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"fpath",
")",
",",
"'.version'",
")",
"with",
... | 38.571429 | 20.714286 |
def octocat(self, say=None):
    """Returns an easter egg of the API.

    :params str say: (optional), pass in what you'd like Octocat to say
    :returns: ascii art of Octocat
    """
    response = self._get(self._build_url('octocat'), params={'s': say})
    if response.ok:
        return response.content
    return ''
"def",
"octocat",
"(",
"self",
",",
"say",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'octocat'",
")",
"req",
"=",
"self",
".",
"_get",
"(",
"url",
",",
"params",
"=",
"{",
"'s'",
":",
"say",
"}",
")",
"return",
"req",
... | 36.333333 | 11.111111 |
def shutdown(self, msg):
    """Shutdown the scheduler.

    Performs cleanup, records history, emails a report, dumps collected
    exceptions, prints a summary, finalizes the flow when it completed
    successfully, and — always — shuts the scheduler down.

    :param msg: Human-readable reason for the shutdown; included in the
        summary, the exceptions dump and the notification email.
    """
    try:
        self.cleanup()
        self.history.append("Completed on: %s" % time.asctime())
        self.history.append("Elapsed time: %s" % self.get_delta_etime())
        if self.debug:
            print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())
        retcode = self.send_email(msg)
        if self.debug:
            print("send_mail retcode", retcode)
        # Write file with the list of exceptions:
        if self.exceptions:
            dump_file = os.path.join(self.flow.workdir, "_exceptions")
            with open(dump_file, "wt") as fh:
                fh.writelines(self.exceptions)
                fh.write("Shutdown message:\n%s" % msg)
        # Build and print the human-readable summary.
        lines = []
        app = lines.append
        app("Submitted on: %s" % time.ctime(self.start_time))
        app("Completed on: %s" % time.asctime())
        app("Elapsed time: %s" % str(self.get_delta_etime()))
        if self.flow.all_ok:
            app("Flow completed successfully")
        else:
            app("Flow %s didn't complete successfully" % repr(self.flow.workdir))
            app("use `abirun.py FLOWDIR debug` to analyze the problem.")
        app("Shutdown message:\n%s" % msg)
        print("")
        print("\n".join(lines))
        print("")
        self._do_customer_service()
        if self.flow.all_ok:
            print("Calling flow.finalize()...")
            self.flow.finalize()
            #print("finalized:", self.flow.finalized)
            if self.rmflow:
                app("Flow directory will be removed...")
                try:
                    self.flow.rmtree()
                except Exception:
                    # Best-effort removal; failure here must not block shutdown.
                    logger.warning("Ignoring exception while trying to remove flow dir.")
    finally:
        # Shutdown the scheduler thus allowing the process to exit.
        logger.debug('This should be the shutdown of the scheduler')
        # Unschedule all the jobs before calling shutdown
        #self.sched.print_jobs()
        if not has_sched_v3:
            for job in self.sched.get_jobs():
                self.sched.unschedule_job(job)
        #self.sched.print_jobs()
        self.sched.shutdown()
"def",
"shutdown",
"(",
"self",
",",
"msg",
")",
":",
"try",
":",
"self",
".",
"cleanup",
"(",
")",
"self",
".",
"history",
".",
"append",
"(",
"\"Completed on: %s\"",
"%",
"time",
".",
"asctime",
"(",
")",
")",
"self",
".",
"history",
".",
"append",... | 37.125 | 21.078125 |
def get_annotation(db_path, db_list):
""" Checks if database is set as annotated. """
annotated = False
for db in db_list:
if db["path"] == db_path:
annotated = db["annotated"]
break
return annotated | [
"def",
"get_annotation",
"(",
"db_path",
",",
"db_list",
")",
":",
"annotated",
"=",
"False",
"for",
"db",
"in",
"db_list",
":",
"if",
"db",
"[",
"\"path\"",
"]",
"==",
"db_path",
":",
"annotated",
"=",
"db",
"[",
"\"annotated\"",
"]",
"break",
"return",... | 24 | 17.1 |
def has(self, name):
"""
Returns True if there is atleast one annotation by a given name, otherwise False.
"""
for a in self.all_annotations:
if a.name == name:
return True
return False | [
"def",
"has",
"(",
"self",
",",
"name",
")",
":",
"for",
"a",
"in",
"self",
".",
"all_annotations",
":",
"if",
"a",
".",
"name",
"==",
"name",
":",
"return",
"True",
"return",
"False"
] | 30.75 | 14.25 |
def _get_distance_term(self, C, mag, rrup):
"""
Returns the distance scaling term
"""
return (C['C4'] + C['C5'] * (mag - 6.3)) *\
np.log(np.sqrt(rrup ** 2. + np.exp(C['H']) ** 2.)) | [
"def",
"_get_distance_term",
"(",
"self",
",",
"C",
",",
"mag",
",",
"rrup",
")",
":",
"return",
"(",
"C",
"[",
"'C4'",
"]",
"+",
"C",
"[",
"'C5'",
"]",
"*",
"(",
"mag",
"-",
"6.3",
")",
")",
"*",
"np",
".",
"log",
"(",
"np",
".",
"sqrt",
"... | 36.5 | 6.166667 |
def random_rescale_to_mahalanobis(self, x):
"""change `x` like for injection, all on genotypic level"""
x -= self.mean
if any(x):
x *= sum(self.randn(len(x))**2)**0.5 / self.mahalanobis_norm(x)
x += self.mean
return x | [
"def",
"random_rescale_to_mahalanobis",
"(",
"self",
",",
"x",
")",
":",
"x",
"-=",
"self",
".",
"mean",
"if",
"any",
"(",
"x",
")",
":",
"x",
"*=",
"sum",
"(",
"self",
".",
"randn",
"(",
"len",
"(",
"x",
")",
")",
"**",
"2",
")",
"**",
"0.5",
... | 37.571429 | 17.142857 |
def scatter(self, x, y, xerr=None, yerr=None, cov=None, corr=None, s_expr=None, c_expr=None, labels=None, selection=None, length_limit=50000,
length_check=True, label=None, xlabel=None, ylabel=None, errorbar_kwargs={}, ellipse_kwargs={}, **kwargs):
"""Viz (small amounts) of data in 2d using a scatter plot
Convenience wrapper around pylab.scatter when for working with small DataFrames or selections
:param x: Expression for x axis
:param y: Idem for y
:param s_expr: When given, use if for the s (size) argument of pylab.scatter
:param c_expr: When given, use if for the c (color) argument of pylab.scatter
:param labels: Annotate the points with these text values
:param selection: Single selection expression, or None
:param length_limit: maximum number of rows it will plot
:param length_check: should we do the maximum row check or not?
:param label: label for the legend
:param xlabel: label for x axis, if None .label(x) is used
:param ylabel: label for y axis, if None .label(y) is used
:param errorbar_kwargs: extra dict with arguments passed to plt.errorbar
:param kwargs: extra arguments passed to pylab.scatter
:return:
"""
import pylab as plt
x = _ensure_strings_from_expressions(x)
y = _ensure_strings_from_expressions(y)
label = str(label or selection)
selection = _ensure_strings_from_expressions(selection)
if length_check:
count = self.count(selection=selection)
if count > length_limit:
raise ValueError("the number of rows (%d) is above the limit (%d), pass length_check=False, or increase length_limit" % (count, length_limit))
x_values = self.evaluate(x, selection=selection)
y_values = self.evaluate(y, selection=selection)
if s_expr:
kwargs["s"] = self.evaluate(s_expr, selection=selection)
if c_expr:
kwargs["c"] = self.evaluate(c_expr, selection=selection)
plt.xlabel(xlabel or self.label(x))
plt.ylabel(ylabel or self.label(y))
s = plt.scatter(x_values, y_values, label=label, **kwargs)
if labels:
label_values = self.evaluate(labels, selection=selection)
for i, label_value in enumerate(label_values):
plt.annotate(label_value, (x_values[i], y_values[i]))
xerr_values = None
yerr_values = None
if cov is not None or corr is not None:
from matplotlib.patches import Ellipse
sx = self.evaluate(xerr, selection=selection)
sy = self.evaluate(yerr, selection=selection)
if corr is not None:
sxy = self.evaluate(corr, selection=selection) * sx * sy
elif cov is not None:
sxy = self.evaluate(cov, selection=selection)
cov_matrix = np.zeros((len(sx), 2, 2))
cov_matrix[:,0,0] = sx**2
cov_matrix[:,1,1] = sy**2
cov_matrix[:,0,1] = cov_matrix[:,1,0] = sxy
ax = plt.gca()
ellipse_kwargs = dict(ellipse_kwargs)
ellipse_kwargs['facecolor'] = ellipse_kwargs.get('facecolor', 'none')
ellipse_kwargs['edgecolor'] = ellipse_kwargs.get('edgecolor', 'black')
for i in range(len(sx)):
eigen_values, eigen_vectors = np.linalg.eig(cov_matrix[i])
indices = np.argsort(eigen_values)[::-1]
eigen_values = eigen_values[indices]
eigen_vectors = eigen_vectors[:,indices]
v1 = eigen_vectors[:, 0]
v2 = eigen_vectors[:, 1]
varx = cov_matrix[i, 0, 0]
vary = cov_matrix[i, 1, 1]
angle = np.arctan2(v1[1], v1[0])
# round off errors cause negative values?
if eigen_values[1] < 0 and abs((eigen_values[1]/eigen_values[0])) < 1e-10:
eigen_values[1] = 0
if eigen_values[0] < 0 or eigen_values[1] < 0:
raise ValueError('neg val')
width, height = np.sqrt(np.max(eigen_values)), np.sqrt(np.min(eigen_values))
e = Ellipse(xy=(x_values[i], y_values[i]), width=width, height=height, angle=np.degrees(angle), **ellipse_kwargs)
ax.add_artist(e)
else:
if xerr is not None:
if _issequence(xerr):
assert len(xerr) == 2, "if xerr is a sequence it should be of length 2"
xerr_values = [self.evaluate(xerr[0], selection=selection), self.evaluate(xerr[1], selection=selection)]
else:
xerr_values = self.evaluate(xerr, selection=selection)
if yerr is not None:
if _issequence(yerr):
assert len(yerr) == 2, "if yerr is a sequence it should be of length 2"
yerr_values = [self.evaluate(yerr[0], selection=selection), self.evaluate(yerr[1], selection=selection)]
else:
yerr_values = self.evaluate(yerr, selection=selection)
if xerr_values is not None or yerr_values is not None:
errorbar_kwargs = dict(errorbar_kwargs)
errorbar_kwargs['fmt'] = errorbar_kwargs.get('fmt', 'none')
plt.errorbar(x_values, y_values, yerr=yerr_values, xerr=xerr_values, **errorbar_kwargs)
return s | [
"def",
"scatter",
"(",
"self",
",",
"x",
",",
"y",
",",
"xerr",
"=",
"None",
",",
"yerr",
"=",
"None",
",",
"cov",
"=",
"None",
",",
"corr",
"=",
"None",
",",
"s_expr",
"=",
"None",
",",
"c_expr",
"=",
"None",
",",
"labels",
"=",
"None",
",",
... | 51.814433 | 22.907216 |
def _intertext_score(full_text):
'''returns tuple of scored sentences
in order of appearance
Note: Doing an A/B test to
compare results, reverting to
original algorithm.'''
sentences = sentence_tokenizer(full_text)
norm = _normalize(sentences)
similarity_matrix = pairwise_kernels(norm, metric='cosine')
scores = _textrank(similarity_matrix)
scored_sentences = []
for i, s in enumerate(sentences):
scored_sentences.append((scores[i],i,s))
top_scorers = sorted(scored_sentences,
key=lambda tup: tup[0],
reverse=True)
return top_scorers | [
"def",
"_intertext_score",
"(",
"full_text",
")",
":",
"sentences",
"=",
"sentence_tokenizer",
"(",
"full_text",
")",
"norm",
"=",
"_normalize",
"(",
"sentences",
")",
"similarity_matrix",
"=",
"pairwise_kernels",
"(",
"norm",
",",
"metric",
"=",
"'cosine'",
")"... | 35.666667 | 9.666667 |
def count_lines_of_code(self, fname=''):
""" counts non blank lines """
if fname == '':
fname = self.fullname
loc = 0
try:
with open(fname) as f:
for l in f:
if l.strip() != '':
loc += 1
return loc
except Exception as ex:
print('cant count lines of code in "', fname, '":', str(ex))
return 0 | [
"def",
"count_lines_of_code",
"(",
"self",
",",
"fname",
"=",
"''",
")",
":",
"if",
"fname",
"==",
"''",
":",
"fname",
"=",
"self",
".",
"fullname",
"loc",
"=",
"0",
"try",
":",
"with",
"open",
"(",
"fname",
")",
"as",
"f",
":",
"for",
"l",
"in",... | 31.857143 | 12.571429 |
def search_track(self, artist, album=None, track=None,
full_album_art_uri=False):
"""Search for an artist, an artist's albums, or specific track.
Args:
artist (str): an artist's name.
album (str, optional): an album name. Default `None`.
track (str, optional): a track name. Default `None`.
full_album_art_uri (bool): whether the album art URI should be
absolute (i.e. including the IP address). Default `False`.
Returns:
A `SearchResult` instance.
"""
subcategories = [artist]
subcategories.append(album or '')
# Perform the search
result = self.get_album_artists(
full_album_art_uri=full_album_art_uri,
subcategories=subcategories, search_term=track,
complete_result=True)
result._metadata['search_type'] = 'search_track'
return result | [
"def",
"search_track",
"(",
"self",
",",
"artist",
",",
"album",
"=",
"None",
",",
"track",
"=",
"None",
",",
"full_album_art_uri",
"=",
"False",
")",
":",
"subcategories",
"=",
"[",
"artist",
"]",
"subcategories",
".",
"append",
"(",
"album",
"or",
"''"... | 38.75 | 16.916667 |
def register_module(self, module, namespace=None):
"""
Register a module.
:param module: must be a string or a module object to register.
:type module: str
:param namespace: Namespace tag. If it is None module will be used as namespace tag
:type namespace: str
"""
namespace = namespace if namespace is not None else module \
if isinstance(module, str) else module.__name__
self.register_namespace(namespace, module) | [
"def",
"register_module",
"(",
"self",
",",
"module",
",",
"namespace",
"=",
"None",
")",
":",
"namespace",
"=",
"namespace",
"if",
"namespace",
"is",
"not",
"None",
"else",
"module",
"if",
"isinstance",
"(",
"module",
",",
"str",
")",
"else",
"module",
... | 40.833333 | 19.166667 |
def insert_ising_model(cur, nodelist, edgelist, linear, quadratic, offset, encoded_data=None):
"""Insert an Ising model into the cache.
Args:
cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function
is meant to be run within a :obj:`with` statement.
nodelist (list): The nodes in the graph.
edgelist (list): The edges in the graph.
linear (dict): The linear bias associated with each node in nodelist.
quadratic (dict): The quadratic bias associated with teach edge in edgelist.
offset (float): The constant offset applied to the ising problem.
encoded_data (dict, optional): If a dictionary is provided, it
will be populated with the serialized data. This is useful for
preventing encoding the same information many times.
"""
if encoded_data is None:
encoded_data = {}
# insert graph and partially populate encoded_data with graph info
insert_graph(cur, nodelist, edgelist, encoded_data=encoded_data)
# need to encode the biases
if 'linear_biases' not in encoded_data:
encoded_data['linear_biases'] = _serialize_linear_biases(linear, nodelist)
if 'quadratic_biases' not in encoded_data:
encoded_data['quadratic_biases'] = _serialize_quadratic_biases(quadratic, edgelist)
if 'offset' not in encoded_data:
encoded_data['offset'] = offset
if 'max_quadratic_bias' not in encoded_data:
encoded_data['max_quadratic_bias'] = max(itervalues(quadratic))
if 'min_quadratic_bias' not in encoded_data:
encoded_data['min_quadratic_bias'] = min(itervalues(quadratic))
if 'max_linear_bias' not in encoded_data:
encoded_data['max_linear_bias'] = max(itervalues(linear))
if 'min_linear_bias' not in encoded_data:
encoded_data['min_linear_bias'] = min(itervalues(linear))
insert = \
"""
INSERT OR IGNORE INTO ising_model(
linear_biases,
quadratic_biases,
offset,
max_quadratic_bias,
min_quadratic_bias,
max_linear_bias,
min_linear_bias,
graph_id)
SELECT
:linear_biases,
:quadratic_biases,
:offset,
:max_quadratic_bias,
:min_quadratic_bias,
:max_linear_bias,
:min_linear_bias,
graph.id
FROM graph WHERE
num_nodes = :num_nodes AND
num_edges = :num_edges AND
edges = :edges;
"""
cur.execute(insert, encoded_data) | [
"def",
"insert_ising_model",
"(",
"cur",
",",
"nodelist",
",",
"edgelist",
",",
"linear",
",",
"quadratic",
",",
"offset",
",",
"encoded_data",
"=",
"None",
")",
":",
"if",
"encoded_data",
"is",
"None",
":",
"encoded_data",
"=",
"{",
"}",
"# insert graph and... | 38.923077 | 18.969231 |
def simulate(self, T):
"""Simulate state and observation processes.
Parameters
----------
T: int
processes are simulated from time 0 to time T-1
Returns
-------
x, y: lists
lists of length T
"""
x = []
for t in range(T):
law_x = self.PX0() if t == 0 else self.PX(t, x[-1])
x.append(law_x.rvs(size=1))
y = self.simulate_given_x(x)
return x, y | [
"def",
"simulate",
"(",
"self",
",",
"T",
")",
":",
"x",
"=",
"[",
"]",
"for",
"t",
"in",
"range",
"(",
"T",
")",
":",
"law_x",
"=",
"self",
".",
"PX0",
"(",
")",
"if",
"t",
"==",
"0",
"else",
"self",
".",
"PX",
"(",
"t",
",",
"x",
"[",
... | 24.736842 | 18.842105 |
def compute_canonical_key_ids(self, search_amplifier=100):
"""
A canonical key id is the lowest integer key id that maps to
a particular shard. The mapping to canonical key ids depends on the
number of shards.
Returns a dictionary mapping from shard number to canonical key id.
This method will throw an exception if it fails to compute all of
the canonical key ids.
"""
canonical_keys = {}
num_shards = self.num_shards()
# Guarantees enough to find all keys without running forever
num_iterations = (num_shards**2) * search_amplifier
for key_id in range(1, num_iterations):
shard_num = self.get_shard_num_by_key(str(key_id))
if shard_num in canonical_keys:
continue
canonical_keys[shard_num] = str(key_id)
if len(canonical_keys) == num_shards:
break
if len(canonical_keys) != num_shards:
raise ValueError("Failed to compute enough keys. " +
"Wanted %d, got %d (search_amp=%d).".format(
num_shards, len(canonical_keys),
search_amplifier))
return canonical_keys | [
"def",
"compute_canonical_key_ids",
"(",
"self",
",",
"search_amplifier",
"=",
"100",
")",
":",
"canonical_keys",
"=",
"{",
"}",
"num_shards",
"=",
"self",
".",
"num_shards",
"(",
")",
"# Guarantees enough to find all keys without running forever",
"num_iterations",
"="... | 41.4 | 19.733333 |
def roulette_selection(population, fitnesses):
"""Create a list of parents with roulette selection."""
probabilities = _fitnesses_to_probabilities(fitnesses)
intermediate_population = []
for _ in range(len(population)):
# Choose a random individual
selection = random.uniform(0.0, 1.0)
# Iterate over probabilities list
for i, probability in enumerate(probabilities):
if probability >= selection: # First probability that is greater
intermediate_population.append(population[i])
break
return intermediate_population | [
"def",
"roulette_selection",
"(",
"population",
",",
"fitnesses",
")",
":",
"probabilities",
"=",
"_fitnesses_to_probabilities",
"(",
"fitnesses",
")",
"intermediate_population",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"population",
")",
")",
... | 40 | 14.866667 |
def assert_headers(context):
"""
:type context: behave.runner.Context
"""
expected_headers = [(k, v) for k, v in row_table(context).items()]
request = httpretty.last_request()
actual_headers = request.headers.items()
for expected_header in expected_headers:
assert_in(expected_header, actual_headers) | [
"def",
"assert_headers",
"(",
"context",
")",
":",
"expected_headers",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"row_table",
"(",
"context",
")",
".",
"items",
"(",
")",
"]",
"request",
"=",
"httpretty",
".",
"last_request",
"("... | 33.1 | 10.5 |
def extractDate(text):
""" Tries to extract a date from a given :obj:`str`.
:param str text: Input date. A :obj:`datetime.date` object is passed
thought without modification.
:rtype: :obj:`datetime.date`"""
if type(text) is datetime.date:
return text
match = date_format.search(text.lower())
if not match:
raise ValueError('unsupported date format: {0}'.format(text.lower()))
# convert DD.MM.YYYY into YYYY-MM-DD
if match.group('month'):
if not match.group('month') in month_names:
raise ValueError('unknown month names: "{0}"'
.format(match.group('month')))
year = int(match.group('year'))
return datetime.date(
year if year > 2000 else 2000 + year,
int(month_names[match.group('month')]),
int(match.group('day')))
else:
parts = list(map(lambda v: int(v), '-'.join(reversed(
match.group('datestr').split('.'))).split('-')))
if parts[0] < 2000:
parts[0] += 2000
return datetime.date(*parts) | [
"def",
"extractDate",
"(",
"text",
")",
":",
"if",
"type",
"(",
"text",
")",
"is",
"datetime",
".",
"date",
":",
"return",
"text",
"match",
"=",
"date_format",
".",
"search",
"(",
"text",
".",
"lower",
"(",
")",
")",
"if",
"not",
"match",
":",
"rai... | 40.259259 | 14.148148 |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(OpenstackSwiftReconCollector, self).get_default_config()
config.update({
'path': 'swiftrecon',
'recon_account_cache': '/var/cache/swift/account.recon',
'recon_container_cache': '/var/cache/swift/container.recon',
'recon_object_cache': '/var/cache/swift/object.recon',
'interval': 300,
})
return config | [
"def",
"get_default_config",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"OpenstackSwiftReconCollector",
",",
"self",
")",
".",
"get_default_config",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'path'",
":",
"'swiftrecon'",
",",
"'recon_account_cache'"... | 38.230769 | 17.461538 |
def entitlements(self, request, pk=None): # pylint: disable=invalid-name,unused-argument
"""
Retrieve the list of entitlements available to this learner.
Only those entitlements are returned that satisfy enterprise customer's data sharing setting.
Arguments:
request (HttpRequest): Reference to in-progress request instance.
pk (Int): Primary key value of the selected enterprise learner.
Returns:
(HttpResponse): Response object containing a list of learner's entitlements.
"""
enterprise_customer_user = self.get_object()
instance = {"entitlements": enterprise_customer_user.entitlements}
serializer = serializers.EnterpriseCustomerUserEntitlementSerializer(instance, context={'request': request})
return Response(serializer.data) | [
"def",
"entitlements",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"# pylint: disable=invalid-name,unused-argument",
"enterprise_customer_user",
"=",
"self",
".",
"get_object",
"(",
")",
"instance",
"=",
"{",
"\"entitlements\"",
":",
"enterprise_c... | 49.176471 | 32.117647 |
def exptime(self):
''' exptime: 下一個日期時間
:returns: 下一個預設時間
'''
return self.nextday + timedelta(hours=self.__hour - 8,
minutes=self.__minutes) | [
"def",
"exptime",
"(",
"self",
")",
":",
"return",
"self",
".",
"nextday",
"+",
"timedelta",
"(",
"hours",
"=",
"self",
".",
"__hour",
"-",
"8",
",",
"minutes",
"=",
"self",
".",
"__minutes",
")"
] | 30.142857 | 22.714286 |
def handle(cls, value, **kwargs):
"""Split the supplied string on the given delimiter, providing a list.
Format of value:
<delimiter>::<value>
For example:
Subnets: ${split ,::subnet-1,subnet-2,subnet-3}
Would result in the variable `Subnets` getting a list consisting of:
["subnet-1", "subnet-2", "subnet-3"]
This is particularly useful when getting an output from another stack
that contains a list. For example, the standard vpc blueprint outputs
the list of Subnets it creates as a pair of Outputs (PublicSubnets,
PrivateSubnets) that are comma separated, so you could use this in your
config:
Subnets: ${split ,::${output vpc::PrivateSubnets}}
"""
try:
delimiter, text = value.split("::", 1)
except ValueError:
raise ValueError("Invalid value for split: %s. Must be in "
"<delimiter>::<text> format." % value)
return text.split(delimiter) | [
"def",
"handle",
"(",
"cls",
",",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"delimiter",
",",
"text",
"=",
"value",
".",
"split",
"(",
"\"::\"",
",",
"1",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for ... | 33.16129 | 26.548387 |
def get_access_token(self, code=None, **params):
"""
Return the memoized access token or go out and fetch one.
"""
if self._access_token is None:
if code is None:
raise ValueError(_('Invalid code.'))
self.access_token_dict = self._get_access_token(code, **params)
try:
self._access_token = self.access_token_dict['access_token']
except KeyError, e:
raise OAuthError("Credentials could not be validated, the provider returned no access token.")
return self._access_token | [
"def",
"get_access_token",
"(",
"self",
",",
"code",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"if",
"self",
".",
"_access_token",
"is",
"None",
":",
"if",
"code",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"_",
"(",
"'Invalid code.'",
")",
... | 41.4 | 19.4 |
def hicpro_stats_table(self):
""" Add HiC-Pro stats to the general stats table """
headers = OrderedDict()
headers['percent_duplicates'] = {
'title': '% Duplicates',
'description': 'Percent of duplicated valid pairs (%)',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlOrRd',
'hidden': True
}
headers['valid_interaction_rmdup'] = {
'title': '{} Valid Pairs Unique'.format(config.read_count_prefix),
'description': 'Number of valid pairs after duplicates removal ({})'.format(config.read_count_desc),
'min': 0,
'scale': 'RdYlBu',
'modify': lambda x: x * config.read_count_multiplier,
'shared_key': 'read_count',
}
headers['percent_valid'] = {
'title': '% Valid Pairs',
'description': 'Percentage of valid pairs over reported ones (%)',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn',
'hidden': True
}
headers['valid_interaction'] = {
'title': '{} Valid Pairs'.format(config.read_count_prefix),
'description': 'Number of valid pairs ({})'.format(config.read_count_desc),
'min': 0,
'scale': 'RdYlBu',
'modify': lambda x: x * config.read_count_multiplier,
'shared_key': 'read_count',
'hidden': True
}
headers['percent_paired_reads'] = {
'title': '% Reported',
'description': 'Percentage of paired reads (%) passing the mapping filters',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn',
'hidden': True
}
headers['paired_reads'] = {
'title': 'Reported Read Pairs',
'description' : 'Total number of read pairs ({}) passing the mapping filters'.format(config.read_count_desc),
'min' : '0',
'scale' : 'RdYlBu',
'modify': lambda x: x * config.read_count_multiplier,
'shared_key' : 'read_count',
}
headers['percent_mapped_R2'] = {
'title': '% Aligned [R2]',
'description': 'Percentage of aligned reads [R2] (%)',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn',
'hidden': True
}
headers['mapped_R2'] = {
'title': 'Aligned [R2]',
'description' : 'Total number of aligned reads [R2] ({})'.format(config.read_count_desc),
'min' : '0',
'scale' : 'RdYlBu',
'modify': lambda x: x * config.read_count_multiplier,
'shared_key' : 'read_count',
'hidden': True
}
headers['percent_mapped_R1'] = {
'title': '% Aligned [R1]',
'description': 'Percentage of aligned reads [R1] (%)',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn',
'hidden': True
}
headers['mapped_R1'] = {
'title': 'Aligned [R1]',
'description' : 'Total number of aligned reads [R1] ({})'.format(config.read_count_desc),
'min' : '0',
'scale' : 'RdYlBu',
'modify': lambda x: x * config.read_count_multiplier,
'shared_key' : 'read_count',
'hidden': True
}
headers['total_R1'] = {
'title': 'Total',
'description' : 'Total Number of Read Pairs',
'min' : '0',
'scale' : 'RdYlBu',
'modify': lambda x: x * config.read_count_multiplier,
'shared_key' : 'read_count',
}
self.general_stats_addcols(self.hicpro_data, headers, 'HiC-Pro') | [
"def",
"hicpro_stats_table",
"(",
"self",
")",
":",
"headers",
"=",
"OrderedDict",
"(",
")",
"headers",
"[",
"'percent_duplicates'",
"]",
"=",
"{",
"'title'",
":",
"'% Duplicates'",
",",
"'description'",
":",
"'Percent of duplicated valid pairs (%)'",
",",
"'max'",
... | 34.955357 | 19.5 |
def interact_GxE_1dof(snps,pheno,env,K=None,covs=None, test='lrt'):
"""
Univariate GxE fixed effects interaction linear mixed model test for all
pairs of SNPs and environmental variables.
Args:
snps: [N x S] SP.array of S SNPs for N individuals
pheno: [N x 1] SP.array of 1 phenotype for N individuals
env: [N x E] SP.array of E environmental variables for N individuals
K: [N x N] SP.array of LMM-covariance/kinship koefficients (optional)
If not provided, then linear regression analysis is performed
covs: [N x D] SP.array of D covariates for N individuals
test: 'lrt' for likelihood ratio test (default) or 'f' for F-test
Returns:
pv: [E x S] SP.array of P values for interaction tests between all
E environmental variables and all S SNPs
"""
N=snps.shape[0]
if K is None:
K=SP.eye(N)
if covs is None:
covs = SP.ones((N,1))
assert (env.shape[0]==N and pheno.shape[0]==N and K.shape[0]==N and K.shape[1]==N and covs.shape[0]==N), "shapes missmatch"
Inter0 = SP.ones((N,1))
pv = SP.zeros((env.shape[1],snps.shape[1]))
print(("starting %i interaction scans for %i SNPs each." % (env.shape[1], snps.shape[1])))
t0=time.time()
for i in range(env.shape[1]):
t0_i = time.time()
cov_i = SP.concatenate((covs,env[:,i:(i+1)]),1)
lm_i = simple_interaction(snps=snps,pheno=pheno,covs=cov_i,Inter=env[:,i:(i+1)],Inter0=Inter0, test=test)
pv[i,:]=lm_i.getPv()[0,:]
t1_i = time.time()
print(("Finished %i out of %i interaction scans in %.2f seconds."%((i+1),env.shape[1],(t1_i-t0_i))))
t1 = time.time()
print(("-----------------------------------------------------------\nFinished all %i interaction scans in %.2f seconds."%(env.shape[1],(t1-t0))))
return pv | [
"def",
"interact_GxE_1dof",
"(",
"snps",
",",
"pheno",
",",
"env",
",",
"K",
"=",
"None",
",",
"covs",
"=",
"None",
",",
"test",
"=",
"'lrt'",
")",
":",
"N",
"=",
"snps",
".",
"shape",
"[",
"0",
"]",
"if",
"K",
"is",
"None",
":",
"K",
"=",
"S... | 49.052632 | 29.684211 |
def create_from_header(header):
""" Creates a File from an existing header,
allocating the array of point according to the provided header.
The input header is copied.
Parameters
----------
header : existing header to be used to create the file
Returns
-------
pylas.lasdatas.base.LasBase
"""
header = copy.copy(header)
header.point_count = 0
points = record.PackedPointRecord.empty(PointFormat(header.point_format_id))
if header.version >= "1.4":
return las14.LasData(header=header, points=points)
return las12.LasData(header=header, points=points) | [
"def",
"create_from_header",
"(",
"header",
")",
":",
"header",
"=",
"copy",
".",
"copy",
"(",
"header",
")",
"header",
".",
"point_count",
"=",
"0",
"points",
"=",
"record",
".",
"PackedPointRecord",
".",
"empty",
"(",
"PointFormat",
"(",
"header",
".",
... | 30.05 | 20.35 |
def _parse_message(self, data):
"""
Parse the bytes received from the socket.
:param data: the bytes received from the socket
:return:
"""
if TwitchChatStream._check_has_ping(data):
self._send_pong()
if TwitchChatStream._check_has_channel(data):
self.current_channel = \
TwitchChatStream._check_has_channel(data)[0]
if TwitchChatStream._check_has_message(data):
return {
'channel': re.findall(r'^:.+![a-zA-Z0-9_]+'
r'@[a-zA-Z0-9_]+'
r'.+ '
r'PRIVMSG (.*?) :',
data)[0],
'username': re.findall(r'^:([a-zA-Z0-9_]+)!', data)[0],
'message': re.findall(r'PRIVMSG #[a-zA-Z0-9_]+ :(.+)',
data)[0].decode('utf8')
}
else:
return None | [
"def",
"_parse_message",
"(",
"self",
",",
"data",
")",
":",
"if",
"TwitchChatStream",
".",
"_check_has_ping",
"(",
"data",
")",
":",
"self",
".",
"_send_pong",
"(",
")",
"if",
"TwitchChatStream",
".",
"_check_has_channel",
"(",
"data",
")",
":",
"self",
"... | 37.961538 | 17.038462 |
def install(self, module):
"""Install a module into this binder.
In this context the module is one of the following:
* function taking the :class:`Binder` as it's only parameter
::
def configure(binder):
bind(str, to='s')
binder.install(configure)
* instance of :class:`Module` (instance of it's subclass counts)
::
class MyModule(Module):
def configure(self, binder):
binder.bind(str, to='s')
binder.install(MyModule())
* subclass of :class:`Module` - the subclass needs to be instantiable so if it
expects any parameters they need to be injected
::
binder.install(MyModule)
"""
if type(module) is type and issubclass(module, Module):
instance = module()
else:
instance = module
instance(self) | [
"def",
"install",
"(",
"self",
",",
"module",
")",
":",
"if",
"type",
"(",
"module",
")",
"is",
"type",
"and",
"issubclass",
"(",
"module",
",",
"Module",
")",
":",
"instance",
"=",
"module",
"(",
")",
"else",
":",
"instance",
"=",
"module",
"instanc... | 25.527778 | 23.416667 |
def max_entries(self, entries):
"""
Chance the maximum number of retained log entries
:param entries: The maximum number of log entries to retain at any given time
:type entries: int
"""
self._debug_log.info('Changing maximum log entries from {old} to {new}'
.format(old=self._log_entries, new=entries))
self._max_entries = entries
# This is a bit awkward, but since the maxlen can't be changed after instantiation, we have to reverse the
# deque before re-instantiating it, then reverse the new deque back in order to preserve the reverse order
# in case any entries are truncated
self._log_entries.reverse()
self._log_entries = deque(self._log_entries, maxlen=self._max_entries)
self._log_entries.reverse() | [
"def",
"max_entries",
"(",
"self",
",",
"entries",
")",
":",
"self",
".",
"_debug_log",
".",
"info",
"(",
"'Changing maximum log entries from {old} to {new}'",
".",
"format",
"(",
"old",
"=",
"self",
".",
"_log_entries",
",",
"new",
"=",
"entries",
")",
")",
... | 51.75 | 25 |
def _q_to_dcm(self, q):
"""
Create DCM (Matrix3) from q
:param q: array q which represents a quaternion [w, x, y, z]
:returns: Matrix3
"""
assert(len(q) == 4)
arr = super(Quaternion, self)._q_to_dcm(q)
return self._dcm_array_to_matrix3(arr) | [
"def",
"_q_to_dcm",
"(",
"self",
",",
"q",
")",
":",
"assert",
"(",
"len",
"(",
"q",
")",
"==",
"4",
")",
"arr",
"=",
"super",
"(",
"Quaternion",
",",
"self",
")",
".",
"_q_to_dcm",
"(",
"q",
")",
"return",
"self",
".",
"_dcm_array_to_matrix3",
"("... | 32.888889 | 10.444444 |
def pretty_str(something, indent=0):
"""Return a human-readable string representation of an object.
Uses `pretty_str` if the given value is an instance of
`CodeEntity` and `repr` otherwise.
Args:
something: Some value to convert.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
if isinstance(something, CodeEntity):
return something.pretty_str(indent=indent)
else:
return (' ' * indent) + repr(something) | [
"def",
"pretty_str",
"(",
"something",
",",
"indent",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"something",
",",
"CodeEntity",
")",
":",
"return",
"something",
".",
"pretty_str",
"(",
"indent",
"=",
"indent",
")",
"else",
":",
"return",
"(",
"' '",
... | 30.375 | 17.6875 |
def _convert_to_image_color(self, color):
""":return: a color that can be used by the image"""
rgb = self._convert_color_to_rrggbb(color)
return self._convert_rrggbb_to_image_color(rgb) | [
"def",
"_convert_to_image_color",
"(",
"self",
",",
"color",
")",
":",
"rgb",
"=",
"self",
".",
"_convert_color_to_rrggbb",
"(",
"color",
")",
"return",
"self",
".",
"_convert_rrggbb_to_image_color",
"(",
"rgb",
")"
] | 51.5 | 6.5 |
def merge_code(left_code, right_code):
"""
{ relative_line:
((left_abs_line, ((offset, op, args), ...)),
(right_abs_line, ((offset, op, args), ...))),
... }
"""
data = dict()
code_lines = (left_code and left_code.iter_code_by_lines()) or tuple()
for abs_line, rel_line, dis in code_lines:
data[rel_line] = [(abs_line, dis), None]
code_lines = (right_code and right_code.iter_code_by_lines()) or tuple()
for abs_line, rel_line, dis in code_lines:
found = data.get(rel_line, None)
if found is None:
found = [None, (abs_line, dis)]
data[rel_line] = found
else:
found[1] = (abs_line, dis)
return data | [
"def",
"merge_code",
"(",
"left_code",
",",
"right_code",
")",
":",
"data",
"=",
"dict",
"(",
")",
"code_lines",
"=",
"(",
"left_code",
"and",
"left_code",
".",
"iter_code_by_lines",
"(",
")",
")",
"or",
"tuple",
"(",
")",
"for",
"abs_line",
",",
"rel_li... | 29.166667 | 17.666667 |
def rollback_group(self, group_id, version, force=False):
        """Roll a group back to a previous version.

        :param str group_id: group ID
        :param str version: group version
        :param bool force: apply even if a deployment is in progress
        :returns: a dict containing the deployment id and version
        :rtype: dict
        """
        # 'force' is forwarded as a query parameter; the server decides
        # whether to override an in-progress deployment.
        params = {'force': force}
        response = self._do_request(
            'PUT',
            '/v2/groups/{group_id}/versions/{version}'.format(
                group_id=group_id, version=version),
            params=params)
        return response.json()
"def",
"rollback_group",
"(",
"self",
",",
"group_id",
",",
"version",
",",
"force",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'force'",
":",
"force",
"}",
"response",
"=",
"self",
".",
"_do_request",
"(",
"'PUT'",
",",
"'/v2/groups/{group_id}/versions/{v... | 35.705882 | 15.588235 |
def extract(self, *args):
        """
        Extract a specific variable

        :param args: column indices of the variables to extract
            (presumably matching the file's CATALOG section — TODO confirm).
        """
        # Column 0 is always the time axis; it also fixes the expected
        # number of samples for every extracted variable below.
        self.time = np.loadtxt(self.abspath,
                               skiprows=self._attributes['data_idx']+1,
                               unpack=True, usecols=(0,))
        for variable_idx in args:
            data = np.loadtxt(self.abspath,
                              skiprows=self._attributes['data_idx']+1,
                              unpack=True,
                              usecols=(variable_idx,))
            # Re-scan the file header to find the label line for this
            # variable (offset by the CATALOG position).
            with open(self.abspath) as fobj:
                for idx, line in enumerate(fobj):
                    if idx == 1 + variable_idx+self._attributes['CATALOG']:
                        try:
                            self.data[variable_idx] = data[:len(self.time)]
                        except TypeError:
                            # 0-d result (single sample): slicing fails, so
                            # fall back to the underlying array.
                            self.data[variable_idx] = data.base
                        self.label[variable_idx] = line.replace("\'",
                                                                '').replace("\n",
                                                                            "")
                        break
"def",
"extract",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"time",
"=",
"np",
".",
"loadtxt",
"(",
"self",
".",
"abspath",
",",
"skiprows",
"=",
"self",
".",
"_attributes",
"[",
"'data_idx'",
"]",
"+",
"1",
",",
"unpack",
"=",
"True",
... | 50.043478 | 16.478261 |
def _get_split_tasks(args, split_fn, file_key, outfile_i=-1):
"""Split up input files and arguments, returning arguments for parallel processing.
outfile_i specifies the location of the output file in the arguments to
the processing function. Defaults to the last item in the list.
"""
split_args = []
combine_map = {}
finished_map = collections.OrderedDict()
extras = []
for data in args:
out_final, out_parts = split_fn(data)
for parts in out_parts:
split_args.append([utils.deepish_copy(data)] + list(parts))
for part_file in [x[outfile_i] for x in out_parts]:
combine_map[part_file] = out_final
if len(out_parts) == 0:
if out_final is not None:
if out_final not in finished_map:
data[file_key] = out_final
finished_map[out_final] = [data]
else:
extras.append([data])
else:
extras.append([data])
return split_args, combine_map, list(finished_map.values()), extras | [
"def",
"_get_split_tasks",
"(",
"args",
",",
"split_fn",
",",
"file_key",
",",
"outfile_i",
"=",
"-",
"1",
")",
":",
"split_args",
"=",
"[",
"]",
"combine_map",
"=",
"{",
"}",
"finished_map",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"extras",
"=... | 41.153846 | 15.307692 |
def find_external_compartment(model):
    """Find the external compartment in the model.

    Uses a simple heuristic where the external compartment should be the one
    with the most exchange reactions.

    Arguments
    ---------
    model : cobra.Model
        A cobra model.

    Returns
    -------
    str
        The putative external compartment.

    Raises
    ------
    RuntimeError
        If several compartments look external and none can be singled out,
        or if there is neither name nor boundary information to go on.
    """
    if model.boundary:
        # Candidate(s): the compartment(s) containing the most boundary
        # reactions.
        counts = pd.Series(tuple(r.compartments)[0] for r in model.boundary)
        most = counts.value_counts()
        most = most.index[most == most.max()].to_series()
    else:
        most = None
    like_external = compartment_shortlist["e"] + ["e"]
    matches = pd.Series([co in like_external for co in model.compartments],
                        index=model.compartments)
    if matches.sum() == 1:
        compartment = matches.index[matches][0]
        LOGGER.info("Compartment `%s` sounds like an external compartment. "
                    "Using this one without counting boundary reactions" %
                    compartment)
        return compartment
    elif most is not None and matches.sum() > 1 and matches[most].sum() == 1:
        compartment = most[matches[most]][0]
        LOGGER.warning("There are several compartments that look like an "
                       "external compartment but `%s` has the most boundary "
                       "reactions, so using that as the external "
                       "compartment." % compartment)
        return compartment
    elif matches.sum() > 1:
        # BUG FIX: the original message contained a "%s" placeholder that was
        # never interpolated; list the ambiguous candidate names instead.
        raise RuntimeError("There are several compartments (%s) that look "
                           "like external compartments but we can't tell "
                           "which one to use. Consider renaming your "
                           "compartments please." %
                           ", ".join(matches.index[matches]))
    if most is not None:
        return most[0]
    LOGGER.warning("Could not identify an external compartment by name and"
                   " choosing one with the most boundary reactions. That "
                   "might be complete nonsense or change suddenly. "
                   "Consider renaming your compartments using "
                   "`Model.compartments` to fix this.")
    # No info in the model, so give up
    raise RuntimeError("The heuristic for discovering an external compartment "
                       "relies on names and boundary reactions. Yet, there "
                       "are neither compartments with recognized names nor "
                       "boundary reactions in the model.")
"def",
"find_external_compartment",
"(",
"model",
")",
":",
"if",
"model",
".",
"boundary",
":",
"counts",
"=",
"pd",
".",
"Series",
"(",
"tuple",
"(",
"r",
".",
"compartments",
")",
"[",
"0",
"]",
"for",
"r",
"in",
"model",
".",
"boundary",
")",
"mo... | 43.280702 | 23.298246 |
def per_section(it, is_delimiter=lambda x: x.isspace()):
    """
    From http://stackoverflow.com/a/25226944/610569
    """
    section = []
    for line in it:
        if not is_delimiter(line):
            section.append(line.rstrip())
        elif section:
            # Delimiter ends the current (non-empty) section.
            yield section
            section = []
    if section:
        yield section
"def",
"per_section",
"(",
"it",
",",
"is_delimiter",
"=",
"lambda",
"x",
":",
"x",
".",
"isspace",
"(",
")",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"line",
"in",
"it",
":",
"if",
"is_delimiter",
"(",
"line",
")",
":",
"if",
"ret",
":",
"yield",
... | 26.571429 | 16.285714 |
def resolve_input_references(to_resolve, inputs_to_reference):
    """
    Resolves input references given in the string to_resolve by using the inputs_to_reference.

    See http://www.commonwl.org/user_guide/06-params/index.html for more information.

    Example:
        "$(inputs.my_file.nameroot).md" -> "filename.md"

    :param to_resolve: The path to match
    :param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename).
    :return: A string in which the input references are replaced with actual values.
    """
    resolved = []
    for token in split_input_references(to_resolve):
        # Only "$(inputs....)" tokens are substituted; literal text passes
        # through unchanged.
        if is_input_reference(token):
            token = str(resolve_input_reference(token, inputs_to_reference))
        resolved.append(token)
    return ''.join(resolved)
"def",
"resolve_input_references",
"(",
"to_resolve",
",",
"inputs_to_reference",
")",
":",
"splitted",
"=",
"split_input_references",
"(",
"to_resolve",
")",
"result",
"=",
"[",
"]",
"for",
"part",
"in",
"splitted",
":",
"if",
"is_input_reference",
"(",
"part",
... | 32.307692 | 28.846154 |
def _remove_broken_links():
    '''
    Remove broken links in `<conda prefix>/etc/microdrop/plugins/enabled/`.

    Returns
    -------
    list
        List of links removed (if any).

    Raises
    ------
    NotImplementedError
        On non-Windows platforms (only NTFS junctions are handled).
    '''
    enabled_dir = MICRODROP_CONDA_PLUGINS.joinpath('enabled')
    if not enabled_dir.isdir():
        return []
    broken_links = []
    for dir_i in enabled_dir.walkdirs(errors='ignore'):
        if platform.system() == 'Windows':
            if dir_i.isjunction() and not dir_i.readlink().isdir():
                # Junction/link target no longer exists.
                broken_links.append(dir_i)
        else:
            raise NotImplementedError('Unsupported platform')
    removed_links = []
    for link_i in broken_links:
        try:
            link_i.unlink()
        except OSError:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only filesystem errors are
            # expected (and tolerated) when deleting a link.
            pass
        else:
            removed_links.append(link_i)
    return removed_links
"def",
"_remove_broken_links",
"(",
")",
":",
"enabled_dir",
"=",
"MICRODROP_CONDA_PLUGINS",
".",
"joinpath",
"(",
"'enabled'",
")",
"if",
"not",
"enabled_dir",
".",
"isdir",
"(",
")",
":",
"return",
"[",
"]",
"broken_links",
"=",
"[",
"]",
"for",
"dir_i",
... | 27.451613 | 21.516129 |
def channel_close(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
        retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
):
        """Close a channel opened with `partner_address` for the given
        `token_address`.

        Race condition, this can fail if channel was closed externally.
        """
        # Delegate to the batch API with a single-element partner list so the
        # actual close logic lives in one place.
        self.channel_batch_close(
            registry_address=registry_address,
            token_address=token_address,
            partner_addresses=[partner_address],
            retry_timeout=retry_timeout,
        )
"def",
"channel_close",
"(",
"self",
",",
"registry_address",
":",
"PaymentNetworkID",
",",
"token_address",
":",
"TokenAddress",
",",
"partner_address",
":",
"Address",
",",
"retry_timeout",
":",
"NetworkTimeout",
"=",
"DEFAULT_RETRY_TIMEOUT",
",",
")",
":",
"self"... | 34.611111 | 14.111111 |
def get_info(node_id, info_id):
    """Get a specific info.

    Both the node and info id must be specified in the url.

    Returns an error response when the node or info does not exist, or when
    the requesting node neither produced nor received the info.
    """
    exp = experiment(session)
    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info, node does not exist")
    # execute the experiment method:
    info = models.Info.query.get(info_id)
    if info is None:
        return error_response(error_type="/info GET, info does not exist",
                              participant=node.participant)
    elif (info.origin_id != node.id and
          info.id not in
          [t.info_id for t in node.transmissions(direction="incoming",
                                                 status="received")]):
        # The info was neither produced by this node nor received by it.
        return error_response(error_type="/info GET, forbidden info",
                              status=403,
                              participant=node.participant)
    try:
        # ping the experiment
        exp.info_get_request(node=node, infos=info)
        session.commit()
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; keep the best-effort error response
        # but only for ordinary exceptions.
        return error_response(error_type="/info GET server error",
                              status=403,
                              participant=node.participant)
    # return the data
    return success_response(field="info",
                            data=info.__json__(),
                            request_type="info get")
"def",
"get_info",
"(",
"node_id",
",",
"info_id",
")",
":",
"exp",
"=",
"experiment",
"(",
"session",
")",
"# check the node exists",
"node",
"=",
"models",
".",
"Node",
".",
"query",
".",
"get",
"(",
"node_id",
")",
"if",
"node",
"is",
"None",
":",
"... | 36.026316 | 18.342105 |
def _pd_post_process(self, cfg):
        """
        Take care of those loop headers/tails where we manually broke their
        connection to the next BBL

        :param cfg: The control-flow graph whose post-dominator tree is being
            repaired after loop back-edges were removed.
        """
        loop_back_edges = self._cfg.get_loop_back_edges()
        for b1, b2 in loop_back_edges:
            # The edge between b1 and b2 is manually broken
            # The post dominator of b1 should be b2 (or not?)
            successors = list(self._pd_graph_successors(cfg, b1))
            if len(successors) == 0:
                # b1 has no successors left in the graph, so restore the
                # broken edge in the post-dominator tree when possible.
                if b2 in self._post_dom:
                    self._post_dom.add_edge(b1, b2)
                else:
                    _l.debug("%s is not in post dominator dict.", b2)
"def",
"_pd_post_process",
"(",
"self",
",",
"cfg",
")",
":",
"loop_back_edges",
"=",
"self",
".",
"_cfg",
".",
"get_loop_back_edges",
"(",
")",
"for",
"b1",
",",
"b2",
"in",
"loop_back_edges",
":",
"# The edge between b1 and b2 is manually broken",
"# The post domi... | 36.666667 | 17.555556 |
def _submit_resource_request(self):
        """
        **Purpose**: Create and submits a RADICAL Pilot Job as per the user
        provided resource description

        Builds a ComputePilotDescription from the configured resource,
        walltime, cores, etc., submits it, stages shared data to the pilot
        sandbox and blocks until the pilot becomes active (or fails).
        """
        try:
            self._prof.prof('creating rreq', uid=self._uid)
            def _pilot_state_cb(pilot, state):
                # Logging-only callback invoked on every pilot state change.
                self._logger.info('Pilot %s state: %s' % (pilot.uid, state))
                if state == rp.FAILED:
                    self._logger.error('Pilot has failed')
                elif state == rp.DONE:
                    self._logger.error('Pilot has completed')
            self._session = rp.Session(dburl=self._mlab_url, uid=self._sid)
            self._pmgr = rp.PilotManager(session=self._session)
            self._pmgr.register_callback(_pilot_state_cb)
            pd_init = {
                'resource': self._resource,
                'runtime': self._walltime,
                'cores': self._cpus,
                'project': self._project,
            }
            # Optional fields are only added when configured.
            if self._gpus:
                pd_init['gpus'] = self._gpus
            if self._access_schema:
                pd_init['access_schema'] = self._access_schema
            if self._queue:
                pd_init['queue'] = self._queue
            if self._rts_config.get('sandbox_cleanup', None):
                pd_init['cleanup'] = True
            # Create Compute Pilot with validated resource description
            pdesc = rp.ComputePilotDescription(pd_init)
            self._prof.prof('rreq created', uid=self._uid)
            # Launch the pilot
            self._pilot = self._pmgr.submit_pilots(pdesc)
            self._prof.prof('rreq submitted', uid=self._uid)
            # Stage every shared-data file into the pilot sandbox.
            shared_staging_directives = list()
            for data in self._shared_data:
                temp = {
                    'source': data,
                    'target': 'pilot:///' + os.path.basename(data)
                }
                shared_staging_directives.append(temp)
            self._pilot.stage_in(shared_staging_directives)
            self._prof.prof('shared data staging initiated', uid=self._uid)
            self._logger.info('Resource request submission successful.. waiting for pilot to go Active')
            # Wait for pilot to go active
            self._pilot.wait([rp.PMGR_ACTIVE, rp.FAILED, rp.CANCELED])
            self._prof.prof('resource active', uid=self._uid)
            self._logger.info('Pilot is now active')
        except KeyboardInterrupt:
            if self._session:
                self._session.close()
            self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
                                   'trying to exit callback thread gracefully...')
            raise KeyboardInterrupt
        # NOTE(review): ``except Exception, ex`` is Python-2-only syntax and
        # ``ex`` is unused; this module targets Python 2.
        except Exception, ex:
            self._logger.exception('Resource request submission failed')
            raise
"def",
"_submit_resource_request",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_prof",
".",
"prof",
"(",
"'creating rreq'",
",",
"uid",
"=",
"self",
".",
"_uid",
")",
"def",
"_pilot_state_cb",
"(",
"pilot",
",",
"state",
")",
":",
"self",
".",
"_l... | 33.819277 | 23.216867 |
def getEncodableAttributes(self, obj, codec=None):
        """
        Must return a C{dict} of attributes to be encoded, even if its empty.

        @param obj: The object whose attributes are being collected.
        @param codec: An optional argument that will contain the encoder
            instance calling this function.
        @since: 0.5
        """
        if not self._compiled:
            self.compile()
        if self.is_dict:
            return dict(obj)
        # Fast path: fully dynamic classes just expose the instance dict.
        if self.shortcut_encode and self.dynamic:
            return obj.__dict__.copy()
        attrs = {}
        if self.static_attrs:
            for attr in self.static_attrs:
                attrs[attr] = getattr(obj, attr, pyamf.Undefined)
        if not self.dynamic:
            if self.non_static_encodable_properties:
                for attr in self.non_static_encodable_properties:
                    attrs[attr] = getattr(obj, attr)
            return attrs
        dynamic_props = util.get_properties(obj)
        if not self.shortcut_encode:
            dynamic_props = set(dynamic_props)
        if self.encodable_properties:
            dynamic_props.update(self.encodable_properties)
        if self.static_attrs:
            dynamic_props.difference_update(self.static_attrs)
        if self.exclude_attrs:
            dynamic_props.difference_update(self.exclude_attrs)
        for attr in dynamic_props:
            attrs[attr] = getattr(obj, attr)
        # Replace configured attributes with their proxied equivalents.
        if self.proxy_attrs is not None and attrs and codec:
            context = codec.context
            for k, v in attrs.copy().iteritems():
                if k in self.proxy_attrs:
                    attrs[k] = context.getProxyForObject(v)
        # Rename attributes according to the synonym mapping (old -> new).
        if self.synonym_attrs:
            missing = object()
            for k, v in self.synonym_attrs.iteritems():
                value = attrs.pop(k, missing)
                if value is missing:
                    continue
                attrs[v] = value
        return attrs
"def",
"getEncodableAttributes",
"(",
"self",
",",
"obj",
",",
"codec",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_compiled",
":",
"self",
".",
"compile",
"(",
")",
"if",
"self",
".",
"is_dict",
":",
"return",
"dict",
"(",
"obj",
")",
"if",
... | 28.560606 | 20.590909 |
def _equals(self, b):
'''Checks whether two records are equal by comparing all fields.
:param b: Another _AzureRecord object
:type b: _AzureRecord
:type return: bool
'''
def parse_dict(params):
vals = []
for char in params:
if char != 'ttl':
list_records = params[char]
try:
for record in list_records:
vals.append(record.__dict__)
except:
vals.append(list_records.__dict__)
vals.sort()
return vals
return (self.resource_group == b.resource_group) & \
(self.zone_name == b.zone_name) & \
(self.record_type == b.record_type) & \
(self.params['ttl'] == b.params['ttl']) & \
(parse_dict(self.params) == parse_dict(b.params)) & \
(self.relative_record_set_name == b.relative_record_set_name) | [
"def",
"_equals",
"(",
"self",
",",
"b",
")",
":",
"def",
"parse_dict",
"(",
"params",
")",
":",
"vals",
"=",
"[",
"]",
"for",
"char",
"in",
"params",
":",
"if",
"char",
"!=",
"'ttl'",
":",
"list_records",
"=",
"params",
"[",
"char",
"]",
"try",
... | 38.769231 | 18.076923 |
def get_schema_object(self, fully_qualified_name: str) -> 'BaseSchema':
        """
        Used to generate a schema object from the given fully_qualified_name.

        Results are memoized in ``self._schema_cache``; a load failure is
        recorded via ``add_errors`` and the method then returns None.

        :param fully_qualified_name: The fully qualified name of the object needed.
        :return: An initialized schema object
        """
        if fully_qualified_name not in self._schema_cache:
            spec = self.get_schema_spec(fully_qualified_name)
            if spec:
                try:
                    # The spec's type attribute selects the schema class to
                    # instantiate.
                    self._schema_cache[fully_qualified_name] = TypeLoader.load_schema(
                        spec.get(ATTRIBUTE_TYPE, None))(fully_qualified_name, self)
                except TypeLoaderError as err:
                    self.add_errors(
                        InvalidTypeError(fully_qualified_name, spec, ATTRIBUTE_TYPE,
                                         InvalidTypeError.Reason.TYPE_NOT_LOADED,
                                         err.type_class_name))
        # None when the spec was missing or loading failed.
        return self._schema_cache.get(fully_qualified_name, None)
"def",
"get_schema_object",
"(",
"self",
",",
"fully_qualified_name",
":",
"str",
")",
"->",
"'BaseSchema'",
":",
"if",
"fully_qualified_name",
"not",
"in",
"self",
".",
"_schema_cache",
":",
"spec",
"=",
"self",
".",
"get_schema_spec",
"(",
"fully_qualified_name"... | 47.619048 | 26 |
def sparse_covariance_matrix(self,x,y,names):
        """build a pyemu.Cov instance from GeoStruct

        Parameters
        ----------
        x : (iterable of floats)
            x-coordinate locations
        y : (iterable of floats)
            y-coordinate locations
        names : (iterable of str)
            (parameter) names of locations.

        Returns
        -------
        sparse : pyemu.SparseMatrix
            the sparse covariance matrix implied by this GeoStruct for the x,y pairs.

        Example
        -------
        ``>>>pp_df = pyemu.pp_utils.pp_file_to_dataframe("hkpp.dat")``

        ``>>>cov = gs.covariance_matrix(pp_df.x,pp_df.y,pp_df.name)``
        """
        if not isinstance(x, np.ndarray):
            x = np.array(x)
        if not isinstance(y, np.ndarray):
            y = np.array(y)
        assert x.shape[0] == y.shape[0]
        assert x.shape[0] == len(names)
        # Start with the diagonal (nugget) entries in COO triplet form.
        iidx = [i for i in range(len(names))]
        jidx = list(iidx)
        data = list(np.zeros(x.shape[0])+self.nugget)
        # Each variogram appends its contributions to the triplet lists
        # in place — presumably including off-diagonal terms; see
        # Vario2d.add_sparse_covariance_matrix.
        for v in self.variograms:
            v.add_sparse_covariance_matrix(x,y,names,iidx,jidx,data)
        coo = scipy.sparse.coo_matrix((data,(iidx,jidx)),shape=(len(names),len(names)))
        coo.eliminate_zeros()
        # Duplicate (i, j) triplets are summed into a single entry.
        coo.sum_duplicates()
        return SparseMatrix(coo,row_names=names,col_names=names)
"def",
"sparse_covariance_matrix",
"(",
"self",
",",
"x",
",",
"y",
",",
"names",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"x",
"=",
"np",
".",
"array",
"(",
"x",
")",
"if",
"not",
"isinstance",
"(",
"y"... | 33.767442 | 18.581395 |
def project_dir(self) -> str:
        """Generate a random path to project directory.

        :return: Path to project.

        :Example:
            /home/sherika/Development/Falcon/mercenary
        """
        dev_dir = self.dev_dir()
        # Pick a random well-known project name and join it under the
        # (random) development directory.
        project = self.random.choice(PROJECT_NAMES)
        return str(self._pathlib_home / dev_dir / project)
"def",
"project_dir",
"(",
"self",
")",
"->",
"str",
":",
"dev_dir",
"=",
"self",
".",
"dev_dir",
"(",
")",
"project",
"=",
"self",
".",
"random",
".",
"choice",
"(",
"PROJECT_NAMES",
")",
"return",
"str",
"(",
"self",
".",
"_pathlib_home",
"/",
"dev_d... | 30.909091 | 15.636364 |
def VerifyServerPEM(self, http_object):
    """Check the server PEM for validity.

    This is used to determine connectivity to the server. Sometimes captive
    portals return a valid HTTP status, but the data is corrupted.

    Args:
      http_object: The response received from the server.

    Returns:
      True if the response contains a valid server certificate.
    """
    # BUG FIX: bind the URL before anything that can raise so the except
    # clause below can always report it (previously `server_url` was only
    # assigned inside the try, after `data`, and could be unbound when
    # logging a failure).
    server_url = http_object.url
    try:
      server_pem = http_object.data
      if b"BEGIN CERTIFICATE" in server_pem:
        # Now we know that this proxy is working. We still have to verify the
        # certificate. This will raise if the server cert is invalid.
        server_certificate = rdf_crypto.RDFX509Cert(server_pem)
        self.communicator.LoadServerCertificate(
            server_certificate=server_certificate, ca_certificate=self.ca_cert)

        logging.info("Server PEM re-keyed.")
        return True
    except Exception as e:  # pylint: disable=broad-except
      logging.info("Unable to verify server certificate at %s: %s", server_url,
                   e)

    return False
"def",
"VerifyServerPEM",
"(",
"self",
",",
"http_object",
")",
":",
"try",
":",
"server_pem",
"=",
"http_object",
".",
"data",
"server_url",
"=",
"http_object",
".",
"url",
"if",
"b\"BEGIN CERTIFICATE\"",
"in",
"server_pem",
":",
"# Now we know that this proxy is w... | 35.5 | 23.566667 |
def key_value_convert(dictin, keyfn=lambda x: x, valuefn=lambda x: x, dropfailedkeys=False, dropfailedvalues=False,
                      exception=ValueError):
    # type: (DictUpperBound, Callable[[Any], Any], Callable[[Any], Any], bool, bool, ExceptionUpperBound) -> Dict
    """Convert keys and/or values of dictionary using functions passed in as parameters

    Args:
        dictin (DictUpperBound): Input dictionary
        keyfn (Callable[[Any], Any]): Function to convert keys. Defaults to lambda x: x
        valuefn (Callable[[Any], Any]): Function to convert values. Defaults to lambda x: x
        dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.
        dropfailedvalues (bool): Whether to drop dictionary entries where value conversion fails. Defaults to False.
        exception (ExceptionUpperBound): The exception to expect if keyfn or valuefn fail. Defaults to ValueError.

    Returns:
        Dict: Dictionary with converted keys and/or values
    """
    converted = dict()
    for key, value in dictin.items():
        try:
            new_key = keyfn(key)
        except exception:
            # Keep the original key unless the caller asked to drop failures.
            if dropfailedkeys:
                continue
            new_key = key
        try:
            new_value = valuefn(value)
        except exception:
            # Keep the original value unless the caller asked to drop failures.
            if dropfailedvalues:
                continue
            new_value = value
        converted[new_key] = new_value
    return converted
"def",
"key_value_convert",
"(",
"dictin",
",",
"keyfn",
"=",
"lambda",
"x",
":",
"x",
",",
"valuefn",
"=",
"lambda",
"x",
":",
"x",
",",
"dropfailedkeys",
"=",
"False",
",",
"dropfailedvalues",
"=",
"False",
",",
"exception",
"=",
"ValueError",
")",
":"... | 42.5 | 27.264706 |
def _run_nb_cmd(self, cmd):
        '''
        cmd iterator

        Run ``cmd`` via a non-blocking Popen and yield
        ``(stdout_chunk, stderr_chunk, returncode)`` tuples as output
        becomes available.
        '''
        try:
            # NOTE(review): shell=True — cmd is assumed to be pre-sanitized
            # by the caller; verify if it can carry untrusted input.
            proc = salt.utils.nb_popen.NonBlockingPopen(
                cmd,
                shell=True,
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE,
            )
            while True:
                time.sleep(0.1)
                out = proc.recv()
                err = proc.recv_err()
                rcode = proc.returncode
                # Both streams exhausted: the process is done.
                if out is None and err is None:
                    break
                if err:
                    err = self.get_error(err)
                yield out, err, rcode
        except Exception:
            # Broad boundary handler: surface any failure as a final tuple
            # rather than raising out of the generator.
            yield ('', 'Unknown Error', None)
"def",
"_run_nb_cmd",
"(",
"self",
",",
"cmd",
")",
":",
"try",
":",
"proc",
"=",
"salt",
".",
"utils",
".",
"nb_popen",
".",
"NonBlockingPopen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"="... | 29.782609 | 13.086957 |
def fragment(self, message):
        """Fragment message based on max payload size

        note: if the message doesn't need to fragment,
        it will return a list which only contains original
        message itself.

        :param message: raw message
        :return: list of messages whose sizes <= max
            payload size
        """
        # Only call/response (and their continue) frames can be fragmented;
        # everything else is passed through untouched.
        if message.message_type in [Types.CALL_RES,
                                    Types.CALL_REQ,
                                    Types.CALL_REQ_CONTINUE,
                                    Types.CALL_RES_CONTINUE]:
            rw = RW[message.message_type]
            # Space left for args after the frame's fixed-size fields.
            payload_space = (common.MAX_PAYLOAD_SIZE -
                             rw.length_no_args(message))
            # split a call/request message into an array
            # with a call/request message and {0~n} continue
            # message
            fragment_msg = message.fragment(payload_space)
            self.generate_checksum(message)
            yield message
            while fragment_msg is not None:
                message = fragment_msg
                rw = RW[message.message_type]
                payload_space = (common.MAX_PAYLOAD_SIZE -
                                 rw.length_no_args(message))
                fragment_msg = message.fragment(payload_space)
                self.generate_checksum(message)
                yield message
        else:
            yield message
"def",
"fragment",
"(",
"self",
",",
"message",
")",
":",
"if",
"message",
".",
"message_type",
"in",
"[",
"Types",
".",
"CALL_RES",
",",
"Types",
".",
"CALL_REQ",
",",
"Types",
".",
"CALL_REQ_CONTINUE",
",",
"Types",
".",
"CALL_RES_CONTINUE",
"]",
":",
... | 39.857143 | 15.114286 |
def _get_activation(self, F, inputs, activation, **kwargs):
        """Get activation function. Convert if is string

        :param F: the mxnet symbol/ndarray API module.
        :param inputs: input tensor the activation is applied to.
        :param activation: a known activation name, an arbitrary activation
            name string, a LeakyReLU block, or a callable.
        """
        # Well-known names map directly to F's elementwise operators.
        func = {'tanh': F.tanh,
                'relu': F.relu,
                'sigmoid': F.sigmoid,
                'softsign': F.softsign}.get(activation)
        if func:
            return func(inputs, **kwargs)
        elif isinstance(activation, string_types):
            # Any other string is resolved by the generic Activation op.
            return F.Activation(inputs, act_type=activation, **kwargs)
        elif isinstance(activation, LeakyReLU):
            return F.LeakyReLU(inputs, act_type='leaky', slope=activation._alpha, **kwargs)
        # Fall back to treating the activation as a callable.
        return activation(inputs, **kwargs)
"def",
"_get_activation",
"(",
"self",
",",
"F",
",",
"inputs",
",",
"activation",
",",
"*",
"*",
"kwargs",
")",
":",
"func",
"=",
"{",
"'tanh'",
":",
"F",
".",
"tanh",
",",
"'relu'",
":",
"F",
".",
"relu",
",",
"'sigmoid'",
":",
"F",
".",
"sigmo... | 48.461538 | 13.923077 |
def common_values_dict():
    """Build a basic values object used in every create method.

    All our resources contain a same subset of value. Instead of
    redoing this code everytime, this method ensures it is done only at
    one place.
    """
    # created_at and updated_at start out identical for a fresh resource.
    timestamp = datetime.datetime.utcnow().isoformat()
    return {
        'id': utils.gen_uuid(),
        'created_at': timestamp,
        'updated_at': timestamp,
        'etag': utils.gen_etag(),
    }
"def",
"common_values_dict",
"(",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"isoformat",
"(",
")",
"etag",
"=",
"utils",
".",
"gen_etag",
"(",
")",
"values",
"=",
"{",
"'id'",
":",
"utils",
".",
"gen_uuid",
"(",... | 27.470588 | 20.058824 |
def submit(self, map, method, postfix):
        '''Perform an HTTP request against the networkAPI.

        :param map: Dictionary with the data used to build the XML sent in
            the body of the HTTP request.
        :param method: HTTP request method ('GET', 'POST', 'PUT' or 'DELETE').
        :param postfix: Suffix appended to the networkAPI base URL,
            e.g. /ambiente.

        :return: Tuple with the HTTP response code and body:
            (<code>, <description>)

        :raise NetworkAPIClientError: Error during the HTTP call to the
            networkAPI.
        '''
        try:
            rest_request = RestRequest(
                self.get_url(postfix),
                method,
                self.user,
                self.password,
                self.user_ldap)
            return rest_request.submit(map)
        except RestError as e:
            # Transport-level errors are mapped to API-level exceptions.
            raise ErrorHandler.handle(None, str(e))
"def",
"submit",
"(",
"self",
",",
"map",
",",
"method",
",",
"postfix",
")",
":",
"try",
":",
"rest_request",
"=",
"RestRequest",
"(",
"self",
".",
"get_url",
"(",
"postfix",
")",
",",
"method",
",",
"self",
".",
"user",
",",
"self",
".",
"password"... | 40.863636 | 23.045455 |
def update_policy(self):
        """
        Uses demonstration_buffer to update the policy.

        Normalizes advantages, runs num_epoch passes of minibatch updates
        over the shuffled update buffer and records the resulting losses.
        """
        self.trainer_metrics.start_policy_update_timer(
            number_experiences=len(self.training_buffer.update_buffer['actions']),
            mean_return=float(np.mean(self.cumulative_returns_since_policy_update)))
        n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)
        value_total, policy_total, forward_total, inverse_total = [], [], [], []
        # Standardize advantages (zero mean, unit variance) before training.
        advantages = self.training_buffer.update_buffer['advantages'].get_batch()
        self.training_buffer.update_buffer['advantages'].set(
            (advantages - advantages.mean()) / (advantages.std() + 1e-10))
        num_epoch = self.trainer_parameters['num_epoch']
        for _ in range(num_epoch):
            self.training_buffer.update_buffer.shuffle()
            buffer = self.training_buffer.update_buffer
            for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):
                start = l * n_sequences
                end = (l + 1) * n_sequences
                run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences)
                value_total.append(run_out['value_loss'])
                policy_total.append(np.abs(run_out['policy_loss']))
                if self.use_curiosity:
                    inverse_total.append(run_out['inverse_loss'])
                    forward_total.append(run_out['forward_loss'])
        self.stats['Losses/Value Loss'].append(np.mean(value_total))
        self.stats['Losses/Policy Loss'].append(np.mean(policy_total))
        if self.use_curiosity:
            self.stats['Losses/Forward Loss'].append(np.mean(forward_total))
            self.stats['Losses/Inverse Loss'].append(np.mean(inverse_total))
        # The buffer is consumed by this update; start collecting afresh.
        self.training_buffer.reset_update_buffer()
        self.trainer_metrics.end_policy_update()
"def",
"update_policy",
"(",
"self",
")",
":",
"self",
".",
"trainer_metrics",
".",
"start_policy_update_timer",
"(",
"number_experiences",
"=",
"len",
"(",
"self",
".",
"training_buffer",
".",
"update_buffer",
"[",
"'actions'",
"]",
")",
",",
"mean_return",
"="... | 59.375 | 23.375 |
def take_screen_shot_to_array(self, screen_id, width, height, bitmap_format):
        """Takes a guest screen shot of the requested size and format
        and returns it as an array of bytes.

        in screen_id of type int
            The guest monitor to take screenshot from.

        in width of type int
            Desired image width.

        in height of type int
            Desired image height.

        in bitmap_format of type :class:`BitmapFormat`
            The requested format.

        return screen_data of type str
            Array with resulting screen data.
        """
        # Validate argument types up front so the underlying COM/XPCOM call
        # receives well-formed parameters.
        if not isinstance(screen_id, baseinteger):
            raise TypeError("screen_id can only be an instance of type baseinteger")
        if not isinstance(width, baseinteger):
            raise TypeError("width can only be an instance of type baseinteger")
        if not isinstance(height, baseinteger):
            raise TypeError("height can only be an instance of type baseinteger")
        if not isinstance(bitmap_format, BitmapFormat):
            raise TypeError("bitmap_format can only be an instance of type BitmapFormat")
        screen_data = self._call("takeScreenShotToArray",
                     in_p=[screen_id, width, height, bitmap_format])
        return screen_data
"def",
"take_screen_shot_to_array",
"(",
"self",
",",
"screen_id",
",",
"width",
",",
"height",
",",
"bitmap_format",
")",
":",
"if",
"not",
"isinstance",
"(",
"screen_id",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"screen_id can only be an instan... | 40.709677 | 20.645161 |
def send_keysequence_window_up(self, window, keysequence, delay=12000):
        """Send key release (up) events for the given key sequence"""
        # delay is forwarded to libxdo as a C unsigned long; presumably the
        # pause between keystrokes in microseconds — TODO confirm against
        # xdotool's xdo_send_keysequence_window_up documentation.
        _libxdo.xdo_send_keysequence_window_up(
            self._xdo, window, keysequence, ctypes.c_ulong(delay))
"def",
"send_keysequence_window_up",
"(",
"self",
",",
"window",
",",
"keysequence",
",",
"delay",
"=",
"12000",
")",
":",
"_libxdo",
".",
"xdo_send_keysequence_window_up",
"(",
"self",
".",
"_xdo",
",",
"window",
",",
"keysequence",
",",
"ctypes",
".",
"c_ulo... | 63.25 | 16 |
def merged():
    # type: () -> None
    """ Cleanup a remotely merged branch.

    Must be run from a hotfix branch; syncs master and develop with the
    merged hotfix, deletes the local hotfix branch and ends on master.
    """
    develop = conf.get('git.devel_branch', 'develop')
    master = conf.get('git.master_branch', 'master')
    branch = git.current_branch(refresh=True)

    # Aborts unless the current branch is a hotfix branch.
    common.assert_branch_type('hotfix')

    # Pull master with the merged hotfix
    common.git_checkout(master)
    common.git_pull(master)

    # Merge to develop
    common.git_checkout(develop)
    common.git_pull(develop)
    common.git_merge(develop, branch.name)

    # Cleanup
    common.git_branch_delete(branch.name)
    common.git_prune()

    common.git_checkout(master)
"def",
"merged",
"(",
")",
":",
"# type: () -> None",
"develop",
"=",
"conf",
".",
"get",
"(",
"'git.devel_branch'",
",",
"'develop'",
")",
"master",
"=",
"conf",
".",
"get",
"(",
"'git.master_branch'",
",",
"'master'",
")",
"branch",
"=",
"git",
".",
"cur... | 26 | 17.086957 |
def write_float(self, value, little_endian=True):
        """
        Pack the value as a float and write 4 bytes to the stream.

        Args:
            value (number): the value to write to the stream.
            little_endian (bool): specify the endianness. (Default) Little endian.

        Returns:
            int: the number of bytes written.
        """
        # struct byte-order prefix: '<' little endian, '>' big endian.
        prefix = "<" if little_endian else ">"
        return self.pack('%sf' % prefix, value)
"def",
"write_float",
"(",
"self",
",",
"value",
",",
"little_endian",
"=",
"True",
")",
":",
"if",
"little_endian",
":",
"endian",
"=",
"\"<\"",
"else",
":",
"endian",
"=",
"\">\"",
"return",
"self",
".",
"pack",
"(",
"'%sf'",
"%",
"endian",
",",
"val... | 30.4375 | 19.6875 |
def get_capacity_grav(self, min_voltage=None, max_voltage=None,
                      use_overall_normalization=True):
        """
        Get the gravimetric capacity of the electrode.

        Args:
            min_voltage (float): The minimum allowable voltage for a given
                step.
            max_voltage (float): The maximum allowable voltage allowable for a
                given step.
            use_overall_normalization (booL): If False, normalize by the
                discharged state of only the voltage pairs matching the voltage
                criteria. if True, use default normalization of the full
                electrode path.

        Returns:
            Gravimetric capacity in mAh/g across the insertion path (a subset
            of the path can be chosen by the optional arguments).
        """
        pairs_in_range = self._select_in_voltage_range(min_voltage,
                                                       max_voltage)
        # Fall back to the full-path mass when requested, or when no voltage
        # pair matched the window (avoids indexing an empty list).
        normalization_mass = self.normalization_mass \
            if use_overall_normalization or len(pairs_in_range) == 0 \
            else pairs_in_range[-1].mass_discharge
        return sum([pair.mAh for pair in pairs_in_range]) / normalization_mass
"def",
"get_capacity_grav",
"(",
"self",
",",
"min_voltage",
"=",
"None",
",",
"max_voltage",
"=",
"None",
",",
"use_overall_normalization",
"=",
"True",
")",
":",
"pairs_in_range",
"=",
"self",
".",
"_select_in_voltage_range",
"(",
"min_voltage",
",",
"max_voltag... | 48.32 | 24.4 |
def concat(ctx, *strings):
'''
Yields one string, concatenation of argument strings
'''
strings = flatten([ (s.compute(ctx) if callable(s) else s) for s in strings ])
strings = (next(string_arg(ctx, s), '') for s in strings)
#assert(all(map(lambda x: isinstance(x, str), strings)))
#FIXME: Check arg types
yield ''.join(strings) | [
"def",
"concat",
"(",
"ctx",
",",
"*",
"strings",
")",
":",
"strings",
"=",
"flatten",
"(",
"[",
"(",
"s",
".",
"compute",
"(",
"ctx",
")",
"if",
"callable",
"(",
"s",
")",
"else",
"s",
")",
"for",
"s",
"in",
"strings",
"]",
")",
"strings",
"="... | 39.111111 | 22.888889 |
def get_auth_providers(self, netloc):
"""BIG-IQ specific query for auth providers
BIG-IP doesn't really need this because BIG-IP's multiple auth providers
seem to handle fallthrough just fine. BIG-IQ on the other hand, needs to
have its auth provider specified if you're using one of the non-default
ones.
:param netloc:
:return:
"""
url = "https://%s/info/system?null" % (netloc)
response = requests.get(url, verify=self.verify)
if not response.ok or not hasattr(response, "json"):
error_message = '%s Unexpected Error: %s for uri: %s\nText: %r' %\
(response.status_code,
response.reason,
response.url,
response.text)
raise iControlUnexpectedHTTPError(error_message, response=response)
respJson = response.json()
result = respJson['providers']
return result | [
"def",
"get_auth_providers",
"(",
"self",
",",
"netloc",
")",
":",
"url",
"=",
"\"https://%s/info/system?null\"",
"%",
"(",
"netloc",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"verify",
"=",
"self",
".",
"verify",
")",
"if",
"not",
"... | 41.208333 | 20.208333 |
def locked_get(self):
"""Retrieve the credentials from the dictionary, if they exist.
Returns: A :class:`oauth2client.client.OAuth2Credentials` instance.
"""
serialized = self._dictionary.get(self._key)
if serialized is None:
return None
credentials = client.OAuth2Credentials.from_json(serialized)
credentials.set_store(self)
return credentials | [
"def",
"locked_get",
"(",
"self",
")",
":",
"serialized",
"=",
"self",
".",
"_dictionary",
".",
"get",
"(",
"self",
".",
"_key",
")",
"if",
"serialized",
"is",
"None",
":",
"return",
"None",
"credentials",
"=",
"client",
".",
"OAuth2Credentials",
".",
"f... | 29.428571 | 21.428571 |
def parse_option(self, option, block_name, *values):
""" Parse domain values for option.
"""
_extra_subs = ('www', 'm', 'mobile')
if len(values) == 0: # expect some values here..
raise ValueError
for value in values:
value = value.lower()
# if it doesn't look like a protocol, assume http
# (e.g. only domain supplied)
if not _RE_PROTOCOL.match(value):
value = 'http://' + value
# did it parse? pull hostname/domain
parsed = urlparse.urlparse(value)
if parsed:
domain = parsed.hostname
if domain and _RE_TLD.search(domain): # must have a TLD
# doesn't have subdomain, tack on www, m, and mobile
# for good measure. note, this check fails for
# multi-part TLDs, e.g. .co.uk
domain = _RE_WWW_SUB.sub('', domain) # strip "www."
if len(domain.split('.')) == 2:
for sub in _extra_subs:
self.domains.add('{0}.{1}'.format(sub, domain))
self.domains.add(domain)
# no domains.. must have failed
if not self.domains:
raise ValueError | [
"def",
"parse_option",
"(",
"self",
",",
"option",
",",
"block_name",
",",
"*",
"values",
")",
":",
"_extra_subs",
"=",
"(",
"'www'",
",",
"'m'",
",",
"'mobile'",
")",
"if",
"len",
"(",
"values",
")",
"==",
"0",
":",
"# expect some values here..",
"raise... | 35.75 | 18.25 |
def headers_to_sign(self, http_request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
headers_to_sign = {}
headers_to_sign = {'Host' : self.host}
for name, value in http_request.headers.items():
lname = name.lower()
if lname.startswith('x-amz'):
headers_to_sign[name] = value
return headers_to_sign | [
"def",
"headers_to_sign",
"(",
"self",
",",
"http_request",
")",
":",
"headers_to_sign",
"=",
"{",
"}",
"headers_to_sign",
"=",
"{",
"'Host'",
":",
"self",
".",
"host",
"}",
"for",
"name",
",",
"value",
"in",
"http_request",
".",
"headers",
".",
"items",
... | 36.333333 | 8.166667 |
def send(self, data):
"""
Sends a packet of data through this connection mode.
This method returns a coroutine.
"""
if not self._connected:
raise ConnectionError('Not connected')
return self._send_queue.put(data) | [
"def",
"send",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"self",
".",
"_connected",
":",
"raise",
"ConnectionError",
"(",
"'Not connected'",
")",
"return",
"self",
".",
"_send_queue",
".",
"put",
"(",
"data",
")"
] | 26.5 | 13.9 |
def errorhandler(self, code_or_exception):
"""
Register a function to handle errors by code or exception class.
A decorator that is used to register a function given an
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
:param code_or_exception: the code as integer for the handler, or
an arbitrary exception
"""
def decorator(fn):
self._defer(lambda app: app.register_error_handler(code_or_exception, fn))
return fn
return decorator | [
"def",
"errorhandler",
"(",
"self",
",",
"code_or_exception",
")",
":",
"def",
"decorator",
"(",
"fn",
")",
":",
"self",
".",
"_defer",
"(",
"lambda",
"app",
":",
"app",
".",
"register_error_handler",
"(",
"code_or_exception",
",",
"fn",
")",
")",
"return"... | 35.666667 | 20.333333 |
def get_version():
"""Get version from git and VERSION file.
In the case where the version is not tagged in git, this function appends
.post0+commit if the version has been released and .dev0+commit if the
version has not yet been released.
Derived from: https://github.com/Changaco/version.py
"""
d = os.path.dirname(__file__)
# get release number from VERSION
with open(os.path.join(d, 'VERSION')) as f:
vre = re.compile('.Version: (.+)$', re.M)
version = vre.search(f.read()).group(1)
if os.path.isdir(os.path.join(d, '.git')):
# Get the version using "git describe".
cmd = 'git describe --tags'
try:
git_version = check_output(cmd.split()).decode().strip()[1:]
except CalledProcessError:
print('Unable to get version number from git tags\n'
'Setting to x.x')
git_version = 'x.x'
# PEP440 compatibility
if '-' in git_version:
git_revision = check_output(['git', 'rev-parse', 'HEAD'])
git_revision = git_revision.strip().decode('ascii')
# add post0 if the version is released
# otherwise add dev0 if the version is not yet released
if ISRELEASED:
version += '.post0+' + git_revision[:7]
else:
version += '.dev0+' + git_revision[:7]
return version | [
"def",
"get_version",
"(",
")",
":",
"d",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"# get release number from VERSION",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"'VERSION'",
")",
")",
"as",
"f",
":",
"v... | 37.378378 | 17.918919 |
def _debug_dump_dom(el):
"""Debugging helper. Prints out `el` contents."""
import xml.dom.minidom
s = [el.nodeName]
att_container = el.attributes
for i in range(att_container.length):
attr = att_container.item(i)
s.append(' @{a}="{v}"'.format(a=attr.name, v=attr.value))
for c in el.childNodes:
if c.nodeType == xml.dom.minidom.Node.TEXT_NODE:
s.append(' {a} type="TEXT" data="{d}"'.format(a=c.nodeName, d=c.data))
else:
s.append(' {a} child'.format(a=c.nodeName))
return '\n'.join(s) | [
"def",
"_debug_dump_dom",
"(",
"el",
")",
":",
"import",
"xml",
".",
"dom",
".",
"minidom",
"s",
"=",
"[",
"el",
".",
"nodeName",
"]",
"att_container",
"=",
"el",
".",
"attributes",
"for",
"i",
"in",
"range",
"(",
"att_container",
".",
"length",
")",
... | 39.857143 | 15.642857 |
def to_string(input_):
"""Format an input for representation as text
This method is just a convenience that handles default LaTeX formatting
"""
usetex = rcParams['text.usetex']
if isinstance(input_, units.UnitBase):
return input_.to_string('latex_inline')
if isinstance(input_, (float, int)) and usetex:
return tex.float_to_latex(input_)
if usetex:
return tex.label_to_latex(input_)
return str(input_) | [
"def",
"to_string",
"(",
"input_",
")",
":",
"usetex",
"=",
"rcParams",
"[",
"'text.usetex'",
"]",
"if",
"isinstance",
"(",
"input_",
",",
"units",
".",
"UnitBase",
")",
":",
"return",
"input_",
".",
"to_string",
"(",
"'latex_inline'",
")",
"if",
"isinstan... | 34.384615 | 12.538462 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.