repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
tk0miya/tk.phpautodoc | src/phply/phpparse.py | https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L1207-L1212 | def p_common_scalar_magic_dir(p):
'common_scalar : DIR'
value = getattr(p.lexer, 'filename', None)
if value is not None:
value = os.path.dirname(value)
p[0] = ast.MagicConstant(p[1].upper(), value, lineno=p.lineno(1)) | [
"def",
"p_common_scalar_magic_dir",
"(",
"p",
")",
":",
"value",
"=",
"getattr",
"(",
"p",
".",
"lexer",
",",
"'filename'",
",",
"None",
")",
"if",
"value",
"is",
"not",
"None",
":",
"value",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"value",
")",... | common_scalar : DIR | [
"common_scalar",
":",
"DIR"
] | python | train |
codeinn/vcs | vcs/utils/__init__.py | https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/utils/__init__.py#L110-L152 | def safe_str(unicode_, to_encoding=None):
"""
safe str function. Does few trick to turn unicode_ into string
In case of UnicodeEncodeError we try to return it with encoding detected
by chardet library if it fails fallback to string with errors replaced
:param unicode_: unicode to encode
:rtype: str
:returns: str object
"""
# if it's not basestr cast to str
if not isinstance(unicode_, basestring):
return str(unicode_)
if isinstance(unicode_, str):
return unicode_
if not to_encoding:
from vcs.conf import settings
to_encoding = settings.DEFAULT_ENCODINGS
if not isinstance(to_encoding, (list, tuple)):
to_encoding = [to_encoding]
for enc in to_encoding:
try:
return unicode_.encode(enc)
except UnicodeEncodeError:
pass
try:
import chardet
encoding = chardet.detect(unicode_)['encoding']
if encoding is None:
raise UnicodeEncodeError()
return unicode_.encode(encoding)
except (ImportError, UnicodeEncodeError):
return unicode_.encode(to_encoding[0], 'replace')
return safe_str | [
"def",
"safe_str",
"(",
"unicode_",
",",
"to_encoding",
"=",
"None",
")",
":",
"# if it's not basestr cast to str",
"if",
"not",
"isinstance",
"(",
"unicode_",
",",
"basestring",
")",
":",
"return",
"str",
"(",
"unicode_",
")",
"if",
"isinstance",
"(",
"unicod... | safe str function. Does few trick to turn unicode_ into string
In case of UnicodeEncodeError we try to return it with encoding detected
by chardet library if it fails fallback to string with errors replaced
:param unicode_: unicode to encode
:rtype: str
:returns: str object | [
"safe",
"str",
"function",
".",
"Does",
"few",
"trick",
"to",
"turn",
"unicode_",
"into",
"string"
] | python | train |
stephrdev/django-formwizard | formwizard/views.py | https://github.com/stephrdev/django-formwizard/blob/7b35165f0340aae4e8302d5b05b0cb443f6c9904/formwizard/views.py#L590-L635 | def get(self, *args, **kwargs):
"""
This renders the form or, if needed, does the http redirects.
"""
step_url = kwargs.get('step', None)
if step_url is None:
if 'reset' in self.request.GET:
self.storage.reset()
self.storage.current_step = self.steps.first
if self.request.GET:
query_string = "?%s" % self.request.GET.urlencode()
else:
query_string = ""
next_step_url = reverse(self.url_name, kwargs={
'step': self.steps.current,
}) + query_string
return redirect(next_step_url)
# is the current step the "done" name/view?
elif step_url == self.done_step_name:
last_step = self.steps.last
return self.render_done(self.get_form(step=last_step,
data=self.storage.get_step_data(last_step),
files=self.storage.get_step_files(last_step)
), **kwargs)
# is the url step name not equal to the step in the storage?
# if yes, change the step in the storage (if name exists)
elif step_url == self.steps.current:
# URL step name and storage step name are equal, render!
return self.render(self.get_form(
data=self.storage.current_step_data,
files=self.storage.current_step_data,
), **kwargs)
elif step_url in self.get_form_list():
self.storage.current_step = step_url
return self.render(self.get_form(
data=self.storage.current_step_data,
files=self.storage.current_step_data,
), **kwargs)
# invalid step name, reset to first and redirect.
else:
self.storage.current_step = self.steps.first
return redirect(self.url_name, step=self.steps.first) | [
"def",
"get",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"step_url",
"=",
"kwargs",
".",
"get",
"(",
"'step'",
",",
"None",
")",
"if",
"step_url",
"is",
"None",
":",
"if",
"'reset'",
"in",
"self",
".",
"request",
".",
"GET"... | This renders the form or, if needed, does the http redirects. | [
"This",
"renders",
"the",
"form",
"or",
"if",
"needed",
"does",
"the",
"http",
"redirects",
"."
] | python | train |
tensorflow/probability | tensorflow_probability/examples/sprites_dataset.py | https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/sprites_dataset.py#L113-L118 | def read_image(filepath):
"""Returns an image tensor."""
im_bytes = tf.io.read_file(filepath)
im = tf.image.decode_image(im_bytes, channels=CHANNELS)
im = tf.image.convert_image_dtype(im, tf.float32)
return im | [
"def",
"read_image",
"(",
"filepath",
")",
":",
"im_bytes",
"=",
"tf",
".",
"io",
".",
"read_file",
"(",
"filepath",
")",
"im",
"=",
"tf",
".",
"image",
".",
"decode_image",
"(",
"im_bytes",
",",
"channels",
"=",
"CHANNELS",
")",
"im",
"=",
"tf",
"."... | Returns an image tensor. | [
"Returns",
"an",
"image",
"tensor",
"."
] | python | test |
Alignak-monitoring/alignak | alignak/objects/timeperiod.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/timeperiod.py#L252-L272 | def get_raw_import_values(self): # pragma: no cover, deprecation
"""
Get some properties of timeperiod (timeperiod is a bit different
from classic item)
TODO: never called anywhere, still useful?
:return: a dictionnary of some properties
:rtype: dict
"""
properties = ['timeperiod_name', 'alias', 'use', 'register']
res = {}
for prop in properties:
if hasattr(self, prop):
val = getattr(self, prop)
res[prop] = val
# Now the unresolved one. The only way to get ride of same key things is to put
# directly the full value as the key
for other in self.unresolved:
res[other] = ''
return res | [
"def",
"get_raw_import_values",
"(",
"self",
")",
":",
"# pragma: no cover, deprecation",
"properties",
"=",
"[",
"'timeperiod_name'",
",",
"'alias'",
",",
"'use'",
",",
"'register'",
"]",
"res",
"=",
"{",
"}",
"for",
"prop",
"in",
"properties",
":",
"if",
"ha... | Get some properties of timeperiod (timeperiod is a bit different
from classic item)
TODO: never called anywhere, still useful?
:return: a dictionnary of some properties
:rtype: dict | [
"Get",
"some",
"properties",
"of",
"timeperiod",
"(",
"timeperiod",
"is",
"a",
"bit",
"different",
"from",
"classic",
"item",
")"
] | python | train |
ipfs/py-ipfs-api | ipfsapi/client.py | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L1158-L1199 | def pin_verify(self, path, *paths, **kwargs):
"""Verify that recursive pins are complete.
Scan the repo for pinned object graphs and check their integrity.
Issues will be reported back with a helpful human-readable error
message to aid in error recovery. This is useful to help recover
from datastore corruptions (such as when accidentally deleting
files added using the filestore backend).
This function returns an iterator needs to be closed using a context
manager (``with``-statement) or using the ``.close()`` method.
.. code-block:: python
>>> with c.pin_verify("QmN…TTZ", verbose=True) as pin_verify_iter:
... for item in pin_verify_iter:
... print(item)
...
{"Cid":"QmVkNdzCBukBRdpyFiKPyL2R15qPExMr9rV9RFV2kf9eeV","Ok":True}
{"Cid":"QmbPzQruAEFjUU3gQfupns6b8USr8VrD9H71GrqGDXQSxm","Ok":True}
{"Cid":"Qmcns1nUvbeWiecdGDPw8JxWeUfxCV8JKhTfgzs3F8JM4P","Ok":True}
…
Parameters
----------
path : str
Path to object(s) to be checked
verbose : bool
Also report status of items that were OK? (Default: ``False``)
Returns
-------
iterable
"""
#PY2: No support for kw-only parameters after glob parameters
if "verbose" in kwargs:
kwargs.setdefault("opts", {"verbose": kwargs["verbose"]})
del kwargs["verbose"]
args = (path,) + paths
return self._client.request('/pin/verify', args, decoder='json',
stream=True, **kwargs) | [
"def",
"pin_verify",
"(",
"self",
",",
"path",
",",
"*",
"paths",
",",
"*",
"*",
"kwargs",
")",
":",
"#PY2: No support for kw-only parameters after glob parameters",
"if",
"\"verbose\"",
"in",
"kwargs",
":",
"kwargs",
".",
"setdefault",
"(",
"\"opts\"",
",",
"{"... | Verify that recursive pins are complete.
Scan the repo for pinned object graphs and check their integrity.
Issues will be reported back with a helpful human-readable error
message to aid in error recovery. This is useful to help recover
from datastore corruptions (such as when accidentally deleting
files added using the filestore backend).
This function returns an iterator needs to be closed using a context
manager (``with``-statement) or using the ``.close()`` method.
.. code-block:: python
>>> with c.pin_verify("QmN…TTZ", verbose=True) as pin_verify_iter:
... for item in pin_verify_iter:
... print(item)
...
{"Cid":"QmVkNdzCBukBRdpyFiKPyL2R15qPExMr9rV9RFV2kf9eeV","Ok":True}
{"Cid":"QmbPzQruAEFjUU3gQfupns6b8USr8VrD9H71GrqGDXQSxm","Ok":True}
{"Cid":"Qmcns1nUvbeWiecdGDPw8JxWeUfxCV8JKhTfgzs3F8JM4P","Ok":True}
…
Parameters
----------
path : str
Path to object(s) to be checked
verbose : bool
Also report status of items that were OK? (Default: ``False``)
Returns
-------
iterable | [
"Verify",
"that",
"recursive",
"pins",
"are",
"complete",
"."
] | python | train |
mitsei/dlkit | dlkit/services/learning.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/learning.py#L1986-L1994 | def use_plenary_activity_view(self):
"""Pass through to provider ActivityLookupSession.use_plenary_activity_view"""
self._object_views['activity'] = PLENARY
# self._get_provider_session('activity_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_activity_view()
except AttributeError:
pass | [
"def",
"use_plenary_activity_view",
"(",
"self",
")",
":",
"self",
".",
"_object_views",
"[",
"'activity'",
"]",
"=",
"PLENARY",
"# self._get_provider_session('activity_lookup_session') # To make sure the session is tracked",
"for",
"session",
"in",
"self",
".",
"_get_provide... | Pass through to provider ActivityLookupSession.use_plenary_activity_view | [
"Pass",
"through",
"to",
"provider",
"ActivityLookupSession",
".",
"use_plenary_activity_view"
] | python | train |
log2timeline/plaso | plaso/parsers/bsm.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/bsm.py#L530-L551 | def _FormatSubjectOrProcessToken(self, token_data):
"""Formats a subject or process token as a dictionary of values.
Args:
token_data (bsm_token_data_subject32|bsm_token_data_subject64):
AUT_SUBJECT32, AUT_PROCESS32, AUT_SUBJECT64 or AUT_PROCESS64 token
data.
Returns:
dict[str, str]: token values.
"""
ip_address = self._FormatPackedIPv4Address(token_data.ip_address)
return {
'aid': token_data.audit_user_identifier,
'euid': token_data.effective_user_identifier,
'egid': token_data.effective_group_identifier,
'uid': token_data.real_user_identifier,
'gid': token_data.real_group_identifier,
'pid': token_data.process_identifier,
'session_id': token_data.session_identifier,
'terminal_port': token_data.terminal_port,
'terminal_ip': ip_address} | [
"def",
"_FormatSubjectOrProcessToken",
"(",
"self",
",",
"token_data",
")",
":",
"ip_address",
"=",
"self",
".",
"_FormatPackedIPv4Address",
"(",
"token_data",
".",
"ip_address",
")",
"return",
"{",
"'aid'",
":",
"token_data",
".",
"audit_user_identifier",
",",
"'... | Formats a subject or process token as a dictionary of values.
Args:
token_data (bsm_token_data_subject32|bsm_token_data_subject64):
AUT_SUBJECT32, AUT_PROCESS32, AUT_SUBJECT64 or AUT_PROCESS64 token
data.
Returns:
dict[str, str]: token values. | [
"Formats",
"a",
"subject",
"or",
"process",
"token",
"as",
"a",
"dictionary",
"of",
"values",
"."
] | python | train |
onnx/onnxmltools | onnxutils/onnxconverter_common/container.py | https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxutils/onnxconverter_common/container.py#L203-L215 | def add_initializer(self, name, onnx_type, shape, content):
'''
Add a TensorProto into the initializer list of the final ONNX model
:param name: Variable name in the produced ONNX model.
:param onnx_type: Element types allowed in ONNX tensor, e.g., TensorProto.FLOAT and TensorProto.STRING.
:param shape: Tensor shape, a list of integers.
:param content: Flattened tensor values (i.e., a float list or a float array).
'''
if any(d is None for d in shape):
raise ValueError('Shape of initializer cannot contain None')
tensor = helper.make_tensor(name, onnx_type, shape, content)
self.initializers.append(tensor) | [
"def",
"add_initializer",
"(",
"self",
",",
"name",
",",
"onnx_type",
",",
"shape",
",",
"content",
")",
":",
"if",
"any",
"(",
"d",
"is",
"None",
"for",
"d",
"in",
"shape",
")",
":",
"raise",
"ValueError",
"(",
"'Shape of initializer cannot contain None'",
... | Add a TensorProto into the initializer list of the final ONNX model
:param name: Variable name in the produced ONNX model.
:param onnx_type: Element types allowed in ONNX tensor, e.g., TensorProto.FLOAT and TensorProto.STRING.
:param shape: Tensor shape, a list of integers.
:param content: Flattened tensor values (i.e., a float list or a float array). | [
"Add",
"a",
"TensorProto",
"into",
"the",
"initializer",
"list",
"of",
"the",
"final",
"ONNX",
"model"
] | python | train |
i3visio/entify | entify/lib/patterns/regexp.py | https://github.com/i3visio/entify/blob/51c5b89cebee3a39d44d0918e2798739361f337c/entify/lib/patterns/regexp.py#L74-L95 | def getResults(self, parFound = None):
'''
Function to obtain the Dictionarythat represents this object.
:param parFound: values to return.
:return: The output format will be like:
[{"type" : "i3visio.email", "value": "foo@bar.com", "attributes": [] }, {"type" : "i3visio.email", "value": "bar@foo.com", "attributes": [] }]
'''
# Defining a dictionary
results = []
# Defining a dictionary inside with a couple of fields: reg_exp for the regular expression and found_exp for the expressions found.
#results[self.name] = {"reg_exp" : self.reg_exp, "found_exp" : parFound}
#results[self.name] = parFound
if len(parFound ) >0:
for found in parFound:
aux = {}
aux["type"] = self.name
aux["value"] = found
aux["attributes"] = self.getAttributes(found)
results.append(aux)
return results | [
"def",
"getResults",
"(",
"self",
",",
"parFound",
"=",
"None",
")",
":",
"# Defining a dictionary",
"results",
"=",
"[",
"]",
"# Defining a dictionary inside with a couple of fields: reg_exp for the regular expression and found_exp for the expressions found.",
"#results[self.name] =... | Function to obtain the Dictionarythat represents this object.
:param parFound: values to return.
:return: The output format will be like:
[{"type" : "i3visio.email", "value": "foo@bar.com", "attributes": [] }, {"type" : "i3visio.email", "value": "bar@foo.com", "attributes": [] }] | [
"Function",
"to",
"obtain",
"the",
"Dictionarythat",
"represents",
"this",
"object",
".",
":",
"param",
"parFound",
":",
"values",
"to",
"return",
"."
] | python | train |
scott-griffiths/bitstring | bitstring.py | https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L1330-L1333 | def _setbytes_unsafe(self, data, length, offset):
"""Unchecked version of _setbytes_safe."""
self._datastore = ByteStore(data[:], length, offset)
assert self._assertsanity() | [
"def",
"_setbytes_unsafe",
"(",
"self",
",",
"data",
",",
"length",
",",
"offset",
")",
":",
"self",
".",
"_datastore",
"=",
"ByteStore",
"(",
"data",
"[",
":",
"]",
",",
"length",
",",
"offset",
")",
"assert",
"self",
".",
"_assertsanity",
"(",
")"
] | Unchecked version of _setbytes_safe. | [
"Unchecked",
"version",
"of",
"_setbytes_safe",
"."
] | python | train |
SiLab-Bonn/basil | basil/utils/sim/Protocol.py | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/sim/Protocol.py#L57-L62 | def send(self, obj):
"""Prepend a 4-byte length to the string"""
assert isinstance(obj, ProtocolBase)
string = pickle.dumps(obj)
length = len(string)
self.sock.sendall(struct.pack("<I", length) + string) | [
"def",
"send",
"(",
"self",
",",
"obj",
")",
":",
"assert",
"isinstance",
"(",
"obj",
",",
"ProtocolBase",
")",
"string",
"=",
"pickle",
".",
"dumps",
"(",
"obj",
")",
"length",
"=",
"len",
"(",
"string",
")",
"self",
".",
"sock",
".",
"sendall",
"... | Prepend a 4-byte length to the string | [
"Prepend",
"a",
"4",
"-",
"byte",
"length",
"to",
"the",
"string"
] | python | train |
Bystroushaak/pyDHTMLParser | src/dhtmlparser/htmlelement/html_parser.py | https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/htmlelement/html_parser.py#L197-L205 | def _parseIsComment(self):
"""
Detect whether the element is HTML comment or not.
Result is saved to the :attr:`_iscomment` property.
"""
self._iscomment = (
self._element.startswith("<!--") and self._element.endswith("-->")
) | [
"def",
"_parseIsComment",
"(",
"self",
")",
":",
"self",
".",
"_iscomment",
"=",
"(",
"self",
".",
"_element",
".",
"startswith",
"(",
"\"<!--\"",
")",
"and",
"self",
".",
"_element",
".",
"endswith",
"(",
"\"-->\"",
")",
")"
] | Detect whether the element is HTML comment or not.
Result is saved to the :attr:`_iscomment` property. | [
"Detect",
"whether",
"the",
"element",
"is",
"HTML",
"comment",
"or",
"not",
"."
] | python | train |
ArabellaTech/aa-intercom | aa_intercom/tasks.py | https://github.com/ArabellaTech/aa-intercom/blob/f7e2ab63967529660f9c2fe4f1d0bf3cec1502c2/aa_intercom/tasks.py#L113-L127 | def push_not_registered_user_data_task(data):
"""
Async: push_not_registered_user_data_task.apply_async(args=[data], countdown=100)
"""
lock_id = "%s-push-not-registered-user-data-task-%s" % (settings.ENV_PREFIX, data["email"])
acquire_lock = lambda: cache.add(lock_id, "true", LOCK_EXPIRE) # noqa: E731
release_lock = lambda: cache.delete(lock_id) # noqa: E731
if acquire_lock():
try:
upload_not_registered_user_data(data)
except (KeyError, NotImplementedError, MultipleMatchingUsersError):
release_lock()
raise
release_lock() | [
"def",
"push_not_registered_user_data_task",
"(",
"data",
")",
":",
"lock_id",
"=",
"\"%s-push-not-registered-user-data-task-%s\"",
"%",
"(",
"settings",
".",
"ENV_PREFIX",
",",
"data",
"[",
"\"email\"",
"]",
")",
"acquire_lock",
"=",
"lambda",
":",
"cache",
".",
... | Async: push_not_registered_user_data_task.apply_async(args=[data], countdown=100) | [
"Async",
":",
"push_not_registered_user_data_task",
".",
"apply_async",
"(",
"args",
"=",
"[",
"data",
"]",
"countdown",
"=",
"100",
")"
] | python | train |
rflamary/POT | ot/optim.py | https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/optim.py#L18-L72 | def line_search_armijo(f, xk, pk, gfk, old_fval,
args=(), c1=1e-4, alpha0=0.99):
"""
Armijo linesearch function that works with matrices
find an approximate minimum of f(xk+alpha*pk) that satifies the
armijo conditions.
Parameters
----------
f : function
loss function
xk : np.ndarray
initial position
pk : np.ndarray
descent direction
gfk : np.ndarray
gradient of f at xk
old_fval : float
loss value at xk
args : tuple, optional
arguments given to f
c1 : float, optional
c1 const in armijo rule (>0)
alpha0 : float, optional
initial step (>0)
Returns
-------
alpha : float
step that satisfy armijo conditions
fc : int
nb of function call
fa : float
loss value at step alpha
"""
xk = np.atleast_1d(xk)
fc = [0]
def phi(alpha1):
fc[0] += 1
return f(xk + alpha1 * pk, *args)
if old_fval is None:
phi0 = phi(0.)
else:
phi0 = old_fval
derphi0 = np.sum(pk * gfk) # Quickfix for matrices
alpha, phi1 = scalar_search_armijo(
phi, phi0, derphi0, c1=c1, alpha0=alpha0)
return alpha, fc[0], phi1 | [
"def",
"line_search_armijo",
"(",
"f",
",",
"xk",
",",
"pk",
",",
"gfk",
",",
"old_fval",
",",
"args",
"=",
"(",
")",
",",
"c1",
"=",
"1e-4",
",",
"alpha0",
"=",
"0.99",
")",
":",
"xk",
"=",
"np",
".",
"atleast_1d",
"(",
"xk",
")",
"fc",
"=",
... | Armijo linesearch function that works with matrices
find an approximate minimum of f(xk+alpha*pk) that satifies the
armijo conditions.
Parameters
----------
f : function
loss function
xk : np.ndarray
initial position
pk : np.ndarray
descent direction
gfk : np.ndarray
gradient of f at xk
old_fval : float
loss value at xk
args : tuple, optional
arguments given to f
c1 : float, optional
c1 const in armijo rule (>0)
alpha0 : float, optional
initial step (>0)
Returns
-------
alpha : float
step that satisfy armijo conditions
fc : int
nb of function call
fa : float
loss value at step alpha | [
"Armijo",
"linesearch",
"function",
"that",
"works",
"with",
"matrices"
] | python | train |
StackStorm/pybind | pybind/slxos/v17s_1_02/routing_system/interface/ve/ip/interface_vlan_ospf_conf/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/interface/ve/ip/interface_vlan_ospf_conf/__init__.py#L92-L113 | def _set_ospf1(self, v, load=False):
"""
Setter method for ospf1, mapped from YANG variable /routing_system/interface/ve/ip/interface_vlan_ospf_conf/ospf1 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf1() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ospf1.ospf1, is_container='container', presence=False, yang_name="ospf1", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF).', u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "false"', u'sort-priority': u'130', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ospf1 must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ospf1.ospf1, is_container='container', presence=False, yang_name="ospf1", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF).', u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "false"', u'sort-priority': u'130', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
})
self.__ospf1 = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_ospf1",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
... | Setter method for ospf1, mapped from YANG variable /routing_system/interface/ve/ip/interface_vlan_ospf_conf/ospf1 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf1() directly. | [
"Setter",
"method",
"for",
"ospf1",
"mapped",
"from",
"YANG",
"variable",
"/",
"routing_system",
"/",
"interface",
"/",
"ve",
"/",
"ip",
"/",
"interface_vlan_ospf_conf",
"/",
"ospf1",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only"... | python | train |
BreakingBytes/UncertaintyWrapper | uncertainty_wrapper/core.py | https://github.com/BreakingBytes/UncertaintyWrapper/blob/b2431588fb6c1cf6f2a54e2afc9bfa8e10067bd0/uncertainty_wrapper/core.py#L133-L292 | def unc_wrapper_args(*covariance_keys):
"""
Wrap function, calculate its Jacobian and calculate the covariance of the
outputs given the covariance of the specified inputs.
:param covariance_keys: indices and names of arguments corresponding to
covariance
:return: wrapped function bound to specified covariance keys
This is the outer uncertainty wrapper that allows you to specify the
arguments in the original function that correspond to the covariance. The
inner wrapper takes the original function to be wrapped. ::
def f(a, b, c, d, kw1='foo', *args, **kwargs):
pass
# arguments a, c, d and kw1 correspond to the covariance matrix
f_wrapped = unc_wrapper_args(0, 2, 3, 'kw1')(f)
cov = np.array([[0.0001, 0., 0., 0.], [0., 0.0001, 0., 0.],
[0., 0., 0.0001, 0.], [0., 0., 0., 0.0001])
y, cov, jac = f_wrapped(a, b, c, d, kw1='bar', __covariance__=cov)
The covariance keys can be indices of positional arguments or the names of
keywords argument used in calling the function. If no covariance keys are
specified then the arguments that correspond to the covariance shoud be
grouped into a sequence. If ``None`` is anywhere in ``covariance_keys`` then
all of the arguments will be used to calculate the Jacobian.
The covariance matrix must be a symmetrical matrix with positive numbers on
the diagonal that correspond to the square of the standard deviation, second
moment around the mean or root-mean-square(RMS) of the function with respect
to the arguments specified as covariance keys. The other elements are the
covariances corresponding to the arguments intersecting at that element.
Pass the covariance matrix with the keyword ``__covariance__`` and it will
be popped from the dictionary of keyword arguments provided to the wrapped
function.
The wrapped function will return the evaluation of the original function,
its Jacobian, which is the sensitivity of the return output to each
argument specified as a covariance key and the covariance propagated using
the first order terms of a Taylor series expansion around the arguments.
An optional keyword argument ``__method__`` can also be passed to the
wrapped function (not the wrapper) that specifies the method used to
calculate the dot product. The default method is ``'loop'``. The other
methods are ``'dense'``, ``'sparse'`` and ``'pool'``.
If the arguments specified as covariance keys are arrays, they should all be
the same size. These dimensions will be considered as separate observations.
Another argument, not in the covariance keys, may also create observations.
The resulting Jacobian will have dimensions of number of observations (nobs)
by number of return output (nf) by number of covariance keys (nargs). The
resulting covariance will be nobs x nf x nf.
"""
def wrapper(f):
@wraps(f)
def wrapped_function(*args, **kwargs):
cov = kwargs.pop('__covariance__', None) # pop covariance
method = kwargs.pop('__method__', 'loop') # pop covariance
# covariance keys cannot be defaults, they must be in args or kwargs
cov_keys = covariance_keys
# convert args to kwargs by index
kwargs.update({n: v for n, v in enumerate(args)})
args = () # empty args
if None in cov_keys:
# use all keys
cov_keys = kwargs.keys()
# group covariance keys
if len(cov_keys) > 0:
# uses specified keys
x = [np.atleast_1d(kwargs.pop(k)) for k in cov_keys]
else:
# arguments already grouped
x = kwargs.pop(0) # use first argument
# remaining args
args_dict = {}
def args_from_kwargs(kwargs_):
"""unpack positional arguments from keyword arguments"""
# create mapping of positional arguments by index
args_ = [(n, v) for n, v in kwargs_.iteritems()
if not isinstance(n, basestring)]
# sort positional arguments by index
idx, args_ = zip(*sorted(args_, key=lambda m: m[0]))
# remove args_ and their indices from kwargs_
args_dict_ = {n: kwargs_.pop(n) for n in idx}
return args_, args_dict_
if kwargs:
args, args_dict = args_from_kwargs(kwargs)
def f_(x_, *args_, **kwargs_):
"""call original function with independent variables grouped"""
args_dict_ = args_dict
if cov_keys:
kwargs_.update(zip(cov_keys, x_), **args_dict_)
if kwargs_:
args_, _ = args_from_kwargs(kwargs_)
return np.array(f(*args_, **kwargs_))
# assumes independent variables already grouped
return f(x_, *args_, **kwargs_)
# evaluate function and Jacobian
avg = f_(x, *args, **kwargs)
# number of returns and observations
if avg.ndim > 1:
nf, nobs = avg.shape
else:
nf, nobs = avg.size, 1
jac = jacobian(f_, x, nf, nobs, *args, **kwargs)
# calculate covariance
if cov is not None:
# covariance must account for all observations
# scale covariances by x squared in each direction
if cov.ndim == 3:
x = np.array([np.repeat(y, nobs) if len(y)==1
else y for y in x])
LOGGER.debug('x:\n%r', x)
cov = np.array([c * y * np.row_stack(y)
for c, y in zip(cov, x.T)])
else: # x are all only one dimension
x = np.asarray(x)
cov = cov * x * x.T
assert jac.size / nf / nobs == cov.size / len(x)
cov = np.tile(cov, (nobs, 1, 1))
# propagate uncertainty using different methods
if method.lower() == 'dense':
j, c = jflatten(jac), jflatten(cov)
cov = prop_unc((j, c))
# sparse
elif method.lower() == 'sparse':
j, c = jtosparse(jac), jtosparse(cov)
cov = j.dot(c).dot(j.transpose())
cov = cov.todense()
# pool
elif method.lower() == 'pool':
try:
p = Pool()
cov = np.array(p.map(prop_unc, zip(jac, cov)))
finally:
p.terminate()
# loop is the default
else:
cov = np.array([prop_unc((jac[o], cov[o]))
for o in xrange(nobs)])
# dense and spares are flattened, unravel them into 3-D list of
# observations
if method.lower() in ['dense', 'sparse']:
cov = np.array([
cov[(nf * o):(nf * (o + 1)), (nf * o):(nf * (o + 1))]
for o in xrange(nobs)
])
# unpack returns for original function with ungrouped arguments
if None in cov_keys or len(cov_keys) > 0:
return tuple(avg.tolist() + [cov, jac])
# independent variables were already grouped
return avg, cov, jac
return wrapped_function
return wrapper | [
"def",
"unc_wrapper_args",
"(",
"*",
"covariance_keys",
")",
":",
"def",
"wrapper",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapped_function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cov",
"=",
"kwargs",
".",
"pop",
"... | Wrap function, calculate its Jacobian and calculate the covariance of the
outputs given the covariance of the specified inputs.
:param covariance_keys: indices and names of arguments corresponding to
covariance
:return: wrapped function bound to specified covariance keys
This is the outer uncertainty wrapper that allows you to specify the
arguments in the original function that correspond to the covariance. The
inner wrapper takes the original function to be wrapped. ::
def f(a, b, c, d, kw1='foo', *args, **kwargs):
pass
# arguments a, c, d and kw1 correspond to the covariance matrix
f_wrapped = unc_wrapper_args(0, 2, 3, 'kw1')(f)
cov = np.array([[0.0001, 0., 0., 0.], [0., 0.0001, 0., 0.],
[0., 0., 0.0001, 0.], [0., 0., 0., 0.0001])
y, cov, jac = f_wrapped(a, b, c, d, kw1='bar', __covariance__=cov)
The covariance keys can be indices of positional arguments or the names of
keywords argument used in calling the function. If no covariance keys are
specified then the arguments that correspond to the covariance shoud be
grouped into a sequence. If ``None`` is anywhere in ``covariance_keys`` then
all of the arguments will be used to calculate the Jacobian.
The covariance matrix must be a symmetrical matrix with positive numbers on
the diagonal that correspond to the square of the standard deviation, second
moment around the mean or root-mean-square(RMS) of the function with respect
to the arguments specified as covariance keys. The other elements are the
covariances corresponding to the arguments intersecting at that element.
Pass the covariance matrix with the keyword ``__covariance__`` and it will
be popped from the dictionary of keyword arguments provided to the wrapped
function.
The wrapped function will return the evaluation of the original function,
its Jacobian, which is the sensitivity of the return output to each
argument specified as a covariance key and the covariance propagated using
the first order terms of a Taylor series expansion around the arguments.
An optional keyword argument ``__method__`` can also be passed to the
wrapped function (not the wrapper) that specifies the method used to
calculate the dot product. The default method is ``'loop'``. The other
methods are ``'dense'``, ``'sparse'`` and ``'pool'``.
If the arguments specified as covariance keys are arrays, they should all be
the same size. These dimensions will be considered as separate observations.
Another argument, not in the covariance keys, may also create observations.
The resulting Jacobian will have dimensions of number of observations (nobs)
by number of return output (nf) by number of covariance keys (nargs). The
resulting covariance will be nobs x nf x nf. | [
"Wrap",
"function",
"calculate",
"its",
"Jacobian",
"and",
"calculate",
"the",
"covariance",
"of",
"the",
"outputs",
"given",
"the",
"covariance",
"of",
"the",
"specified",
"inputs",
".",
":",
"param",
"covariance_keys",
":",
"indices",
"and",
"names",
"of",
"... | python | train |
vaexio/vaex | packages/vaex-arrow/vaex_arrow/export.py | https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-arrow/vaex_arrow/export.py#L36-L108 | def _export_table(dataset, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
"""
:param DatasetLocal dataset: dataset to export
:param str path: path for file
:param lis[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param: bool virtual: When True, export virtual columns
:return:
"""
column_names = column_names or dataset.get_column_names(virtual=virtual, strings=True)
for name in column_names:
if name not in dataset.columns:
warnings.warn('Exporting to arrow with virtual columns is not efficient')
N = len(dataset) if not selection else dataset.selected_length(selection)
if N == 0:
raise ValueError("Cannot export empty table")
if shuffle and sort:
raise ValueError("Cannot shuffle and sort at the same time")
if shuffle:
random_index_column = "random_index"
while random_index_column in dataset.get_column_names():
random_index_column += "_new"
partial_shuffle = shuffle and len(dataset) != N
order_array = None
if partial_shuffle:
# if we only export a portion, we need to create the full length random_index array, and
shuffle_array_full = np.random.choice(len(dataset), len(dataset), replace=False)
# then take a section of it
# shuffle_array[:] = shuffle_array_full[:N]
shuffle_array = shuffle_array_full[shuffle_array_full < N]
del shuffle_array_full
order_array = shuffle_array
elif shuffle:
shuffle_array = np.random.choice(N, N, replace=False)
order_array = shuffle_array
if sort:
if selection:
raise ValueError("sorting selections not yet supported")
logger.info("sorting...")
indices = np.argsort(dataset.evaluate(sort))
order_array = indices if ascending else indices[::-1]
logger.info("sorting done")
if selection:
full_mask = dataset.evaluate_selection_mask(selection)
else:
full_mask = None
arrow_arrays = []
for column_name in column_names:
mask = full_mask
if selection:
values = dataset.evaluate(column_name, filtered=False)
values = values[mask]
else:
values = dataset.evaluate(column_name)
if shuffle or sort:
indices = order_array
values = values[indices]
arrow_arrays.append(arrow_array_from_numpy_array(values))
if shuffle:
arrow_arrays.append(arrow_array_from_numpy_array(order_array))
column_names = column_names + [random_index_column]
table = pa.Table.from_arrays(arrow_arrays, column_names)
return table | [
"def",
"_export_table",
"(",
"dataset",
",",
"column_names",
"=",
"None",
",",
"byteorder",
"=",
"\"=\"",
",",
"shuffle",
"=",
"False",
",",
"selection",
"=",
"False",
",",
"progress",
"=",
"None",
",",
"virtual",
"=",
"True",
",",
"sort",
"=",
"None",
... | :param DatasetLocal dataset: dataset to export
:param str path: path for file
:param lis[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param: bool virtual: When True, export virtual columns
:return: | [
":",
"param",
"DatasetLocal",
"dataset",
":",
"dataset",
"to",
"export",
":",
"param",
"str",
"path",
":",
"path",
"for",
"file",
":",
"param",
"lis",
"[",
"str",
"]",
"column_names",
":",
"list",
"of",
"column",
"names",
"to",
"export",
"or",
"None",
... | python | test |
bcbio/bcbio-nextgen | bcbio/variation/vcfutils.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L140-L149 | def get_indelcaller(d_or_c):
"""Retrieve string for indelcaller to use, or empty string if not specified.
"""
config = d_or_c if isinstance(d_or_c, dict) and "config" in d_or_c else d_or_c
indelcaller = config["algorithm"].get("indelcaller", "")
if not indelcaller:
indelcaller = ""
if isinstance(indelcaller, (list, tuple)):
indelcaller = indelcaller[0] if (len(indelcaller) > 0) else ""
return indelcaller | [
"def",
"get_indelcaller",
"(",
"d_or_c",
")",
":",
"config",
"=",
"d_or_c",
"if",
"isinstance",
"(",
"d_or_c",
",",
"dict",
")",
"and",
"\"config\"",
"in",
"d_or_c",
"else",
"d_or_c",
"indelcaller",
"=",
"config",
"[",
"\"algorithm\"",
"]",
".",
"get",
"("... | Retrieve string for indelcaller to use, or empty string if not specified. | [
"Retrieve",
"string",
"for",
"indelcaller",
"to",
"use",
"or",
"empty",
"string",
"if",
"not",
"specified",
"."
] | python | train |
PythonCharmers/python-future | src/future/backports/urllib/request.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/request.py#L889-L901 | def is_suburi(self, base, test):
"""Check if test is below base in a URI tree
Both args must be URIs in reduced form.
"""
if base == test:
return True
if base[0] != test[0]:
return False
common = posixpath.commonprefix((base[1], test[1]))
if len(common) == len(base[1]):
return True
return False | [
"def",
"is_suburi",
"(",
"self",
",",
"base",
",",
"test",
")",
":",
"if",
"base",
"==",
"test",
":",
"return",
"True",
"if",
"base",
"[",
"0",
"]",
"!=",
"test",
"[",
"0",
"]",
":",
"return",
"False",
"common",
"=",
"posixpath",
".",
"commonprefix... | Check if test is below base in a URI tree
Both args must be URIs in reduced form. | [
"Check",
"if",
"test",
"is",
"below",
"base",
"in",
"a",
"URI",
"tree"
] | python | train |
MillionIntegrals/vel | vel/api/info.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/info.py#L203-L210 | def on_epoch_end(self):
""" Finish epoch processing """
self.freeze_epoch_result()
for callback in self.callbacks:
callback.on_epoch_end(self)
self.training_info.history.add(self.result) | [
"def",
"on_epoch_end",
"(",
"self",
")",
":",
"self",
".",
"freeze_epoch_result",
"(",
")",
"for",
"callback",
"in",
"self",
".",
"callbacks",
":",
"callback",
".",
"on_epoch_end",
"(",
"self",
")",
"self",
".",
"training_info",
".",
"history",
".",
"add",... | Finish epoch processing | [
"Finish",
"epoch",
"processing"
] | python | train |
nvbn/thefuck | thefuck/utils.py | https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/utils.py#L325-L337 | def format_raw_script(raw_script):
"""Creates single script from a list of script parts.
:type raw_script: [basestring]
:rtype: basestring
"""
if six.PY2:
script = ' '.join(arg.decode('utf-8') for arg in raw_script)
else:
script = ' '.join(raw_script)
return script.strip() | [
"def",
"format_raw_script",
"(",
"raw_script",
")",
":",
"if",
"six",
".",
"PY2",
":",
"script",
"=",
"' '",
".",
"join",
"(",
"arg",
".",
"decode",
"(",
"'utf-8'",
")",
"for",
"arg",
"in",
"raw_script",
")",
"else",
":",
"script",
"=",
"' '",
".",
... | Creates single script from a list of script parts.
:type raw_script: [basestring]
:rtype: basestring | [
"Creates",
"single",
"script",
"from",
"a",
"list",
"of",
"script",
"parts",
"."
] | python | train |
Unidata/MetPy | metpy/calc/tools.py | https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/tools.py#L101-L173 | def find_intersections(x, a, b, direction='all'):
"""Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines.
"""
# Find the index of the points just before the intersection(s)
nearest_idx = nearest_intersection_idx(a, b)
next_idx = nearest_idx + 1
# Determine the sign of the change
sign_change = np.sign(a[next_idx] - b[next_idx])
# x-values around each intersection
_, x0 = _next_non_masked_element(x, nearest_idx)
_, x1 = _next_non_masked_element(x, next_idx)
# y-values around each intersection for the first line
_, a0 = _next_non_masked_element(a, nearest_idx)
_, a1 = _next_non_masked_element(a, next_idx)
# y-values around each intersection for the second line
_, b0 = _next_non_masked_element(b, nearest_idx)
_, b1 = _next_non_masked_element(b, next_idx)
# Calculate the x-intersection. This comes from finding the equations of the two lines,
# one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),
# finding their intersection, and reducing with a bunch of algebra.
delta_y0 = a0 - b0
delta_y1 = a1 - b1
intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)
# Calculate the y-intersection of the lines. Just plug the x above into the equation
# for the line through the a points. One could solve for y like x above, but this
# causes weirder unit behavior and seems a little less good numerically.
intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0
# If there's no intersections, return
if len(intersect_x) == 0:
return intersect_x, intersect_y
# Check for duplicates
duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
# Make a mask based on the direction of sign change desired
if direction == 'increasing':
mask = sign_change > 0
elif direction == 'decreasing':
mask = sign_change < 0
elif direction == 'all':
return intersect_x[duplicate_mask], intersect_y[duplicate_mask]
else:
raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask] | [
"def",
"find_intersections",
"(",
"x",
",",
"a",
",",
"b",
",",
"direction",
"=",
"'all'",
")",
":",
"# Find the index of the points just before the intersection(s)",
"nearest_idx",
"=",
"nearest_intersection_idx",
"(",
"a",
",",
"b",
")",
"next_idx",
"=",
"nearest_... | Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines. | [
"Calculate",
"the",
"best",
"estimate",
"of",
"intersection",
"."
] | python | train |
shaldengeki/python-mal | myanimelist/user.py | https://github.com/shaldengeki/python-mal/blob/2c3356411a74d88ba13f6b970388040d696f8392/myanimelist/user.py#L584-L609 | def load_reviews(self):
"""Fetches the MAL user reviews page and sets the current user's reviews attributes.
:rtype: :class:`.User`
:return: Current user object.
"""
page = 0
# collect all reviews over all pages.
review_collection = []
while True:
user_reviews = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username) + u'/reviews&' + urllib.urlencode({u'p': page})).text
parse_result = self.parse_reviews(utilities.get_clean_dom(user_reviews))
if page == 0:
# only set attributes once the first time around.
self.set(parse_result)
if len(parse_result[u'reviews']) == 0:
break
review_collection.append(parse_result[u'reviews'])
page += 1
# merge the review collections into one review dict, and set it.
self.set({
'reviews': {k: v for d in review_collection for k,v in d.iteritems()}
})
return self | [
"def",
"load_reviews",
"(",
"self",
")",
":",
"page",
"=",
"0",
"# collect all reviews over all pages.",
"review_collection",
"=",
"[",
"]",
"while",
"True",
":",
"user_reviews",
"=",
"self",
".",
"session",
".",
"session",
".",
"get",
"(",
"u'http://myanimelist... | Fetches the MAL user reviews page and sets the current user's reviews attributes.
:rtype: :class:`.User`
:return: Current user object. | [
"Fetches",
"the",
"MAL",
"user",
"reviews",
"page",
"and",
"sets",
"the",
"current",
"user",
"s",
"reviews",
"attributes",
"."
] | python | train |
CxAalto/gtfspy | gtfspy/import_loaders/table_loader.py | https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_loaders/table_loader.py#L375-L392 | def copy(cls, conn, **where):
"""Copy data from one table to another while filtering data at the same time
Parameters
----------
conn: sqlite3 DB connection. It must have a second database
attached as "other".
**where : keyword arguments
specifying (start_ut and end_ut for filtering, see the copy_where clause in the subclasses)
"""
cur = conn.cursor()
if where and cls.copy_where:
copy_where = cls.copy_where.format(**where)
# print(copy_where)
else:
copy_where = ''
cur.execute('INSERT INTO %s '
'SELECT * FROM source.%s %s' % (cls.table, cls.table, copy_where)) | [
"def",
"copy",
"(",
"cls",
",",
"conn",
",",
"*",
"*",
"where",
")",
":",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"if",
"where",
"and",
"cls",
".",
"copy_where",
":",
"copy_where",
"=",
"cls",
".",
"copy_where",
".",
"format",
"(",
"*",
"*",... | Copy data from one table to another while filtering data at the same time
Parameters
----------
conn: sqlite3 DB connection. It must have a second database
attached as "other".
**where : keyword arguments
specifying (start_ut and end_ut for filtering, see the copy_where clause in the subclasses) | [
"Copy",
"data",
"from",
"one",
"table",
"to",
"another",
"while",
"filtering",
"data",
"at",
"the",
"same",
"time"
] | python | valid |
rwl/pylon | pylon/io/psat.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/psat.py#L438-L454 | def push_pv(self, tokens):
""" Creates and Generator object, populates it with data, finds its Bus
and adds it.
"""
logger.debug("Pushing PV data: %s" % tokens)
bus = self.case.buses[tokens["bus_no"]-1]
g = Generator(bus)
g.p = tokens["p"]
g.q_max = tokens["q_max"]
g.q_min = tokens["q_min"]
# Optional parameter
# if tokens.has_key("status"):
# g.online = tokens["status"]
self.case.generators.append(g) | [
"def",
"push_pv",
"(",
"self",
",",
"tokens",
")",
":",
"logger",
".",
"debug",
"(",
"\"Pushing PV data: %s\"",
"%",
"tokens",
")",
"bus",
"=",
"self",
".",
"case",
".",
"buses",
"[",
"tokens",
"[",
"\"bus_no\"",
"]",
"-",
"1",
"]",
"g",
"=",
"Genera... | Creates and Generator object, populates it with data, finds its Bus
and adds it. | [
"Creates",
"and",
"Generator",
"object",
"populates",
"it",
"with",
"data",
"finds",
"its",
"Bus",
"and",
"adds",
"it",
"."
] | python | train |
royi1000/py-libhdate | hdate/date.py | https://github.com/royi1000/py-libhdate/blob/12af759fb69f1d6403abed3762beaf5ace16a34b/hdate/date.py#L91-L95 | def hdate(self):
"""Return the hebrew date."""
if self._last_updated == "hdate":
return self._hdate
return conv.jdn_to_hdate(self._jdn) | [
"def",
"hdate",
"(",
"self",
")",
":",
"if",
"self",
".",
"_last_updated",
"==",
"\"hdate\"",
":",
"return",
"self",
".",
"_hdate",
"return",
"conv",
".",
"jdn_to_hdate",
"(",
"self",
".",
"_jdn",
")"
] | Return the hebrew date. | [
"Return",
"the",
"hebrew",
"date",
"."
] | python | train |
awslabs/sockeye | sockeye/utils.py | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/utils.py#L395-L430 | def print_attention_text(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str],
threshold: float):
"""
Prints the attention matrix to standard out.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param threshold: The threshold for including an alignment link in the result.
"""
sys.stdout.write(" ")
for _ in target_tokens:
sys.stdout.write("---")
sys.stdout.write("\n")
for i, f_i in enumerate(source_tokens): # type: ignore
sys.stdout.write(" |")
for j in range(len(target_tokens)):
align_prob = attention_matrix[j, i]
if align_prob > threshold:
sys.stdout.write("(*)")
elif align_prob > 0.4:
sys.stdout.write("(?)")
else:
sys.stdout.write(" ")
sys.stdout.write(" | %s\n" % f_i)
sys.stdout.write(" ")
for _ in target_tokens:
sys.stdout.write("---")
sys.stdout.write("\n")
for k in range(max(map(len, target_tokens))):
sys.stdout.write(" ")
for word in target_tokens:
letter = word[k] if len(word) > k else " "
sys.stdout.write(" %s " % letter)
sys.stdout.write("\n")
sys.stdout.write("\n") | [
"def",
"print_attention_text",
"(",
"attention_matrix",
":",
"np",
".",
"ndarray",
",",
"source_tokens",
":",
"List",
"[",
"str",
"]",
",",
"target_tokens",
":",
"List",
"[",
"str",
"]",
",",
"threshold",
":",
"float",
")",
":",
"sys",
".",
"stdout",
"."... | Prints the attention matrix to standard out.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param threshold: The threshold for including an alignment link in the result. | [
"Prints",
"the",
"attention",
"matrix",
"to",
"standard",
"out",
"."
] | python | train |
bcbio/bcbio-nextgen | bcbio/qc/qualimap.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L161-L172 | def _bed_to_bed6(orig_file, out_dir):
"""Convert bed to required bed6 inputs.
"""
bed6_file = os.path.join(out_dir, "%s-bed6%s" % os.path.splitext(os.path.basename(orig_file)))
if not utils.file_exists(bed6_file):
with open(bed6_file, "w") as out_handle:
for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)):
region = [x for x in list(region) if x]
fillers = [str(i), "1.0", "+"]
full = region + fillers[:6 - len(region)]
out_handle.write("\t".join(full) + "\n")
return bed6_file | [
"def",
"_bed_to_bed6",
"(",
"orig_file",
",",
"out_dir",
")",
":",
"bed6_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"\"%s-bed6%s\"",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"orig_file... | Convert bed to required bed6 inputs. | [
"Convert",
"bed",
"to",
"required",
"bed6",
"inputs",
"."
] | python | train |
UDST/orca | orca/server/server.py | https://github.com/UDST/orca/blob/07b34aeef13cc87c966b2e30cbe7e76cc9d3622c/orca/server/server.py#L290-L314 | def column_definition(table_name, col_name):
"""
Get the source of a column function.
If a column is a registered Series and not a function then all that is
returned is {'type': 'series'}.
If the column is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
"""
col_type = orca.get_table(table_name).column_type(col_name)
if col_type != 'function':
return jsonify(type=col_type)
filename, lineno, source = \
orca.get_raw_column(table_name, col_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html) | [
"def",
"column_definition",
"(",
"table_name",
",",
"col_name",
")",
":",
"col_type",
"=",
"orca",
".",
"get_table",
"(",
"table_name",
")",
".",
"column_type",
"(",
"col_name",
")",
"if",
"col_type",
"!=",
"'function'",
":",
"return",
"jsonify",
"(",
"type"... | Get the source of a column function.
If a column is a registered Series and not a function then all that is
returned is {'type': 'series'}.
If the column is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments. | [
"Get",
"the",
"source",
"of",
"a",
"column",
"function",
"."
] | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/ext/egl.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/ext/egl.py#L259-L265 | def eglQueryString(display, name):
""" Query string from display
"""
out = _lib.eglQueryString(display, name)
if not out:
raise RuntimeError('Could not query %s' % name)
return out | [
"def",
"eglQueryString",
"(",
"display",
",",
"name",
")",
":",
"out",
"=",
"_lib",
".",
"eglQueryString",
"(",
"display",
",",
"name",
")",
"if",
"not",
"out",
":",
"raise",
"RuntimeError",
"(",
"'Could not query %s'",
"%",
"name",
")",
"return",
"out"
] | Query string from display | [
"Query",
"string",
"from",
"display"
] | python | train |
JonLiuFYI/pkdx | pkdx/pkdx/main.py | https://github.com/JonLiuFYI/pkdx/blob/269e9814df074e0df25972fad04539a644d73a3c/pkdx/pkdx/main.py#L60-L70 | def get_move_data(move):
"""Return the index number for the given move name. Check moves.json in the same directory."""
srcpath = path.dirname(__file__)
try:
f = open(path.join(srcpath, 'moves.json'), 'r')
except IOError:
get_moves()
f = open(path.join(srcpath, 'moves.json'), 'r')
finally:
with f:
return json.load(f)[move] | [
"def",
"get_move_data",
"(",
"move",
")",
":",
"srcpath",
"=",
"path",
".",
"dirname",
"(",
"__file__",
")",
"try",
":",
"f",
"=",
"open",
"(",
"path",
".",
"join",
"(",
"srcpath",
",",
"'moves.json'",
")",
",",
"'r'",
")",
"except",
"IOError",
":",
... | Return the index number for the given move name. Check moves.json in the same directory. | [
"Return",
"the",
"index",
"number",
"for",
"the",
"given",
"move",
"name",
".",
"Check",
"moves",
".",
"json",
"in",
"the",
"same",
"directory",
"."
] | python | train |
jazzband/sorl-thumbnail | sorl/thumbnail/engines/base.py | https://github.com/jazzband/sorl-thumbnail/blob/22ccd9781462a820f963f57018ad3dcef85053ed/sorl/thumbnail/engines/base.py#L119-L125 | def blur(self, image, geometry, options):
"""
Wrapper for ``_blur``
"""
if options.get('blur'):
return self._blur(image, int(options.get('blur')))
return image | [
"def",
"blur",
"(",
"self",
",",
"image",
",",
"geometry",
",",
"options",
")",
":",
"if",
"options",
".",
"get",
"(",
"'blur'",
")",
":",
"return",
"self",
".",
"_blur",
"(",
"image",
",",
"int",
"(",
"options",
".",
"get",
"(",
"'blur'",
")",
"... | Wrapper for ``_blur`` | [
"Wrapper",
"for",
"_blur"
] | python | train |
numenta/nupic | src/nupic/math/topology.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/math/topology.py#L81-L119 | def neighborhood(centerIndex, radius, dimensions):
"""
Get the points in the neighborhood of a point.
A point's neighborhood is the n-dimensional hypercube with sides ranging
[center - radius, center + radius], inclusive. For example, if there are two
dimensions and the radius is 3, the neighborhood is 6x6. Neighborhoods are
truncated when they are near an edge.
This is designed to be fast. In C++ it's fastest to iterate through neighbors
one by one, calculating them on-demand rather than creating a list of them.
But in Python it's faster to build up the whole list in batch via a few calls
to C code rather than calculating them on-demand with lots of calls to Python
code.
:param centerIndex: (int) The index of the point. The coordinates are
expressed as a single index by using the dimensions as a mixed radix
definition. For example, in dimensions 42x10, the point [1, 4] is index
1*420 + 4*10 = 460.
:param radius: (int) The radius of this neighborhood about the
``centerIndex``.
:param dimensions: (indexable sequence) The dimensions of the world outside
this neighborhood.
:returns: (numpy array) The points in the neighborhood, including
``centerIndex``.
"""
centerPosition = coordinatesFromIndex(centerIndex, dimensions)
intervals = []
for i, dimension in enumerate(dimensions):
left = max(0, centerPosition[i] - radius)
right = min(dimension - 1, centerPosition[i] + radius)
intervals.append(xrange(left, right + 1))
coords = numpy.array(list(itertools.product(*intervals)))
return numpy.ravel_multi_index(coords.T, dimensions) | [
"def",
"neighborhood",
"(",
"centerIndex",
",",
"radius",
",",
"dimensions",
")",
":",
"centerPosition",
"=",
"coordinatesFromIndex",
"(",
"centerIndex",
",",
"dimensions",
")",
"intervals",
"=",
"[",
"]",
"for",
"i",
",",
"dimension",
"in",
"enumerate",
"(",
... | Get the points in the neighborhood of a point.
A point's neighborhood is the n-dimensional hypercube with sides ranging
[center - radius, center + radius], inclusive. For example, if there are two
dimensions and the radius is 3, the neighborhood is 6x6. Neighborhoods are
truncated when they are near an edge.
This is designed to be fast. In C++ it's fastest to iterate through neighbors
one by one, calculating them on-demand rather than creating a list of them.
But in Python it's faster to build up the whole list in batch via a few calls
to C code rather than calculating them on-demand with lots of calls to Python
code.
:param centerIndex: (int) The index of the point. The coordinates are
expressed as a single index by using the dimensions as a mixed radix
definition. For example, in dimensions 42x10, the point [1, 4] is index
1*420 + 4*10 = 460.
:param radius: (int) The radius of this neighborhood about the
``centerIndex``.
:param dimensions: (indexable sequence) The dimensions of the world outside
this neighborhood.
:returns: (numpy array) The points in the neighborhood, including
``centerIndex``. | [
"Get",
"the",
"points",
"in",
"the",
"neighborhood",
"of",
"a",
"point",
"."
] | python | valid |
MisterWil/abodepy | abodepy/helpers/constants.py | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/helpers/constants.py#L228-L280 | def get_generic_type(type_tag):
"""Map type tag to generic type."""
return {
# Alarm
DEVICE_ALARM: TYPE_ALARM,
# Binary Sensors - Connectivity
DEVICE_GLASS_BREAK: TYPE_CONNECTIVITY,
DEVICE_KEYPAD: TYPE_CONNECTIVITY,
DEVICE_REMOTE_CONTROLLER: TYPE_CONNECTIVITY,
DEVICE_SIREN: TYPE_CONNECTIVITY,
DEVICE_STATUS_DISPLAY: TYPE_CONNECTIVITY,
# Binary Sensors - Opening
DEVICE_DOOR_CONTACT: TYPE_OPENING,
# Cameras
DEVICE_MOTION_CAMERA: TYPE_CAMERA,
DEVICE_MOTION_VIDEO_CAMERA: TYPE_CAMERA,
DEVICE_IP_CAM: TYPE_CAMERA,
DEVICE_OUTDOOR_MOTION_CAMERA: TYPE_CAMERA,
# Covers
DEVICE_SECURE_BARRIER: TYPE_COVER,
# Lights (Dimmers)
DEVICE_DIMMER: TYPE_LIGHT,
DEVICE_DIMMER_METER: TYPE_LIGHT,
DEVICE_HUE: TYPE_LIGHT,
# Locks
DEVICE_DOOR_LOCK: TYPE_LOCK,
# Moisture
DEVICE_WATER_SENSOR: TYPE_CONNECTIVITY,
# Switches
DEVICE_SWITCH: TYPE_SWITCH,
DEVICE_NIGHT_SWITCH: TYPE_SWITCH,
DEVICE_POWER_SWITCH_SENSOR: TYPE_SWITCH,
DEVICE_POWER_SWITCH_METER: TYPE_SWITCH,
# Water Valve
DEVICE_VALVE: TYPE_VALVE,
# Unknown Sensors
# More data needed to determine type
DEVICE_ROOM_SENSOR: TYPE_UNKNOWN_SENSOR,
DEVICE_TEMPERATURE_SENSOR: TYPE_UNKNOWN_SENSOR,
DEVICE_MULTI_SENSOR: TYPE_UNKNOWN_SENSOR,
DEVICE_PIR: TYPE_UNKNOWN_SENSOR,
DEVICE_POVS: TYPE_UNKNOWN_SENSOR,
}.get(type_tag.lower(), None) | [
"def",
"get_generic_type",
"(",
"type_tag",
")",
":",
"return",
"{",
"# Alarm",
"DEVICE_ALARM",
":",
"TYPE_ALARM",
",",
"# Binary Sensors - Connectivity",
"DEVICE_GLASS_BREAK",
":",
"TYPE_CONNECTIVITY",
",",
"DEVICE_KEYPAD",
":",
"TYPE_CONNECTIVITY",
",",
"DEVICE_REMOTE_C... | Map type tag to generic type. | [
"Map",
"type",
"tag",
"to",
"generic",
"type",
"."
] | python | train |
peri-source/peri | peri/states.py | https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L634-L664 | def get_update_io_tiles(self, params, values):
"""
Get the tiles corresponding to a particular section of image needed to
be updated. Inputs are the parameters and values. Returned is the
padded tile, inner tile, and slicer to go between, but accounting for
wrap with the edge of the image as necessary.
"""
# get the affected area of the model image
otile = self.get_update_tile(params, values)
if otile is None:
return [None]*3
ptile = self.get_padding_size(otile) or util.Tile(0, dim=otile.dim)
otile = util.Tile.intersection(otile, self.oshape)
if (otile.shape <= 0).any():
raise UpdateError("update triggered invalid tile size")
if (ptile.shape < 0).any() or (ptile.shape > self.oshape.shape).any():
raise UpdateError("update triggered invalid padding tile size")
# now remove the part of the tile that is outside the image and pad the
# interior part with that overhang. reflect the necessary padding back
# into the image itself for the outer slice which we will call outer
outer = otile.pad((ptile.shape+1)//2)
inner, outer = outer.reflect_overhang(self.oshape)
iotile = inner.translate(-outer.l)
outer = util.Tile.intersection(outer, self.oshape)
inner = util.Tile.intersection(inner, self.oshape)
return outer, inner, iotile | [
"def",
"get_update_io_tiles",
"(",
"self",
",",
"params",
",",
"values",
")",
":",
"# get the affected area of the model image",
"otile",
"=",
"self",
".",
"get_update_tile",
"(",
"params",
",",
"values",
")",
"if",
"otile",
"is",
"None",
":",
"return",
"[",
"... | Get the tiles corresponding to a particular section of image needed to
be updated. Inputs are the parameters and values. Returned is the
padded tile, inner tile, and slicer to go between, but accounting for
wrap with the edge of the image as necessary. | [
"Get",
"the",
"tiles",
"corresponding",
"to",
"a",
"particular",
"section",
"of",
"image",
"needed",
"to",
"be",
"updated",
".",
"Inputs",
"are",
"the",
"parameters",
"and",
"values",
".",
"Returned",
"is",
"the",
"padded",
"tile",
"inner",
"tile",
"and",
... | python | valid |
inveniosoftware-attic/invenio-knowledge | invenio_knowledge/restful.py | https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/restful.py#L382-L405 | def search_list(kb, from_=None, match_type=None,
page=None, per_page=None, unique=False):
"""Search "mapping from" for knowledge."""
# init
page = page or 1
per_page = per_page or 10
if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']:
# get the base query
query = api.query_kb_mappings(
kbid=kb.id,
key=from_ or '',
match_type=match_type or 's'
).with_entities(models.KnwKBRVAL.m_key)
# if you want a 'unique' list
if unique:
query = query.distinct()
# run query and paginate
return [item.m_key for item in
pagination.RestfulSQLAlchemyPagination(
query, page=page or 1,
per_page=per_page or 10
).items]
return [] | [
"def",
"search_list",
"(",
"kb",
",",
"from_",
"=",
"None",
",",
"match_type",
"=",
"None",
",",
"page",
"=",
"None",
",",
"per_page",
"=",
"None",
",",
"unique",
"=",
"False",
")",
":",
"# init",
"page",
"=",
"page",
"or",
"1",
"per_page",
"=",
"p... | Search "mapping from" for knowledge. | [
"Search",
"mapping",
"from",
"for",
"knowledge",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/distlib/_backport/sysconfig.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/sysconfig.py#L455-L460 | def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name] | [
"def",
"get_path",
"(",
"name",
",",
"scheme",
"=",
"_get_default_scheme",
"(",
")",
",",
"vars",
"=",
"None",
",",
"expand",
"=",
"True",
")",
":",
"return",
"get_paths",
"(",
"scheme",
",",
"vars",
",",
"expand",
")",
"[",
"name",
"]"
] | Return a path corresponding to the scheme.
``scheme`` is the install scheme name. | [
"Return",
"a",
"path",
"corresponding",
"to",
"the",
"scheme",
"."
] | python | train |
RediSearch/redisearch-py | redisearch/client.py | https://github.com/RediSearch/redisearch-py/blob/f65d1dd078713cbe9b83584e86655a254d0531ab/redisearch/client.py#L306-L323 | def search(self, query):
"""
Search the index for a given query, and return a result of documents
### Parameters
- **query**: the search query. Either a text for simple queries with default parameters, or a Query object for complex queries.
See RediSearch's documentation on query format
- **snippet_sizes**: A dictionary of {field: snippet_size} used to trim and format the result. e.g.e {'body': 500}
"""
args, query = self._mk_query_args(query)
st = time.time()
res = self.redis.execute_command(self.SEARCH_CMD, *args)
return Result(res,
not query._no_content,
duration=(time.time() - st) * 1000.0,
has_payload=query._with_payloads) | [
"def",
"search",
"(",
"self",
",",
"query",
")",
":",
"args",
",",
"query",
"=",
"self",
".",
"_mk_query_args",
"(",
"query",
")",
"st",
"=",
"time",
".",
"time",
"(",
")",
"res",
"=",
"self",
".",
"redis",
".",
"execute_command",
"(",
"self",
".",... | Search the index for a given query, and return a result of documents
### Parameters
- **query**: the search query. Either a text for simple queries with default parameters, or a Query object for complex queries.
See RediSearch's documentation on query format
- **snippet_sizes**: A dictionary of {field: snippet_size} used to trim and format the result. e.g.e {'body': 500} | [
"Search",
"the",
"index",
"for",
"a",
"given",
"query",
"and",
"return",
"a",
"result",
"of",
"documents"
] | python | valid |
NatLibFi/Skosify | skosify/skosify.py | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L767-L791 | def check_hierarchy(rdf, break_cycles, keep_related, mark_top_concepts,
eliminate_redundancy):
"""Check for, and optionally fix, problems in the skos:broader hierarchy
using a recursive depth first search algorithm.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix_cycles: Break cycles.
:param bool fix_disjoint_relations: Remoe skos:related overlapping with
skos:broaderTransitive.
:param bool fix_redundancy: Remove skos:broader between two concepts otherwise
connected by skos:broaderTransitive.
"""
starttime = time.time()
if check.hierarchy_cycles(rdf, break_cycles):
logging.info(
"Some concepts not reached in initial cycle detection. "
"Re-checking for loose concepts.")
setup_top_concepts(rdf, mark_top_concepts)
check.disjoint_relations(rdf, not keep_related)
check.hierarchical_redundancy(rdf, eliminate_redundancy)
endtime = time.time()
logging.debug("check_hierarchy took %f seconds", (endtime - starttime)) | [
"def",
"check_hierarchy",
"(",
"rdf",
",",
"break_cycles",
",",
"keep_related",
",",
"mark_top_concepts",
",",
"eliminate_redundancy",
")",
":",
"starttime",
"=",
"time",
".",
"time",
"(",
")",
"if",
"check",
".",
"hierarchy_cycles",
"(",
"rdf",
",",
"break_cy... | Check for, and optionally fix, problems in the skos:broader hierarchy
using a recursive depth first search algorithm.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix_cycles: Break cycles.
:param bool fix_disjoint_relations: Remoe skos:related overlapping with
skos:broaderTransitive.
:param bool fix_redundancy: Remove skos:broader between two concepts otherwise
connected by skos:broaderTransitive. | [
"Check",
"for",
"and",
"optionally",
"fix",
"problems",
"in",
"the",
"skos",
":",
"broader",
"hierarchy",
"using",
"a",
"recursive",
"depth",
"first",
"search",
"algorithm",
"."
] | python | train |
dwavesystems/dimod | dimod/binary_quadratic_model.py | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1154-L1212 | def contract_variables(self, u, v):
"""Enforce u, v being the same variable in a binary quadratic model.
The resulting variable is labeled 'u'. Values of interactions between `v` and
variables that `u` interacts with are added to the corresponding interactions
of `u`.
Args:
u (variable):
Variable in the binary quadratic model.
v (variable):
Variable in the binary quadratic model.
Examples:
This example creates a binary quadratic model representing the K4 complete graph
and contracts node (variable) 3 into node 2. The interactions between
3 and its neighbors 1 and 4 are added to the corresponding interactions
between 2 and those same neighbors.
>>> import dimod
...
>>> linear = {1: 1, 2: 2, 3: 3, 4: 4}
>>> quadratic = {(1, 2): 12, (1, 3): 13, (1, 4): 14,
... (2, 3): 23, (2, 4): 24,
... (3, 4): 34}
>>> bqm = dimod.BinaryQuadraticModel(linear, quadratic, 0.5, dimod.SPIN)
>>> bqm.contract_variables(2, 3)
>>> 3 in bqm.linear
False
>>> bqm.quadratic[(1, 2)]
25
"""
adj = self.adj
if u not in adj:
raise ValueError("{} is not a variable in the binary quadratic model".format(u))
if v not in adj:
raise ValueError("{} is not a variable in the binary quadratic model".format(v))
# if there is an interaction between u, v it becomes linear for u
if v in adj[u]:
if self.vartype is Vartype.BINARY:
self.add_variable(u, adj[u][v])
elif self.vartype is Vartype.SPIN:
self.add_offset(adj[u][v])
else:
raise RuntimeError("unexpected vartype")
self.remove_interaction(u, v)
# all of the interactions that v has become interactions for u
neighbors = list(adj[v])
for w in neighbors:
self.add_interaction(u, w, adj[v][w])
self.remove_interaction(v, w)
# finally remove v
self.remove_variable(v) | [
"def",
"contract_variables",
"(",
"self",
",",
"u",
",",
"v",
")",
":",
"adj",
"=",
"self",
".",
"adj",
"if",
"u",
"not",
"in",
"adj",
":",
"raise",
"ValueError",
"(",
"\"{} is not a variable in the binary quadratic model\"",
".",
"format",
"(",
"u",
")",
... | Enforce u, v being the same variable in a binary quadratic model.
The resulting variable is labeled 'u'. Values of interactions between `v` and
variables that `u` interacts with are added to the corresponding interactions
of `u`.
Args:
u (variable):
Variable in the binary quadratic model.
v (variable):
Variable in the binary quadratic model.
Examples:
This example creates a binary quadratic model representing the K4 complete graph
and contracts node (variable) 3 into node 2. The interactions between
3 and its neighbors 1 and 4 are added to the corresponding interactions
between 2 and those same neighbors.
>>> import dimod
...
>>> linear = {1: 1, 2: 2, 3: 3, 4: 4}
>>> quadratic = {(1, 2): 12, (1, 3): 13, (1, 4): 14,
... (2, 3): 23, (2, 4): 24,
... (3, 4): 34}
>>> bqm = dimod.BinaryQuadraticModel(linear, quadratic, 0.5, dimod.SPIN)
>>> bqm.contract_variables(2, 3)
>>> 3 in bqm.linear
False
>>> bqm.quadratic[(1, 2)]
25 | [
"Enforce",
"u",
"v",
"being",
"the",
"same",
"variable",
"in",
"a",
"binary",
"quadratic",
"model",
"."
] | python | train |
cjdrake/pyeda | pyeda/parsing/dimacs.py | https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/parsing/dimacs.py#L266-L269 | def punct(self, text):
"""Push punctuation onto the token queue."""
cls = self.PUNCTUATION[text]
self.push_token(cls(text, self.lineno, self.offset)) | [
"def",
"punct",
"(",
"self",
",",
"text",
")",
":",
"cls",
"=",
"self",
".",
"PUNCTUATION",
"[",
"text",
"]",
"self",
".",
"push_token",
"(",
"cls",
"(",
"text",
",",
"self",
".",
"lineno",
",",
"self",
".",
"offset",
")",
")"
] | Push punctuation onto the token queue. | [
"Push",
"punctuation",
"onto",
"the",
"token",
"queue",
"."
] | python | train |
LogicalDash/LiSE | LiSE/LiSE/rule.py | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/LiSE/LiSE/rule.py#L260-L278 | def _fun_names_iter(self, functyp, val):
"""Iterate over the names of the functions in ``val``,
adding them to ``funcstore`` if they are missing;
or if the items in ``val`` are already the names of functions
in ``funcstore``, iterate over those.
"""
funcstore = getattr(self.engine, functyp)
for v in val:
if callable(v):
# Overwrites anything already on the funcstore, is that bad?
setattr(funcstore, v.__name__, v)
yield v.__name__
elif v not in funcstore:
raise KeyError("Function {} not present in {}".format(
v, funcstore._tab
))
else:
yield v | [
"def",
"_fun_names_iter",
"(",
"self",
",",
"functyp",
",",
"val",
")",
":",
"funcstore",
"=",
"getattr",
"(",
"self",
".",
"engine",
",",
"functyp",
")",
"for",
"v",
"in",
"val",
":",
"if",
"callable",
"(",
"v",
")",
":",
"# Overwrites anything already ... | Iterate over the names of the functions in ``val``,
adding them to ``funcstore`` if they are missing;
or if the items in ``val`` are already the names of functions
in ``funcstore``, iterate over those. | [
"Iterate",
"over",
"the",
"names",
"of",
"the",
"functions",
"in",
"val",
"adding",
"them",
"to",
"funcstore",
"if",
"they",
"are",
"missing",
";",
"or",
"if",
"the",
"items",
"in",
"val",
"are",
"already",
"the",
"names",
"of",
"functions",
"in",
"funcs... | python | train |
Yelp/threat_intel | threat_intel/util/http.py | https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/http.py#L51-L57 | def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
"""Called to initialize the HTTPAdapter when no proxy is used."""
try:
pool_kwargs['ssl_version'] = ssl.PROTOCOL_TLS
except AttributeError:
pool_kwargs['ssl_version'] = ssl.PROTOCOL_SSLv23
return super(SSLAdapter, self).init_poolmanager(connections, maxsize, block, **pool_kwargs) | [
"def",
"init_poolmanager",
"(",
"self",
",",
"connections",
",",
"maxsize",
",",
"block",
"=",
"False",
",",
"*",
"*",
"pool_kwargs",
")",
":",
"try",
":",
"pool_kwargs",
"[",
"'ssl_version'",
"]",
"=",
"ssl",
".",
"PROTOCOL_TLS",
"except",
"AttributeError",... | Called to initialize the HTTPAdapter when no proxy is used. | [
"Called",
"to",
"initialize",
"the",
"HTTPAdapter",
"when",
"no",
"proxy",
"is",
"used",
"."
] | python | train |
SheffieldML/GPy | GPy/util/mocap.py | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/mocap.py#L328-L428 | def read_bonedata(self, fid):
"""Read bone data from an acclaim skeleton file stream."""
bone_count = 0
lin = self.read_line(fid)
while lin[0]!=':':
parts = lin.split()
if parts[0] == 'begin':
bone_count += 1
self.vertices.append(vertex(name = '', id=np.NaN,
meta={'name': [],
'id': [],
'offset': [],
'orientation': [],
'axis': [0., 0., 0.],
'axis_order': [],
'C': np.eye(3),
'Cinv': np.eye(3),
'channels': [],
'bodymass': [],
'confmass': [],
'order': [],
'rot_ind': [],
'pos_ind': [],
'limits': [],
'xyz': np.array([0., 0., 0.]),
'rot': np.eye(3)}))
lin = self.read_line(fid)
elif parts[0]=='id':
self.vertices[bone_count].id = int(parts[1])
lin = self.read_line(fid)
self.vertices[bone_count].children = []
elif parts[0]=='name':
self.vertices[bone_count].name = parts[1]
lin = self.read_line(fid)
elif parts[0]=='direction':
direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])])
lin = self.read_line(fid)
elif parts[0]=='length':
lgth = float(parts[1])
lin = self.read_line(fid)
elif parts[0]=='axis':
self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]),
float(parts[2]),
float(parts[3])])
# order is reversed compared to bvh
self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower()
lin = self.read_line(fid)
elif parts[0]=='dof':
order = []
for i in range(1, len(parts)):
if parts[i]== 'rx':
chan = 'Xrotation'
order.append('x')
elif parts[i] =='ry':
chan = 'Yrotation'
order.append('y')
elif parts[i] == 'rz':
chan = 'Zrotation'
order.append('z')
elif parts[i] == 'tx':
chan = 'Xposition'
elif parts[i] == 'ty':
chan = 'Yposition'
elif parts[i] == 'tz':
chan = 'Zposition'
elif parts[i] == 'l':
chan = 'length'
self.vertices[bone_count].meta['channels'].append(chan)
# order is reversed compared to bvh
self.vertices[bone_count].meta['order'] = order[::-1]
lin = self.read_line(fid)
elif parts[0]=='limits':
self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]]
lin = self.read_line(fid)
while lin !='end':
parts = lin.split()
self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])])
lin = self.read_line(fid)
self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits'])
elif parts[0]=='end':
self.vertices[bone_count].meta['offset'] = direction*lgth
lin = self.read_line(fid)
return lin | [
"def",
"read_bonedata",
"(",
"self",
",",
"fid",
")",
":",
"bone_count",
"=",
"0",
"lin",
"=",
"self",
".",
"read_line",
"(",
"fid",
")",
"while",
"lin",
"[",
"0",
"]",
"!=",
"':'",
":",
"parts",
"=",
"lin",
".",
"split",
"(",
")",
"if",
"parts",... | Read bone data from an acclaim skeleton file stream. | [
"Read",
"bone",
"data",
"from",
"an",
"acclaim",
"skeleton",
"file",
"stream",
"."
] | python | train |
caktus/django-timepiece | timepiece/utils/__init__.py | https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/utils/__init__.py#L94-L97 | def get_year_start(day=None):
"""Returns January 1 of the given year."""
day = add_timezone(day or datetime.date.today())
return day.replace(month=1).replace(day=1) | [
"def",
"get_year_start",
"(",
"day",
"=",
"None",
")",
":",
"day",
"=",
"add_timezone",
"(",
"day",
"or",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
"return",
"day",
".",
"replace",
"(",
"month",
"=",
"1",
")",
".",
"replace",
"(",
"day"... | Returns January 1 of the given year. | [
"Returns",
"January",
"1",
"of",
"the",
"given",
"year",
"."
] | python | train |
tensorflow/tensor2tensor | tensor2tensor/envs/trajectory.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/trajectory.py#L286-L315 | def observations_np(self, boundary=20):
"""Pads the observations in all the trajectories and returns them.
Args:
boundary: integer, Observations will be padded to (n * boundary) + 1 where
n is an integer.
Returns:
a tuple(padded_observations, time_steps), with shapes:
padded_observations: (self.batch_size, n * boundary + 1) + OBS
time_steps: integer list of length = self.batch_size
"""
list_observations_np_ts = [t.observations_np for t in self.trajectories]
# Every element in `list_observations_np_ts` is shaped (t,) + OBS
OBS = list_observations_np_ts[0].shape[1:] # pylint: disable=invalid-name
num_time_steps = [t.num_time_steps for t in self.trajectories]
t_max = max(num_time_steps)
# t_max is rounded to the next multiple of `boundary`
boundary = int(boundary)
bucket_length = boundary * int(np.ceil(float(t_max) / boundary))
def padding_config(obs):
# We're padding the first axis only, since that is the time-step.
num_to_pad = bucket_length + 1 - obs.shape[0]
return [(0, num_to_pad)] + [(0, 0)] * len(OBS)
return np.stack([
np.pad(obs, padding_config(obs), "constant")
for obs in list_observations_np_ts]), num_time_steps | [
"def",
"observations_np",
"(",
"self",
",",
"boundary",
"=",
"20",
")",
":",
"list_observations_np_ts",
"=",
"[",
"t",
".",
"observations_np",
"for",
"t",
"in",
"self",
".",
"trajectories",
"]",
"# Every element in `list_observations_np_ts` is shaped (t,) + OBS",
"OBS... | Pads the observations in all the trajectories and returns them.
Args:
boundary: integer, Observations will be padded to (n * boundary) + 1 where
n is an integer.
Returns:
a tuple(padded_observations, time_steps), with shapes:
padded_observations: (self.batch_size, n * boundary + 1) + OBS
time_steps: integer list of length = self.batch_size | [
"Pads",
"the",
"observations",
"in",
"all",
"the",
"trajectories",
"and",
"returns",
"them",
"."
] | python | train |
python-rope/rope | rope/contrib/findit.py | https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/contrib/findit.py#L73-L94 | def find_definition(project, code, offset, resource=None, maxfixes=1):
"""Return the definition location of the python name at `offset`
A `Location` object is returned if the definition location can be
determined, otherwise ``None`` is returned.
"""
fixer = fixsyntax.FixSyntax(project, code, resource, maxfixes)
pyname = fixer.pyname_at(offset)
if pyname is not None:
module, lineno = pyname.get_definition_location()
name = rope.base.worder.Worder(code).get_word_at(offset)
if lineno is not None:
start = module.lines.get_line_start(lineno)
def check_offset(occurrence):
if occurrence.offset < start:
return False
pyname_filter = occurrences.PyNameFilter(pyname)
finder = occurrences.Finder(project, name,
[check_offset, pyname_filter])
for occurrence in finder.find_occurrences(pymodule=module):
return Location(occurrence) | [
"def",
"find_definition",
"(",
"project",
",",
"code",
",",
"offset",
",",
"resource",
"=",
"None",
",",
"maxfixes",
"=",
"1",
")",
":",
"fixer",
"=",
"fixsyntax",
".",
"FixSyntax",
"(",
"project",
",",
"code",
",",
"resource",
",",
"maxfixes",
")",
"p... | Return the definition location of the python name at `offset`
A `Location` object is returned if the definition location can be
determined, otherwise ``None`` is returned. | [
"Return",
"the",
"definition",
"location",
"of",
"the",
"python",
"name",
"at",
"offset"
] | python | train |
fhcrc/taxtastic | taxtastic/subcommands/refpkg_intersection.py | https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/subcommands/refpkg_intersection.py#L49-L54 | def filter_ranks(results):
"""
Find just the first rank for all the results for a given tax_id.
"""
for _, group in itertools.groupby(results, operator.itemgetter(0)):
yield next(group) | [
"def",
"filter_ranks",
"(",
"results",
")",
":",
"for",
"_",
",",
"group",
"in",
"itertools",
".",
"groupby",
"(",
"results",
",",
"operator",
".",
"itemgetter",
"(",
"0",
")",
")",
":",
"yield",
"next",
"(",
"group",
")"
] | Find just the first rank for all the results for a given tax_id. | [
"Find",
"just",
"the",
"first",
"rank",
"for",
"all",
"the",
"results",
"for",
"a",
"given",
"tax_id",
"."
] | python | train |
KelSolaar/Umbra | umbra/components/factory/script_editor/script_editor.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/script_editor.py#L1847-L1859 | def __Script_Editor_Output_plainTextEdit_refresh_ui(self):
"""
Updates the **Script_Editor_Output_plainTextEdit** Widget.
"""
memory_handler_stack_depth = len(self.__engine.logging_session_handler_stream.stream)
if memory_handler_stack_depth != self.__memory_handler_stack_depth:
for line in self.__engine.logging_session_handler_stream.stream[
self.__memory_handler_stack_depth:memory_handler_stack_depth]:
self.Script_Editor_Output_plainTextEdit.moveCursor(QTextCursor.End)
self.Script_Editor_Output_plainTextEdit.insertPlainText(line)
self.__Script_Editor_Output_plainTextEdit_set_default_view_state()
self.__memory_handler_stack_depth = memory_handler_stack_depth | [
"def",
"__Script_Editor_Output_plainTextEdit_refresh_ui",
"(",
"self",
")",
":",
"memory_handler_stack_depth",
"=",
"len",
"(",
"self",
".",
"__engine",
".",
"logging_session_handler_stream",
".",
"stream",
")",
"if",
"memory_handler_stack_depth",
"!=",
"self",
".",
"__... | Updates the **Script_Editor_Output_plainTextEdit** Widget. | [
"Updates",
"the",
"**",
"Script_Editor_Output_plainTextEdit",
"**",
"Widget",
"."
] | python | train |
quantopian/alphalens | alphalens/tears.py | https://github.com/quantopian/alphalens/blob/d43eac871bb061e956df936794d3dd514da99e44/alphalens/tears.py#L325-L382 | def create_information_tear_sheet(factor_data,
group_neutral=False,
by_group=False):
"""
Creates a tear sheet for information analysis of a factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
group_neutral : bool
Demean forward returns by group before computing IC.
by_group : bool
If True, display graphs separately for each group.
"""
ic = perf.factor_information_coefficient(factor_data, group_neutral)
plotting.plot_information_table(ic)
columns_wide = 2
fr_cols = len(ic.columns)
rows_when_wide = (((fr_cols - 1) // columns_wide) + 1)
vertical_sections = fr_cols + 3 * rows_when_wide + 2 * fr_cols
gf = GridFigure(rows=vertical_sections, cols=columns_wide)
ax_ic_ts = [gf.next_row() for _ in range(fr_cols)]
plotting.plot_ic_ts(ic, ax=ax_ic_ts)
ax_ic_hqq = [gf.next_cell() for _ in range(fr_cols * 2)]
plotting.plot_ic_hist(ic, ax=ax_ic_hqq[::2])
plotting.plot_ic_qq(ic, ax=ax_ic_hqq[1::2])
if not by_group:
mean_monthly_ic = \
perf.mean_information_coefficient(factor_data,
group_adjust=group_neutral,
by_group=False,
by_time="M")
ax_monthly_ic_heatmap = [gf.next_cell() for x in range(fr_cols)]
plotting.plot_monthly_ic_heatmap(mean_monthly_ic,
ax=ax_monthly_ic_heatmap)
if by_group:
mean_group_ic = \
perf.mean_information_coefficient(factor_data,
group_adjust=group_neutral,
by_group=True)
plotting.plot_ic_by_group(mean_group_ic, ax=gf.next_row())
plt.show()
gf.close() | [
"def",
"create_information_tear_sheet",
"(",
"factor_data",
",",
"group_neutral",
"=",
"False",
",",
"by_group",
"=",
"False",
")",
":",
"ic",
"=",
"perf",
".",
"factor_information_coefficient",
"(",
"factor_data",
",",
"group_neutral",
")",
"plotting",
".",
"plot... | Creates a tear sheet for information analysis of a factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
group_neutral : bool
Demean forward returns by group before computing IC.
by_group : bool
If True, display graphs separately for each group. | [
"Creates",
"a",
"tear",
"sheet",
"for",
"information",
"analysis",
"of",
"a",
"factor",
"."
] | python | train |
dmwm/DBS | Server/Python/src/dbs/dao/Oracle/BlockParent/List.py | https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/Oracle/BlockParent/List.py#L27-L44 | def execute(self, conn, block_name="", transaction = False):
"""
block: /a/b/c#d
"""
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/BlockParent/List. Expects db connection from upper layer.", self.logger.exception)
sql = self.sql
if isinstance(block_name, basestring):
binds = {'block_name' :block_name}
elif type(block_name) is list:
binds = [{'block_name':x} for x in block_name]
else:
msg = "Oracle/BlockParent/List. Block_name must be provided either as a string or as a list."
dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception)
result = self.dbi.processData(sql, binds, conn, transaction)
return self.formatDict(result) | [
"def",
"execute",
"(",
"self",
",",
"conn",
",",
"block_name",
"=",
"\"\"",
",",
"transaction",
"=",
"False",
")",
":",
"if",
"not",
"conn",
":",
"dbsExceptionHandler",
"(",
"\"dbsException-failed-connect2host\"",
",",
"\"Oracle/BlockParent/List. Expects db connection... | block: /a/b/c#d | [
"block",
":",
"/",
"a",
"/",
"b",
"/",
"c#d"
] | python | train |
MolSSI-BSE/basis_set_exchange | basis_set_exchange/curate/compare_report.py | https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/curate/compare_report.py#L82-L143 | def potentials_difference(p1, p2):
"""
Computes and prints the differences between two lists of potentials
If the shells contain a different number primitives,
or the lists are of different length, inf is returned.
Otherwise, the maximum relative difference is returned.
"""
max_rdiff = 0.0
np = len(p1)
if len(p2) != np:
print("Different number of potentials")
return float('inf')
pots1 = sort_potentials(p1)
pots2 = sort_potentials(p2)
for n in range(np):
pot1 = pots1[n]
pot2 = pots2[n]
if pot1['angular_momentum'] != pot2['angular_momentum']:
print("Different angular momentum for potential {}".format(n))
return float('inf')
nprim = len(pot1['gaussian_exponents'])
if len(pot2['gaussian_exponents']) != nprim:
print("Different number of primitives for potential {}".format(n))
return float('inf')
ngen = len(pot1['coefficients'])
if len(pot2['coefficients']) != ngen:
print("Different number of general contractions for potential {}".format(n))
return float('inf')
for p in range(nprim):
e1 = pot1['gaussian_exponents'][p]
e2 = pot2['gaussian_exponents'][p]
r = _reldiff(e1, e2)
if r > 0.0:
print(" Gaussian Exponent {:3}: {:20} {:20} -> {:16.8e}".format(p, e1, e2, r))
max_rdiff = max(max_rdiff, r)
e1 = pot1['r_exponents'][p]
e2 = pot2['r_exponents'][p]
r = _reldiff(e1, e2)
if r > 0.0:
print(" R Exponent {:3}: {:20} {:20} -> {:16.8e}".format(p, e1, e2, r))
max_rdiff = max(max_rdiff, r)
for g in range(ngen):
c1 = pot1['coefficients'][g][p]
c2 = pot2['coefficients'][g][p]
r = _reldiff(c1, c2)
if r > 0.0:
print(" Coefficient {:3}: {:20} {:20} -> {:16.8e}".format(p, c1, c2, r))
max_rdiff = max(max_rdiff, r)
print()
print("Max relative difference for these potentials: {}".format(max_rdiff))
return max_rdiff | [
"def",
"potentials_difference",
"(",
"p1",
",",
"p2",
")",
":",
"max_rdiff",
"=",
"0.0",
"np",
"=",
"len",
"(",
"p1",
")",
"if",
"len",
"(",
"p2",
")",
"!=",
"np",
":",
"print",
"(",
"\"Different number of potentials\"",
")",
"return",
"float",
"(",
"'... | Computes and prints the differences between two lists of potentials
If the shells contain a different number primitives,
or the lists are of different length, inf is returned.
Otherwise, the maximum relative difference is returned. | [
"Computes",
"and",
"prints",
"the",
"differences",
"between",
"two",
"lists",
"of",
"potentials"
] | python | train |
twilio/twilio-python | twilio/rest/autopilot/v1/assistant/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/__init__.py#L400-L409 | def dialogues(self):
"""
Access the dialogues
:returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueList
:rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueList
"""
if self._dialogues is None:
self._dialogues = DialogueList(self._version, assistant_sid=self._solution['sid'], )
return self._dialogues | [
"def",
"dialogues",
"(",
"self",
")",
":",
"if",
"self",
".",
"_dialogues",
"is",
"None",
":",
"self",
".",
"_dialogues",
"=",
"DialogueList",
"(",
"self",
".",
"_version",
",",
"assistant_sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")... | Access the dialogues
:returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueList
:rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueList | [
"Access",
"the",
"dialogues"
] | python | train |
waqasbhatti/astrobase | astrobase/varbase/signals.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/signals.py#L69-L328 | def prewhiten_magseries(times, mags, errs,
whitenperiod,
whitenparams,
sigclip=3.0,
magsarefluxes=False,
plotfit=None,
plotfitphasedlconly=True,
rescaletomedian=True):
'''Removes a periodic sinusoidal signal generated using whitenparams from
the input magnitude time series.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to prewhiten.
whitenperiod : float
The period of the sinusoidal signal to remove.
whitenparams : list of floats
This contains the Fourier amplitude and phase coefficients of the
sinusoidal signal to remove::
[ampl_1, ampl_2, ampl_3, ..., ampl_X,
pha_1, pha_2, pha_3, ..., pha_X]
where `X` is the Fourier order. These are usually the output of a
previous Fourier fit to the light curve (from
:py:func:`astrobase.lcfit.sinusoidal.fourier_fit_magseries` for
example).
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot showing the effect
of the pre-whitening on the mag/flux time-series and write the plot to
the path specified here.
plotfitphasedlconly : bool
If True, will plot only the phased LC for showing the effect of
pre-whitening, and skip plotting the unphased LC.
rescaletomedian : bool
If this is True, then we add back the constant median term of the
magnitudes to the final pre-whitened mag series.
Returns
-------
dict
Returns a dict of the form::
{'wtimes':times array after pre-whitening,
'wphase':phase array after pre-whitening,
'wmags':mags array after pre-whitening,
'werrs':errs array after pre-whitening,
'whitenparams':the input pre-whitening params used,
'whitenperiod':the input pre-whitening period used,
'fitplotfile':the output plot file if plotfit was set}
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
median_mag = np.median(smags)
# phase the mag series using the given period and epoch = min(stimes)
mintime = np.min(stimes)
# calculate the unsorted phase, then sort it
iphase = (
(stimes - mintime)/whitenperiod -
np.floor((stimes - mintime)/whitenperiod)
)
phasesortind = np.argsort(iphase)
# these are the final quantities to use for the Fourier fits
phase = iphase[phasesortind]
pmags = smags[phasesortind]
perrs = serrs[phasesortind]
# get the times sorted in phase order (useful to get the fit mag minimum
# with respect to phase -- the light curve minimum)
ptimes = stimes[phasesortind]
# now subtract the harmonic series from the phased LC
# these are still in phase order
wmags = pmags - _fourier_func(whitenparams, phase, pmags)
# resort everything by time order
wtimeorder = np.argsort(ptimes)
wtimes = ptimes[wtimeorder]
wphase = phase[wtimeorder]
wmags = wmags[wtimeorder]
werrs = perrs[wtimeorder]
if rescaletomedian:
wmags = wmags + median_mag
# prepare the returndict
returndict = {'wtimes':wtimes, # these are in the new time order
'wphase':wphase,
'wmags':wmags,
'werrs':werrs,
'whitenparams':whitenparams,
'whitenperiod':whitenperiod}
# make the fit plot if required
if plotfit and (isinstance(plotfit, str) or isinstance(plotfit, Strio)):
if plotfitphasedlconly:
plt.figure(figsize=(10,4.8))
else:
plt.figure(figsize=(16,9.6))
if plotfitphasedlconly:
# phased series before whitening
plt.subplot(121)
plt.plot(phase,pmags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC before pre-whitening')
# phased series after whitening
plt.subplot(122)
plt.plot(wphase,wmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC after pre-whitening')
else:
# time series before whitening
plt.subplot(221)
plt.plot(stimes,smags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('JD')
plt.title('LC before pre-whitening')
# time series after whitening
plt.subplot(222)
plt.plot(wtimes,wmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('JD')
plt.title('LC after pre-whitening with period: %.6f' % whitenperiod)
# phased series before whitening
plt.subplot(223)
plt.plot(phase,pmags,
marker='.',
color='k',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC before pre-whitening')
# phased series after whitening
plt.subplot(224)
plt.plot(wphase,wmags,
marker='.',
color='g',
linestyle='None',
markersize=2.0,
markeredgewidth=0)
if not magsarefluxes:
plt.gca().invert_yaxis()
plt.ylabel('magnitude')
else:
plt.ylabel('fluxes')
plt.xlabel('phase')
plt.title('phased LC after pre-whitening')
plt.tight_layout()
plt.savefig(plotfit, format='png', pad_inches=0.0)
plt.close()
if isinstance(plotfit, str) or isinstance(plotfit, Strio):
returndict['fitplotfile'] = plotfit
return returndict | [
"def",
"prewhiten_magseries",
"(",
"times",
",",
"mags",
",",
"errs",
",",
"whitenperiod",
",",
"whitenparams",
",",
"sigclip",
"=",
"3.0",
",",
"magsarefluxes",
"=",
"False",
",",
"plotfit",
"=",
"None",
",",
"plotfitphasedlconly",
"=",
"True",
",",
"rescal... | Removes a periodic sinusoidal signal generated using whitenparams from
the input magnitude time series.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to prewhiten.
whitenperiod : float
The period of the sinusoidal signal to remove.
whitenparams : list of floats
This contains the Fourier amplitude and phase coefficients of the
sinusoidal signal to remove::
[ampl_1, ampl_2, ampl_3, ..., ampl_X,
pha_1, pha_2, pha_3, ..., pha_X]
where `X` is the Fourier order. These are usually the output of a
previous Fourier fit to the light curve (from
:py:func:`astrobase.lcfit.sinusoidal.fourier_fit_magseries` for
example).
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot showing the effect
of the pre-whitening on the mag/flux time-series and write the plot to
the path specified here.
plotfitphasedlconly : bool
If True, will plot only the phased LC for showing the effect of
pre-whitening, and skip plotting the unphased LC.
rescaletomedian : bool
If this is True, then we add back the constant median term of the
magnitudes to the final pre-whitened mag series.
Returns
-------
dict
Returns a dict of the form::
{'wtimes':times array after pre-whitening,
'wphase':phase array after pre-whitening,
'wmags':mags array after pre-whitening,
'werrs':errs array after pre-whitening,
'whitenparams':the input pre-whitening params used,
'whitenperiod':the input pre-whitening period used,
'fitplotfile':the output plot file if plotfit was set} | [
"Removes",
"a",
"periodic",
"sinusoidal",
"signal",
"generated",
"using",
"whitenparams",
"from",
"the",
"input",
"magnitude",
"time",
"series",
"."
] | python | valid |
globality-corp/microcosm-flask | microcosm_flask/audit.py | https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/audit.py#L309-L332 | def _audit_request(options, func, request_context, *args, **kwargs): # noqa: C901
"""
Run a request function under audit.
"""
logger = getLogger("audit")
request_info = RequestInfo(options, func, request_context)
response = None
request_info.capture_request()
try:
# process the request
with elapsed_time(request_info.timing):
response = func(*args, **kwargs)
except Exception as error:
request_info.capture_error(error)
raise
else:
request_info.capture_response(response)
return response
finally:
if not should_skip_logging(func):
request_info.log(logger) | [
"def",
"_audit_request",
"(",
"options",
",",
"func",
",",
"request_context",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: C901",
"logger",
"=",
"getLogger",
"(",
"\"audit\"",
")",
"request_info",
"=",
"RequestInfo",
"(",
"options",
",",
"... | Run a request function under audit. | [
"Run",
"a",
"request",
"function",
"under",
"audit",
"."
] | python | train |
KE-works/pykechain | pykechain/extra_utils.py | https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/extra_utils.py#L279-L298 | def map_property_instances(original_part, new_part):
"""
Map the id of the original part with the `Part` object of the newly created one.
Updated the singleton `mapping dictionary` with the new mapping table values.
:param original_part: `Part` object to be copied/moved
:type original_part: :class:`Part`
:param new_part: `Part` object copied/moved
:type new_part: :class:`Part`
:return: None
"""
# Map the original part with the new one
get_mapping_dictionary()[original_part.id] = new_part
# Do the same for each Property of original part instance, using the 'model' id and the get_mapping_dictionary
for prop_original in original_part.properties:
get_mapping_dictionary()[prop_original.id] = [prop_new for prop_new in new_part.properties if
get_mapping_dictionary()[prop_original._json_data['model']].id ==
prop_new._json_data['model']][0] | [
"def",
"map_property_instances",
"(",
"original_part",
",",
"new_part",
")",
":",
"# Map the original part with the new one",
"get_mapping_dictionary",
"(",
")",
"[",
"original_part",
".",
"id",
"]",
"=",
"new_part",
"# Do the same for each Property of original part instance, u... | Map the id of the original part with the `Part` object of the newly created one.
Updated the singleton `mapping dictionary` with the new mapping table values.
:param original_part: `Part` object to be copied/moved
:type original_part: :class:`Part`
:param new_part: `Part` object copied/moved
:type new_part: :class:`Part`
:return: None | [
"Map",
"the",
"id",
"of",
"the",
"original",
"part",
"with",
"the",
"Part",
"object",
"of",
"the",
"newly",
"created",
"one",
"."
] | python | train |
chaoss/grimoirelab-elk | grimoire_elk/raw/gitlab.py | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/raw/gitlab.py#L77-L91 | def get_arthur_params_from_url(cls, url):
""" Get the arthur params given a URL for the data source """
params = {}
args = cls.get_perceval_params_from_url(url)
parser = GitLabCommand.setup_cmd_parser()
parsed_args = parser.parse(*args)
params['owner'] = parsed_args.owner
params['repository'] = parsed_args.repository
# include only blacklist ids information
params['blacklist_ids'] = parsed_args.blacklist_ids
return params | [
"def",
"get_arthur_params_from_url",
"(",
"cls",
",",
"url",
")",
":",
"params",
"=",
"{",
"}",
"args",
"=",
"cls",
".",
"get_perceval_params_from_url",
"(",
"url",
")",
"parser",
"=",
"GitLabCommand",
".",
"setup_cmd_parser",
"(",
")",
"parsed_args",
"=",
"... | Get the arthur params given a URL for the data source | [
"Get",
"the",
"arthur",
"params",
"given",
"a",
"URL",
"for",
"the",
"data",
"source"
] | python | train |
PvtHaggard/pydarksky | pydarksky/darksky.py | https://github.com/PvtHaggard/pydarksky/blob/c2d68d311bf0a58125fbfe31eff124356899c75b/pydarksky/darksky.py#L88-L114 | def url(self):
# type:() -> str
"""
Build and returns a URL used to make a Dark Sky API call.
"""
url = "https://api.darksky.net/forecast/{key}/{lat},{lon}".format(key=self.api_key,
lat=self.latitude,
lon=self.longitude)
if isinstance(self._date, datetime):
url += ",{:%Y-%m-%dT%H:%M:%S}".format(self._date)
url += "?units={}".format(self.units)
if self.lang != "auto":
url += "&lang={}".format(self.lang)
if len(self._exclude) > 0:
url += "&exclude="
for e in self._exclude:
url += "{},".format(e)
url = url.strip(",")
if self._extend:
url += "&extend=hourly"
return url | [
"def",
"url",
"(",
"self",
")",
":",
"# type:() -> str",
"url",
"=",
"\"https://api.darksky.net/forecast/{key}/{lat},{lon}\"",
".",
"format",
"(",
"key",
"=",
"self",
".",
"api_key",
",",
"lat",
"=",
"self",
".",
"latitude",
",",
"lon",
"=",
"self",
".",
"lo... | Build and returns a URL used to make a Dark Sky API call. | [
"Build",
"and",
"returns",
"a",
"URL",
"used",
"to",
"make",
"a",
"Dark",
"Sky",
"API",
"call",
"."
] | python | train |
frasertweedale/ledgertools | ltlib/xn.py | https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/xn.py#L353-L355 | def process(self, rules, uio, prevxn=None):
"""Matches rules and applies outcomes"""
self.apply_outcomes(self.match_rules(rules), uio, prevxn=prevxn) | [
"def",
"process",
"(",
"self",
",",
"rules",
",",
"uio",
",",
"prevxn",
"=",
"None",
")",
":",
"self",
".",
"apply_outcomes",
"(",
"self",
".",
"match_rules",
"(",
"rules",
")",
",",
"uio",
",",
"prevxn",
"=",
"prevxn",
")"
] | Matches rules and applies outcomes | [
"Matches",
"rules",
"and",
"applies",
"outcomes"
] | python | train |
clalancette/pycdlib | pycdlib/rockridge.py | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L183-L216 | def append_field(self, fieldname):
# type: (str) -> None
'''
Mark a field as present in the Rock Ridge records.
Parameters:
fieldname - The name of the field to mark as present; should be one
of 'PX', 'PN', 'SL', 'NM', 'CL', 'PL', 'RE', or 'TF'.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('RR record not yet initialized!')
if fieldname == 'PX':
bit = 0
elif fieldname == 'PN':
bit = 1
elif fieldname == 'SL':
bit = 2
elif fieldname == 'NM':
bit = 3
elif fieldname == 'CL':
bit = 4
elif fieldname == 'PL':
bit = 5
elif fieldname == 'RE':
bit = 6
elif fieldname == 'TF':
bit = 7
else:
raise pycdlibexception.PyCdlibInternalError('Unknown RR field name %s' % (fieldname))
self.rr_flags |= (1 << bit) | [
"def",
"append_field",
"(",
"self",
",",
"fieldname",
")",
":",
"# type: (str) -> None",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'RR record not yet initialized!'",
")",
"if",
"fieldname",
"==",
"... | Mark a field as present in the Rock Ridge records.
Parameters:
fieldname - The name of the field to mark as present; should be one
of 'PX', 'PN', 'SL', 'NM', 'CL', 'PL', 'RE', or 'TF'.
Returns:
Nothing. | [
"Mark",
"a",
"field",
"as",
"present",
"in",
"the",
"Rock",
"Ridge",
"records",
"."
] | python | train |
chrislit/abydos | abydos/distance/_mra.py | https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/distance/_mra.py#L46-L111 | def dist_abs(self, src, tar):
"""Return the MRA comparison rating of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
int
MRA comparison rating
Examples
--------
>>> cmp = MRA()
>>> cmp.dist_abs('cat', 'hat')
5
>>> cmp.dist_abs('Niall', 'Neil')
6
>>> cmp.dist_abs('aluminum', 'Catalan')
0
>>> cmp.dist_abs('ATCG', 'TAGC')
5
"""
if src == tar:
return 6
if src == '' or tar == '':
return 0
src = list(mra(src))
tar = list(mra(tar))
if abs(len(src) - len(tar)) > 2:
return 0
length_sum = len(src) + len(tar)
if length_sum < 5:
min_rating = 5
elif length_sum < 8:
min_rating = 4
elif length_sum < 12:
min_rating = 3
else:
min_rating = 2
for _ in range(2):
new_src = []
new_tar = []
minlen = min(len(src), len(tar))
for i in range(minlen):
if src[i] != tar[i]:
new_src.append(src[i])
new_tar.append(tar[i])
src = new_src + src[minlen:]
tar = new_tar + tar[minlen:]
src.reverse()
tar.reverse()
similarity = 6 - max(len(src), len(tar))
if similarity >= min_rating:
return similarity
return 0 | [
"def",
"dist_abs",
"(",
"self",
",",
"src",
",",
"tar",
")",
":",
"if",
"src",
"==",
"tar",
":",
"return",
"6",
"if",
"src",
"==",
"''",
"or",
"tar",
"==",
"''",
":",
"return",
"0",
"src",
"=",
"list",
"(",
"mra",
"(",
"src",
")",
")",
"tar",... | Return the MRA comparison rating of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
int
MRA comparison rating
Examples
--------
>>> cmp = MRA()
>>> cmp.dist_abs('cat', 'hat')
5
>>> cmp.dist_abs('Niall', 'Neil')
6
>>> cmp.dist_abs('aluminum', 'Catalan')
0
>>> cmp.dist_abs('ATCG', 'TAGC')
5 | [
"Return",
"the",
"MRA",
"comparison",
"rating",
"of",
"two",
"strings",
"."
] | python | valid |
silver-castle/mach9 | mach9/http.py | https://github.com/silver-castle/mach9/blob/7a623aab3c70d89d36ade6901b6307e115400c5e/mach9/http.py#L191-L200 | def get_request_body_chunk(self, content: bytes, closed: bool,
more_content: bool) -> Dict[str, Any]:
'''
http://channels.readthedocs.io/en/stable/asgi/www.html#request-body-chunk
'''
return {
'content': content,
'closed': closed,
'more_content': more_content
} | [
"def",
"get_request_body_chunk",
"(",
"self",
",",
"content",
":",
"bytes",
",",
"closed",
":",
"bool",
",",
"more_content",
":",
"bool",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"return",
"{",
"'content'",
":",
"content",
",",
"'closed'",
... | http://channels.readthedocs.io/en/stable/asgi/www.html#request-body-chunk | [
"http",
":",
"//",
"channels",
".",
"readthedocs",
".",
"io",
"/",
"en",
"/",
"stable",
"/",
"asgi",
"/",
"www",
".",
"html#request",
"-",
"body",
"-",
"chunk"
] | python | train |
textX/textX | textx/lang.py | https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/lang.py#L885-L945 | def language_from_str(language_def, metamodel):
"""
Constructs parser and initializes metamodel from language description
given in textX language.
Args:
language_def (str): A language description in textX.
metamodel (TextXMetaModel): A metamodel to initialize.
Returns:
Parser for the new language.
"""
if type(language_def) is not text:
raise TextXError("textX accepts only unicode strings.")
if metamodel.debug:
metamodel.dprint("*** PARSING LANGUAGE DEFINITION ***")
# Check the cache for already conctructed textX parser
if metamodel.debug in textX_parsers:
parser = textX_parsers[metamodel.debug]
else:
# Create parser for TextX grammars using
# the arpeggio grammar specified in this module
parser = ParserPython(textx_model, comment_def=comment,
ignore_case=False,
reduce_tree=False,
memoization=metamodel.memoization,
debug=metamodel.debug,
file=metamodel.file)
# Cache it for subsequent calls
textX_parsers[metamodel.debug] = parser
# Parse language description with textX parser
try:
parse_tree = parser.parse(language_def)
except NoMatch as e:
line, col = parser.pos_to_linecol(e.position)
raise TextXSyntaxError(text(e), line, col)
# Construct new parser and meta-model based on the given language
# description.
lang_parser = visit_parse_tree(parse_tree,
TextXVisitor(parser, metamodel))
# Meta-model is constructed. Validate its semantics.
metamodel.validate()
# Here we connect meta-model and language parser for convenience.
lang_parser.metamodel = metamodel
metamodel._parser_blueprint = lang_parser
if metamodel.debug:
# Create dot file for debuging purposes
PMDOTExporter().exportFile(
lang_parser.parser_model,
"{}_parser_model.dot".format(metamodel.rootcls.__name__))
return lang_parser | [
"def",
"language_from_str",
"(",
"language_def",
",",
"metamodel",
")",
":",
"if",
"type",
"(",
"language_def",
")",
"is",
"not",
"text",
":",
"raise",
"TextXError",
"(",
"\"textX accepts only unicode strings.\"",
")",
"if",
"metamodel",
".",
"debug",
":",
"meta... | Constructs parser and initializes metamodel from language description
given in textX language.
Args:
language_def (str): A language description in textX.
metamodel (TextXMetaModel): A metamodel to initialize.
Returns:
Parser for the new language. | [
"Constructs",
"parser",
"and",
"initializes",
"metamodel",
"from",
"language",
"description",
"given",
"in",
"textX",
"language",
"."
] | python | train |
materialsvirtuallab/monty | monty/serialization.py | https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/serialization.py#L47-L83 | def loadfn(fn, *args, **kwargs):
"""
Loads json/yaml/msgpack directly from a filename instead of a
File-like object. For YAML, ruamel.yaml must be installed. The file type is
automatically detected. YAML is assumed if the filename contains "yaml"
(lower or upper case). Otherwise, json is always assumed.
Args:
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.load.
\*\*kwargs: Any of the kwargs supported by json/yaml.load.
Returns:
(object) Result of json/yaml/msgpack.load.
"""
if "mpk" in os.path.basename(fn).lower():
if msgpack is None:
raise RuntimeError(
"Loading of message pack files is not "
"possible as msgpack-python is not installed.")
if "object_hook" not in kwargs:
kwargs["object_hook"] = object_hook
with zopen(fn, "rb") as fp:
return msgpack.load(fp, *args, **kwargs)
else:
with zopen(fn) as fp:
if "yaml" in os.path.basename(fn).lower():
if yaml is None:
raise RuntimeError("Loading of YAML files is not "
"possible as ruamel.yaml is not installed.")
if "Loader" not in kwargs:
kwargs["Loader"] = Loader
return yaml.load(fp, *args, **kwargs)
else:
if "cls" not in kwargs:
kwargs["cls"] = MontyDecoder
return json.load(fp, *args, **kwargs) | [
"def",
"loadfn",
"(",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"\"mpk\"",
"in",
"os",
".",
"path",
".",
"basename",
"(",
"fn",
")",
".",
"lower",
"(",
")",
":",
"if",
"msgpack",
"is",
"None",
":",
"raise",
"RuntimeError",
... | Loads json/yaml/msgpack directly from a filename instead of a
File-like object. For YAML, ruamel.yaml must be installed. The file type is
automatically detected. YAML is assumed if the filename contains "yaml"
(lower or upper case). Otherwise, json is always assumed.
Args:
fn (str/Path): filename or pathlib.Path.
\*args: Any of the args supported by json/yaml.load.
\*\*kwargs: Any of the kwargs supported by json/yaml.load.
Returns:
(object) Result of json/yaml/msgpack.load. | [
"Loads",
"json",
"/",
"yaml",
"/",
"msgpack",
"directly",
"from",
"a",
"filename",
"instead",
"of",
"a",
"File",
"-",
"like",
"object",
".",
"For",
"YAML",
"ruamel",
".",
"yaml",
"must",
"be",
"installed",
".",
"The",
"file",
"type",
"is",
"automatically... | python | train |
peerplays-network/python-peerplays | peerplays/peerplays.py | https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplays/peerplays.py#L1212-L1280 | def betting_market_group_create(
self,
description,
event_id="0.0.0",
rules_id="0.0.0",
asset=None,
delay_before_settling=0,
never_in_play=False,
resolution_constraint="exactly_one_winner",
account=None,
**kwargs
):
""" Create an betting market. This needs to be **proposed**.
:param list description: Internationalized list of descriptions
:param str event_id: Event ID to create this for (defaults to
*relative* id ``0.0.0``)
:param str rule_id: Rule ID to create this with (defaults to
*relative* id ``0.0.0``)
:param peerplays.asset.Asset asset: Asset to be used for this
market
:param int delay_before_settling: Delay in seconds before settling
(defaults to 0 seconds - immediatelly)
:param bool never_in_play: Set this market group as *never in play*
(defaults to *False*)
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
"""
if not asset:
asset = self.rpc.chain_params["core_symbol"]
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account, blockchain_instance=self)
asset = Asset(asset, blockchain_instance=self)
if event_id[0] == "1":
# Test if object exists
Event(event_id)
else:
# Test if object is proposed
test_proposal_in_buffer(
kwargs.get("append_to", self.propbuffer), "event_create", event_id
)
if rules_id[0] == "1":
# Test if object exists
Rule(rules_id)
else:
# Test if object is proposed
test_proposal_in_buffer(
kwargs.get("append_to", self.propbuffer),
"betting_market_rules_create",
rules_id,
)
op = operations.Betting_market_group_create(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"description": description,
"event_id": event_id,
"rules_id": rules_id,
"asset_id": asset["id"],
"never_in_play": bool(never_in_play),
"delay_before_settling": int(delay_before_settling),
"resolution_constraint": resolution_constraint,
"prefix": self.prefix,
}
)
return self.finalizeOp(op, account["name"], "active", **kwargs) | [
"def",
"betting_market_group_create",
"(",
"self",
",",
"description",
",",
"event_id",
"=",
"\"0.0.0\"",
",",
"rules_id",
"=",
"\"0.0.0\"",
",",
"asset",
"=",
"None",
",",
"delay_before_settling",
"=",
"0",
",",
"never_in_play",
"=",
"False",
",",
"resolution_c... | Create an betting market. This needs to be **proposed**.
:param list description: Internationalized list of descriptions
:param str event_id: Event ID to create this for (defaults to
*relative* id ``0.0.0``)
:param str rule_id: Rule ID to create this with (defaults to
*relative* id ``0.0.0``)
:param peerplays.asset.Asset asset: Asset to be used for this
market
:param int delay_before_settling: Delay in seconds before settling
(defaults to 0 seconds - immediatelly)
:param bool never_in_play: Set this market group as *never in play*
(defaults to *False*)
:param str account: (optional) the account to allow access
to (defaults to ``default_account``) | [
"Create",
"an",
"betting",
"market",
".",
"This",
"needs",
"to",
"be",
"**",
"proposed",
"**",
"."
] | python | train |
watson-developer-cloud/python-sdk | ibm_watson/natural_language_understanding_v1.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/natural_language_understanding_v1.py#L3270-L3277 | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'emotion') and self.emotion is not None:
_dict['emotion'] = self.emotion._to_dict()
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'text'",
")",
"and",
"self",
".",
"text",
"is",
"not",
"None",
":",
"_dict",
"[",
"'text'",
"]",
"=",
"self",
".",
"text",
"if",
"hasattr",
"(",... | Return a json dictionary representing this model. | [
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] | python | train |
UCSBarchlab/PyRTL | pyrtl/conditional.py | https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/conditional.py#L215-L262 | def _current_select():
""" Function to calculate the current "predicate" in the current context.
Returns a tuple of information: (predicate, pred_set).
The value pred_set is a set([ (predicate, bool), ... ]) as described in
the _reset_conditional_state
"""
# helper to create the conjuction of predicates
def and_with_possible_none(a, b):
assert(a is not None or b is not None)
if a is None:
return b
if b is None:
return a
return a & b
def between_otherwise_and_current(predlist):
lastother = None
for i, p in enumerate(predlist[:-1]):
if p is otherwise:
lastother = i
if lastother is None:
return predlist[:-1]
else:
return predlist[lastother+1:-1]
select = None
pred_set = set()
# for all conditions except the current children (which should be [])
for predlist in _conditions_list_stack[:-1]:
# negate all of the predicates between "otherwise" and the current one
for predicate in between_otherwise_and_current(predlist):
select = and_with_possible_none(select, ~predicate)
pred_set.add((predicate, True))
# include the predicate for the current one (not negated)
if predlist[-1] is not otherwise:
predicate = predlist[-1]
select = and_with_possible_none(select, predicate)
pred_set.add((predicate, False))
if select is None:
raise PyrtlError('problem with conditional assignment')
if len(select) != 1:
raise PyrtlInternalError('conditional predicate with length greater than 1')
return select, pred_set | [
"def",
"_current_select",
"(",
")",
":",
"# helper to create the conjuction of predicates",
"def",
"and_with_possible_none",
"(",
"a",
",",
"b",
")",
":",
"assert",
"(",
"a",
"is",
"not",
"None",
"or",
"b",
"is",
"not",
"None",
")",
"if",
"a",
"is",
"None",
... | Function to calculate the current "predicate" in the current context.
Returns a tuple of information: (predicate, pred_set).
The value pred_set is a set([ (predicate, bool), ... ]) as described in
the _reset_conditional_state | [
"Function",
"to",
"calculate",
"the",
"current",
"predicate",
"in",
"the",
"current",
"context",
"."
] | python | train |
i3visio/osrframework | osrframework/thirdparties/pipl_com/lib/containers.py | https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/containers.py#L248-L254 | def from_dict(d):
"""Transform the dict to a person object and return the person."""
query_params_match = d.get('@query_params_match')
sources = [Source.from_dict(source) for source in d.get('sources', [])]
fields = Person.fields_from_dict(d)
return Person(fields=fields, sources=sources,
query_params_match=query_params_match) | [
"def",
"from_dict",
"(",
"d",
")",
":",
"query_params_match",
"=",
"d",
".",
"get",
"(",
"'@query_params_match'",
")",
"sources",
"=",
"[",
"Source",
".",
"from_dict",
"(",
"source",
")",
"for",
"source",
"in",
"d",
".",
"get",
"(",
"'sources'",
",",
"... | Transform the dict to a person object and return the person. | [
"Transform",
"the",
"dict",
"to",
"a",
"person",
"object",
"and",
"return",
"the",
"person",
"."
] | python | train |
blockstack/virtualchain | virtualchain/lib/indexer.py | https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/indexer.py#L1748-L1786 | def state_engine_replay_block(existing_state_engine, new_state_engine, block_height, expected_snapshots={}):
"""
Extract the existing chain state transactions from the existing state engine at a particular block height,
parse them using the new state engine, and process them using the new state engine.
Returns the consensus hash of the block on success.
"""
assert new_state_engine.lastblock + 1 == block_height, 'Block height mismatch: {} + 1 != {}'.format(new_state_engine.lastblock, block_height)
db_con = StateEngine.db_open(existing_state_engine.impl, existing_state_engine.working_dir)
chainstate_block = existing_state_engine.db_chainstate_get_block(db_con, block_height)
db_con.close()
log.debug("{} transactions accepted at block {} in chainstate {}; replaying in {}".format(len(chainstate_block), block_height, existing_state_engine.working_dir, new_state_engine.working_dir))
parsed_txs = dict([(txdata['txid'], transactions.tx_parse(txdata['tx_hex'], blockchain=existing_state_engine.impl.get_blockchain())) for txdata in chainstate_block])
txs = [
{
'txid': txdata['txid'],
'txindex': txdata['txindex'],
'nulldata': '{}{}{}'.format(existing_state_engine.impl.get_magic_bytes().encode('hex'), txdata['opcode'].encode('hex'), txdata['data_hex']),
'ins': parsed_txs[txdata['txid']]['ins'],
'outs': parsed_txs[txdata['txid']]['outs'],
'senders': txdata['senders'],
'fee': txdata['fee'],
'hex': txdata['tx_hex'],
'tx_merkle_path': txdata['tx_merkle_path'],
}
for txdata in chainstate_block]
new_state_engine.db_set_indexing(True, new_state_engine.impl, new_state_engine.working_dir)
ops = new_state_engine.parse_block(block_height, txs)
consensus_hash = new_state_engine.process_block(block_height, ops, expected_snapshots=expected_snapshots)
new_state_engine.db_set_indexing(False, new_state_engine.impl, new_state_engine.working_dir)
return consensus_hash | [
"def",
"state_engine_replay_block",
"(",
"existing_state_engine",
",",
"new_state_engine",
",",
"block_height",
",",
"expected_snapshots",
"=",
"{",
"}",
")",
":",
"assert",
"new_state_engine",
".",
"lastblock",
"+",
"1",
"==",
"block_height",
",",
"'Block height mism... | Extract the existing chain state transactions from the existing state engine at a particular block height,
parse them using the new state engine, and process them using the new state engine.
Returns the consensus hash of the block on success. | [
"Extract",
"the",
"existing",
"chain",
"state",
"transactions",
"from",
"the",
"existing",
"state",
"engine",
"at",
"a",
"particular",
"block",
"height",
"parse",
"them",
"using",
"the",
"new",
"state",
"engine",
"and",
"process",
"them",
"using",
"the",
"new"... | python | train |
sorgerlab/indra | indra/assemblers/cyjs/assembler.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/cyjs/assembler.py#L366-L396 | def _get_node_groups(self):
"""Return a list of node id lists that are topologically identical.
First construct a node_dict which is keyed to the node id and
has a value which is a dict with keys 'sources' and 'targets'.
The 'sources' and 'targets' each contain a list of tuples
(i, polarity, source) edge of the node. node_dict is then processed
by _get_node_key() which returns a tuple of (s,t) where s,t are
sorted tuples of the ids for the source and target nodes. (s,t) is
then used as a key in node_key_dict where the values are the node
ids. node_groups is restricted to groups greater than 1 node.
"""
node_dict = {node['data']['id']: {'sources': [], 'targets': []}
for node in self._nodes}
for edge in self._edges:
# Add edge as a source for its target node
edge_data = (edge['data']['i'], edge['data']['polarity'],
edge['data']['source'])
node_dict[edge['data']['target']]['sources'].append(edge_data)
# Add edge as target for its source node
edge_data = (edge['data']['i'], edge['data']['polarity'],
edge['data']['target'])
node_dict[edge['data']['source']]['targets'].append(edge_data)
# Make a dictionary of nodes based on source/target as a key
node_key_dict = collections.defaultdict(lambda: [])
for node_id, node_d in node_dict.items():
key = self._get_node_key(node_d)
node_key_dict[key].append(node_id)
# Constrain the groups to ones that have more than 1 member
node_groups = [g for g in node_key_dict.values() if (len(g) > 1)]
return node_groups | [
"def",
"_get_node_groups",
"(",
"self",
")",
":",
"node_dict",
"=",
"{",
"node",
"[",
"'data'",
"]",
"[",
"'id'",
"]",
":",
"{",
"'sources'",
":",
"[",
"]",
",",
"'targets'",
":",
"[",
"]",
"}",
"for",
"node",
"in",
"self",
".",
"_nodes",
"}",
"f... | Return a list of node id lists that are topologically identical.
First construct a node_dict which is keyed to the node id and
has a value which is a dict with keys 'sources' and 'targets'.
The 'sources' and 'targets' each contain a list of tuples
(i, polarity, source) edge of the node. node_dict is then processed
by _get_node_key() which returns a tuple of (s,t) where s,t are
sorted tuples of the ids for the source and target nodes. (s,t) is
then used as a key in node_key_dict where the values are the node
ids. node_groups is restricted to groups greater than 1 node. | [
"Return",
"a",
"list",
"of",
"node",
"id",
"lists",
"that",
"are",
"topologically",
"identical",
"."
] | python | train |
BlueBrain/NeuroM | neurom/fst/_neuronfunc.py | https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L180-L206 | def sholl_crossings(neurites, center, radii):
'''calculate crossings of neurites
Args:
nrn(morph): morphology on which to perform Sholl analysis
radii(iterable of floats): radii for which crossings will be counted
Returns:
Array of same length as radii, with a count of the number of crossings
for the respective radius
'''
def _count_crossings(neurite, radius):
'''count_crossings of segments in neurite with radius'''
r2 = radius ** 2
count = 0
for start, end in iter_segments(neurite):
start_dist2, end_dist2 = (morphmath.point_dist2(center, start),
morphmath.point_dist2(center, end))
count += int(start_dist2 <= r2 <= end_dist2 or
end_dist2 <= r2 <= start_dist2)
return count
return np.array([sum(_count_crossings(neurite, r)
for neurite in iter_neurites(neurites))
for r in radii]) | [
"def",
"sholl_crossings",
"(",
"neurites",
",",
"center",
",",
"radii",
")",
":",
"def",
"_count_crossings",
"(",
"neurite",
",",
"radius",
")",
":",
"'''count_crossings of segments in neurite with radius'''",
"r2",
"=",
"radius",
"**",
"2",
"count",
"=",
"0",
"... | calculate crossings of neurites
Args:
nrn(morph): morphology on which to perform Sholl analysis
radii(iterable of floats): radii for which crossings will be counted
Returns:
Array of same length as radii, with a count of the number of crossings
for the respective radius | [
"calculate",
"crossings",
"of",
"neurites"
] | python | train |
gunthercox/ChatterBot | chatterbot/logic/unit_conversion.py | https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/logic/unit_conversion.py#L85-L104 | def get_valid_units(self, ureg, from_unit, target_unit):
"""
Returns the firt match `pint.unit.Unit` object for from_unit and
target_unit strings from a possible variation of metric unit names
supported by pint library.
:param ureg: unit registry which units are defined and handled
:type ureg: `pint.registry.UnitRegistry`
:param from_unit: source metric unit
:type from_unit: str
:param from_unit: target metric unit
:type from_unit: str
"""
from_unit_variations = [from_unit.lower(), from_unit.upper()]
target_unit_variations = [target_unit.lower(), target_unit.upper()]
from_unit = self.get_unit(ureg, from_unit_variations)
target_unit = self.get_unit(ureg, target_unit_variations)
return from_unit, target_unit | [
"def",
"get_valid_units",
"(",
"self",
",",
"ureg",
",",
"from_unit",
",",
"target_unit",
")",
":",
"from_unit_variations",
"=",
"[",
"from_unit",
".",
"lower",
"(",
")",
",",
"from_unit",
".",
"upper",
"(",
")",
"]",
"target_unit_variations",
"=",
"[",
"t... | Returns the firt match `pint.unit.Unit` object for from_unit and
target_unit strings from a possible variation of metric unit names
supported by pint library.
:param ureg: unit registry which units are defined and handled
:type ureg: `pint.registry.UnitRegistry`
:param from_unit: source metric unit
:type from_unit: str
:param from_unit: target metric unit
:type from_unit: str | [
"Returns",
"the",
"firt",
"match",
"pint",
".",
"unit",
".",
"Unit",
"object",
"for",
"from_unit",
"and",
"target_unit",
"strings",
"from",
"a",
"possible",
"variation",
"of",
"metric",
"unit",
"names",
"supported",
"by",
"pint",
"library",
"."
] | python | train |
pantsbuild/pants | src/python/pants/backend/jvm/tasks/junit_run.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/junit_run.py#L413-L431 | def _parse(self, test_spec_str):
"""Parses a test specification string into an object that can yield corresponding tests.
Tests can be specified in one of four forms:
* [classname]
* [classname]#[methodname]
* [fully qualified classname]#[methodname]
* [fully qualified classname]#[methodname]
:param string test_spec: A test specification.
:returns: A Test object.
:rtype: :class:`Test`
"""
components = test_spec_str.split('#', 2)
classname = components[0]
methodname = components[1] if len(components) == 2 else None
return Test(classname=classname, methodname=methodname) | [
"def",
"_parse",
"(",
"self",
",",
"test_spec_str",
")",
":",
"components",
"=",
"test_spec_str",
".",
"split",
"(",
"'#'",
",",
"2",
")",
"classname",
"=",
"components",
"[",
"0",
"]",
"methodname",
"=",
"components",
"[",
"1",
"]",
"if",
"len",
"(",
... | Parses a test specification string into an object that can yield corresponding tests.
Tests can be specified in one of four forms:
* [classname]
* [classname]#[methodname]
* [fully qualified classname]#[methodname]
* [fully qualified classname]#[methodname]
:param string test_spec: A test specification.
:returns: A Test object.
:rtype: :class:`Test` | [
"Parses",
"a",
"test",
"specification",
"string",
"into",
"an",
"object",
"that",
"can",
"yield",
"corresponding",
"tests",
"."
] | python | train |
ic-labs/django-icekit | glamkit_collections/contrib/work_creator/models.py | https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/glamkit_collections/contrib/work_creator/models.py#L212-L233 | def derive_and_set_name_fields_and_slug(
self, set_name_sort=True, set_slug=True
):
"""
Derive subordinate name_* field values from the `name_full` field
unless these fields are set in their own right.
This method is called during `save()`
"""
# name_full is the primary required name field. It must be set.
if is_empty(self.name_full):
if not is_empty(self.name_display):
self.name_full = self.name_display
else:
raise ValueError(
u"%s.name_full cannot be empty at save" % type(self).__name__)
# if empty, `name_sort` == `name_full`
if set_name_sort and is_empty(self.name_sort):
self.name_sort = self.derive_sort_name()
# if empty, `slug` is set to slugified `name_full`
if set_slug and is_empty(self.slug):
self.slug = slugify(self.name_display or self.name_full) | [
"def",
"derive_and_set_name_fields_and_slug",
"(",
"self",
",",
"set_name_sort",
"=",
"True",
",",
"set_slug",
"=",
"True",
")",
":",
"# name_full is the primary required name field. It must be set.",
"if",
"is_empty",
"(",
"self",
".",
"name_full",
")",
":",
"if",
"n... | Derive subordinate name_* field values from the `name_full` field
unless these fields are set in their own right.
This method is called during `save()` | [
"Derive",
"subordinate",
"name_",
"*",
"field",
"values",
"from",
"the",
"name_full",
"field",
"unless",
"these",
"fields",
"are",
"set",
"in",
"their",
"own",
"right",
"."
] | python | train |
simion/pip-upgrader | pip_upgrader/cli.py | https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/cli.py#L47-L84 | def main():
""" Main CLI entrypoint. """
options = get_options()
Windows.enable(auto_colors=True, reset_atexit=True)
try:
# maybe check if virtualenv is not activated
check_for_virtualenv(options)
# 1. detect requirements files
filenames = RequirementsDetector(options.get('<requirements_file>')).get_filenames()
if filenames:
print(Color('{{autoyellow}}Found valid requirements file(s):{{/autoyellow}} '
'{{autocyan}}\n{}{{/autocyan}}'.format('\n'.join(filenames))))
else: # pragma: nocover
print(Color('{autoyellow}No requirements files found in current directory. CD into your project '
'or manually specify requirements files as arguments.{/autoyellow}'))
return
# 2. detect all packages inside requirements
packages = PackagesDetector(filenames).get_packages()
# 3. query pypi API, see which package has a newer version vs the one in requirements (or current env)
packages_status_map = PackagesStatusDetector(
packages, options.get('--use-default-index')).detect_available_upgrades(options)
# 4. [optionally], show interactive screen when user can choose which packages to upgrade
selected_packages = PackageInteractiveSelector(packages_status_map, options).get_packages()
# 5. having the list of packages, do the actual upgrade and replace the version inside all filenames
upgraded_packages = PackagesUpgrader(selected_packages, filenames, options).do_upgrade()
print(Color('{{autogreen}}Successfully upgraded (and updated requirements) for the following packages: '
'{}{{/autogreen}}'.format(','.join([package['name'] for package in upgraded_packages]))))
if options['--dry-run']:
print(Color('{automagenta}Actually, no, because this was a simulation using --dry-run{/automagenta}'))
except KeyboardInterrupt: # pragma: nocover
print(Color('\n{autored}Upgrade interrupted.{/autored}')) | [
"def",
"main",
"(",
")",
":",
"options",
"=",
"get_options",
"(",
")",
"Windows",
".",
"enable",
"(",
"auto_colors",
"=",
"True",
",",
"reset_atexit",
"=",
"True",
")",
"try",
":",
"# maybe check if virtualenv is not activated",
"check_for_virtualenv",
"(",
"opt... | Main CLI entrypoint. | [
"Main",
"CLI",
"entrypoint",
"."
] | python | test |
ellmetha/django-machina | machina/core/loading.py | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/core/loading.py#L101-L109 | def _get_app_module_path(module_label):
""" Given a module label, loop over the apps specified in the INSTALLED_APPS to find the
corresponding application module path.
"""
app_name = module_label.rsplit('.', 1)[0]
for app in settings.INSTALLED_APPS:
if app.endswith('.' + app_name) or app == app_name:
return app
return None | [
"def",
"_get_app_module_path",
"(",
"module_label",
")",
":",
"app_name",
"=",
"module_label",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"for",
"app",
"in",
"settings",
".",
"INSTALLED_APPS",
":",
"if",
"app",
".",
"endswith",
"(",
"'.'",
... | Given a module label, loop over the apps specified in the INSTALLED_APPS to find the
corresponding application module path. | [
"Given",
"a",
"module",
"label",
"loop",
"over",
"the",
"apps",
"specified",
"in",
"the",
"INSTALLED_APPS",
"to",
"find",
"the",
"corresponding",
"application",
"module",
"path",
"."
] | python | train |
MonashBI/arcana | arcana/data/spec.py | https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/spec.py#L73-L96 | def bind(self, study, **kwargs): # @UnusedVariable
"""
Returns a copy of the AcquiredSpec bound to the given study
Parameters
----------
study : Study
A study to bind the fileset spec to (should happen in the
study __init__)
"""
if self.default is None:
raise ArcanaError(
"Attempted to bind '{}' to {} but only acquired specs with "
"a default value should be bound to studies{})".format(
self.name, study))
if self._study is not None:
# This avoids rebinding specs to sub-studies that have already
# been bound to the multi-study
bound = self
else:
bound = copy(self)
bound._study = study
bound._default = bound.default.bind(study)
return bound | [
"def",
"bind",
"(",
"self",
",",
"study",
",",
"*",
"*",
"kwargs",
")",
":",
"# @UnusedVariable",
"if",
"self",
".",
"default",
"is",
"None",
":",
"raise",
"ArcanaError",
"(",
"\"Attempted to bind '{}' to {} but only acquired specs with \"",
"\"a default value should ... | Returns a copy of the AcquiredSpec bound to the given study
Parameters
----------
study : Study
A study to bind the fileset spec to (should happen in the
study __init__) | [
"Returns",
"a",
"copy",
"of",
"the",
"AcquiredSpec",
"bound",
"to",
"the",
"given",
"study"
] | python | train |
Clinical-Genomics/housekeeper | housekeeper/cli/init.py | https://github.com/Clinical-Genomics/housekeeper/blob/a7d10d327dc9f06274bdef5504ed1b9413f2c8c1/housekeeper/cli/init.py#L11-L26 | def init(context, reset, force):
"""Setup the database."""
store = Store(context.obj['database'], context.obj['root'])
existing_tables = store.engine.table_names()
if force or reset:
if existing_tables and not force:
message = f"Delete existing tables? [{', '.join(existing_tables)}]"
click.confirm(click.style(message, fg='yellow'), abort=True)
store.drop_all()
elif existing_tables:
click.echo(click.style("Database already exists, use '--reset'", fg='red'))
context.abort()
store.create_all()
message = f"Success! New tables: {', '.join(store.engine.table_names())}"
click.echo(click.style(message, fg='green')) | [
"def",
"init",
"(",
"context",
",",
"reset",
",",
"force",
")",
":",
"store",
"=",
"Store",
"(",
"context",
".",
"obj",
"[",
"'database'",
"]",
",",
"context",
".",
"obj",
"[",
"'root'",
"]",
")",
"existing_tables",
"=",
"store",
".",
"engine",
".",
... | Setup the database. | [
"Setup",
"the",
"database",
"."
] | python | train |
boundlessgeo/gsconfig | src/geoserver/catalog.py | https://github.com/boundlessgeo/gsconfig/blob/532f561f32b91ea8debea0573c503dd20988bf40/src/geoserver/catalog.py#L194-L225 | def delete(self, config_object, purge=None, recurse=False):
"""
send a delete request
XXX [more here]
"""
rest_url = config_object.href
params = []
# purge deletes the SLD from disk when a style is deleted
if purge:
params.append("purge=" + str(purge))
# recurse deletes the resource when a layer is deleted.
if recurse:
params.append("recurse=true")
if params:
rest_url = rest_url + "?" + "&".join(params)
headers = {
"Content-type": "application/xml",
"Accept": "application/xml"
}
resp = self.http_request(rest_url, method='delete', headers=headers)
if resp.status_code != 200:
raise FailedRequestError('Failed to make DELETE request: {}, {}'.format(resp.status_code, resp.text))
self._cache.clear()
# do we really need to return anything other than None?
return (resp) | [
"def",
"delete",
"(",
"self",
",",
"config_object",
",",
"purge",
"=",
"None",
",",
"recurse",
"=",
"False",
")",
":",
"rest_url",
"=",
"config_object",
".",
"href",
"params",
"=",
"[",
"]",
"# purge deletes the SLD from disk when a style is deleted",
"if",
"pur... | send a delete request
XXX [more here] | [
"send",
"a",
"delete",
"request",
"XXX",
"[",
"more",
"here",
"]"
] | python | valid |
rbarrois/xworkflows | src/xworkflows/base.py | https://github.com/rbarrois/xworkflows/blob/4a94b04ba83cb43f61d4b0f7db6964a667c86b5b/src/xworkflows/base.py#L664-L674 | def load_parent_implems(self, parent_implems):
"""Import previously defined implementations.
Args:
parent_implems (ImplementationList): List of implementations defined
in a parent class.
"""
for trname, attr, implem in parent_implems.get_custom_implementations():
self.implementations[trname] = implem.copy()
self.transitions_at[trname] = attr
self.custom_implems.add(trname) | [
"def",
"load_parent_implems",
"(",
"self",
",",
"parent_implems",
")",
":",
"for",
"trname",
",",
"attr",
",",
"implem",
"in",
"parent_implems",
".",
"get_custom_implementations",
"(",
")",
":",
"self",
".",
"implementations",
"[",
"trname",
"]",
"=",
"implem"... | Import previously defined implementations.
Args:
parent_implems (ImplementationList): List of implementations defined
in a parent class. | [
"Import",
"previously",
"defined",
"implementations",
"."
] | python | train |
kronenthaler/mod-pbxproj | pbxproj/pbxextensions/ProjectFlags.py | https://github.com/kronenthaler/mod-pbxproj/blob/8de3cbdd3210480ddbb1fa0f50a4f4ea87de6e71/pbxproj/pbxextensions/ProjectFlags.py#L49-L57 | def add_other_cflags(self, flags, target_name=None, configuration_name=None):
"""
Adds flag values to the OTHER_CFLAGS flag.
:param flags: A string or array of strings. If none, removes all values from the flag.
:param target_name: Target name or list of target names to add the flag to or None for every target
:param configuration_name: Configuration name to add the flag to or None for every configuration
:return: void
"""
self.add_flags(XCBuildConfigurationFlags.OTHER_CFLAGS, flags, target_name, configuration_name) | [
"def",
"add_other_cflags",
"(",
"self",
",",
"flags",
",",
"target_name",
"=",
"None",
",",
"configuration_name",
"=",
"None",
")",
":",
"self",
".",
"add_flags",
"(",
"XCBuildConfigurationFlags",
".",
"OTHER_CFLAGS",
",",
"flags",
",",
"target_name",
",",
"co... | Adds flag values to the OTHER_CFLAGS flag.
:param flags: A string or array of strings. If none, removes all values from the flag.
:param target_name: Target name or list of target names to add the flag to or None for every target
:param configuration_name: Configuration name to add the flag to or None for every configuration
:return: void | [
"Adds",
"flag",
"values",
"to",
"the",
"OTHER_CFLAGS",
"flag",
".",
":",
"param",
"flags",
":",
"A",
"string",
"or",
"array",
"of",
"strings",
".",
"If",
"none",
"removes",
"all",
"values",
"from",
"the",
"flag",
".",
":",
"param",
"target_name",
":",
... | python | train |
RedHatQE/Sentaku | examples/todo_example/ux.py | https://github.com/RedHatQE/Sentaku/blob/b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c/examples/todo_example/ux.py#L52-L58 | def create_item(self, name):
"""
create a new todo list item
"""
elem = self.controlled_list.create_item(name)
if elem:
return TodoElementUX(parent=self, controlled_element=elem) | [
"def",
"create_item",
"(",
"self",
",",
"name",
")",
":",
"elem",
"=",
"self",
".",
"controlled_list",
".",
"create_item",
"(",
"name",
")",
"if",
"elem",
":",
"return",
"TodoElementUX",
"(",
"parent",
"=",
"self",
",",
"controlled_element",
"=",
"elem",
... | create a new todo list item | [
"create",
"a",
"new",
"todo",
"list",
"item"
] | python | train |
googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_cloud.py | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_cloud.py#L40-L96 | def preprocess(train_dataset, output_dir, eval_dataset, checkpoint, pipeline_option):
"""Preprocess data in Cloud with DataFlow."""
import apache_beam as beam
import google.datalab.utils
from . import _preprocess
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
job_name = ('preprocess-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
staging_package_url = _util.repackage_to_staging(output_dir)
tmpdir = tempfile.mkdtemp()
# suppress DataFlow warnings about wheel package as extra package.
original_level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.ERROR)
try:
# Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.
# Remove when the issue is fixed and new version of DataFlow is included in Datalab.
extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL]
local_packages = [os.path.join(tmpdir, os.path.basename(p))
for p in extra_packages]
for source, dest in zip(extra_packages, local_packages):
file_io.copy(source, dest, overwrite=True)
options = {
'staging_location': os.path.join(output_dir, 'tmp', 'staging'),
'temp_location': os.path.join(output_dir, 'tmp'),
'job_name': job_name,
'project': _util.default_project(),
'extra_packages': local_packages,
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True
}
if pipeline_option is not None:
options.update(pipeline_option)
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DataflowRunner', options=opts)
_preprocess.configure_pipeline(p, train_dataset, eval_dataset,
checkpoint, output_dir, job_name)
job_results = p.run()
finally:
shutil.rmtree(tmpdir)
logging.getLogger().setLevel(original_level)
if (_util.is_in_IPython()):
import IPython
dataflow_url = 'https://console.developers.google.com/dataflow?project=%s' % \
_util.default_project()
html = 'Job "%s" submitted.' % job_name
html += '<p>Click <a href="%s" target="_blank">here</a> to track preprocessing job. <br/>' \
% dataflow_url
IPython.display.display_html(html, raw=True)
return google.datalab.utils.DataflowJob(job_results) | [
"def",
"preprocess",
"(",
"train_dataset",
",",
"output_dir",
",",
"eval_dataset",
",",
"checkpoint",
",",
"pipeline_option",
")",
":",
"import",
"apache_beam",
"as",
"beam",
"import",
"google",
".",
"datalab",
".",
"utils",
"from",
".",
"import",
"_preprocess",... | Preprocess data in Cloud with DataFlow. | [
"Preprocess",
"data",
"in",
"Cloud",
"with",
"DataFlow",
"."
] | python | train |
bokeh/bokeh | bokeh/transform.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/transform.py#L216-L248 | def linear_cmap(field_name, palette, low, high, low_color=None, high_color=None, nan_color="gray"):
''' Create a ``DataSpec`` dict that applyies a client-side
``LinearColorMapper`` transformation to a ``ColumnDataSource`` column.
Args:
field_name (str) : a field name to configure ``DataSpec`` with
palette (seq[color]) : a list of colors to use for colormapping
low (float) : a minimum value of the range to map into the palette.
Values below this are clamped to ``low``.
high (float) : a maximum value of the range to map into the palette.
Values above this are clamped to ``high``.
low_color (color, optional) : color to be used if data is lower than
``low`` value. If None, values lower than ``low`` are mapped to the
first color in the palette. (default: None)
high_color (color, optional) : color to be used if data is higher than
``high`` value. If None, values higher than ``high`` are mapped to
the last color in the palette. (default: None)
nan_color (color, optional) : a default color to use when mapping data
from a column does not succeed (default: "gray")
'''
return field(field_name, LinearColorMapper(palette=palette,
low=low,
high=high,
nan_color=nan_color,
low_color=low_color,
high_color=high_color)) | [
"def",
"linear_cmap",
"(",
"field_name",
",",
"palette",
",",
"low",
",",
"high",
",",
"low_color",
"=",
"None",
",",
"high_color",
"=",
"None",
",",
"nan_color",
"=",
"\"gray\"",
")",
":",
"return",
"field",
"(",
"field_name",
",",
"LinearColorMapper",
"(... | Create a ``DataSpec`` dict that applyies a client-side
``LinearColorMapper`` transformation to a ``ColumnDataSource`` column.
Args:
field_name (str) : a field name to configure ``DataSpec`` with
palette (seq[color]) : a list of colors to use for colormapping
low (float) : a minimum value of the range to map into the palette.
Values below this are clamped to ``low``.
high (float) : a maximum value of the range to map into the palette.
Values above this are clamped to ``high``.
low_color (color, optional) : color to be used if data is lower than
``low`` value. If None, values lower than ``low`` are mapped to the
first color in the palette. (default: None)
high_color (color, optional) : color to be used if data is higher than
``high`` value. If None, values higher than ``high`` are mapped to
the last color in the palette. (default: None)
nan_color (color, optional) : a default color to use when mapping data
from a column does not succeed (default: "gray") | [
"Create",
"a",
"DataSpec",
"dict",
"that",
"applyies",
"a",
"client",
"-",
"side",
"LinearColorMapper",
"transformation",
"to",
"a",
"ColumnDataSource",
"column",
"."
] | python | train |
saxix/django-concurrency | src/concurrency/utils.py | https://github.com/saxix/django-concurrency/blob/9a289dc007b1cdf609b7dfb77a6d2868abc8097f/src/concurrency/utils.py#L13-L51 | def deprecated(replacement=None, version=None):
"""A decorator which can be used to mark functions as deprecated.
replacement is a callable that will be called with the same args
as the decorated function.
>>> import pytest
>>> @deprecated()
... def foo1(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo1, 1)
1
>>> def newfun(x):
... return 0
...
>>> @deprecated(newfun, '1.1')
... def foo2(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo2, 1)
0
>>>
"""
def outer(oldfun):
def inner(*args, **kwargs):
msg = "%s is deprecated" % oldfun.__name__
if version is not None:
msg += "will be removed in version %s;" % version
if replacement is not None:
msg += "; use %s instead" % (replacement)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
if callable(replacement):
return replacement(*args, **kwargs)
else:
return oldfun(*args, **kwargs)
return inner
return outer | [
"def",
"deprecated",
"(",
"replacement",
"=",
"None",
",",
"version",
"=",
"None",
")",
":",
"def",
"outer",
"(",
"oldfun",
")",
":",
"def",
"inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"msg",
"=",
"\"%s is deprecated\"",
"%",
"oldfu... | A decorator which can be used to mark functions as deprecated.
replacement is a callable that will be called with the same args
as the decorated function.
>>> import pytest
>>> @deprecated()
... def foo1(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo1, 1)
1
>>> def newfun(x):
... return 0
...
>>> @deprecated(newfun, '1.1')
... def foo2(x):
... return x
...
>>> pytest.warns(DeprecationWarning, foo2, 1)
0
>>> | [
"A",
"decorator",
"which",
"can",
"be",
"used",
"to",
"mark",
"functions",
"as",
"deprecated",
".",
"replacement",
"is",
"a",
"callable",
"that",
"will",
"be",
"called",
"with",
"the",
"same",
"args",
"as",
"the",
"decorated",
"function",
".",
">>>",
"impo... | python | train |
chovanecm/sacredboard | sacredboard/app/data/pymongo/rundao.py | https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/pymongo/rundao.py#L181-L200 | def _status_filter_to_query(clause):
"""
Convert a clause querying for an experiment state RUNNING or DEAD.
Queries that check for experiment state RUNNING and DEAD need to be
replaced by the logic that decides these two states as both of them
are stored in the Mongo Database as "RUNNING". We use querying by last
heartbeat time.
:param clause: A clause whose field is "status" and "value" is one of
RUNNING, DEAD.
:return: A MongoDB clause.
"""
if clause["value"] == "RUNNING":
mongo_clause = MongoRunDAO.RUNNING_NOT_DEAD_CLAUSE
elif clause["value"] == "DEAD":
mongo_clause = MongoRunDAO.RUNNING_DEAD_RUN_CLAUSE
if clause["operator"] == "!=":
mongo_clause = {"$not": mongo_clause}
return mongo_clause | [
"def",
"_status_filter_to_query",
"(",
"clause",
")",
":",
"if",
"clause",
"[",
"\"value\"",
"]",
"==",
"\"RUNNING\"",
":",
"mongo_clause",
"=",
"MongoRunDAO",
".",
"RUNNING_NOT_DEAD_CLAUSE",
"elif",
"clause",
"[",
"\"value\"",
"]",
"==",
"\"DEAD\"",
":",
"mongo... | Convert a clause querying for an experiment state RUNNING or DEAD.
Queries that check for experiment state RUNNING and DEAD need to be
replaced by the logic that decides these two states as both of them
are stored in the Mongo Database as "RUNNING". We use querying by last
heartbeat time.
:param clause: A clause whose field is "status" and "value" is one of
RUNNING, DEAD.
:return: A MongoDB clause. | [
"Convert",
"a",
"clause",
"querying",
"for",
"an",
"experiment",
"state",
"RUNNING",
"or",
"DEAD",
"."
] | python | train |
linkhub-sdk/popbill.py | popbill/statementService.py | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L159-L175 | def register(self, CorpNum, statement, UserID=None):
""" 임시저장
args
CorpNum : 팝빌회원 사업자번호
statement : 등록할 전자명세서 object. made with Statement(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if statement == None:
raise PopbillException(-99999999, "등록할 전자명세서 정보가 입력되지 않았습니다.")
postData = self._stringtify(statement)
return self._httppost('/Statement', postData, CorpNum, UserID) | [
"def",
"register",
"(",
"self",
",",
"CorpNum",
",",
"statement",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"statement",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"등록할 전자명세서 정보가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"se... | 임시저장
args
CorpNum : 팝빌회원 사업자번호
statement : 등록할 전자명세서 object. made with Statement(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"임시저장",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"statement",
":",
"등록할",
"전자명세서",
"object",
".",
"made",
"with",
"Statement",
"(",
"...",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"Popb... | python | train |
Fortran-FOSS-Programmers/ford | ford/sourceform.py | https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/sourceform.py#L417-L451 | def sort(self):
'''
Sorts components of the object.
'''
if hasattr(self,'variables'):
sort_items(self,self.variables)
if hasattr(self,'modules'):
sort_items(self,self.modules)
if hasattr(self,'submodules'):
sort_items(self,self.submodules)
if hasattr(self,'common'):
sort_items(self,self.common)
if hasattr(self,'subroutines'):
sort_items(self,self.subroutines)
if hasattr(self,'modprocedures'):
sort_items(self,self.modprocedures)
if hasattr(self,'functions'):
sort_items(self,self.functions)
if hasattr(self,'interfaces'):
sort_items(self,self.interfaces)
if hasattr(self,'absinterfaces'):
sort_items(self,self.absinterfaces)
if hasattr(self,'types'):
sort_items(self,self.types)
if hasattr(self,'programs'):
sort_items(self,self.programs)
if hasattr(self,'blockdata'):
sort_items(self,self.blockdata)
if hasattr(self,'boundprocs'):
sort_items(self,self.boundprocs)
if hasattr(self,'finalprocs'):
sort_items(self,self.finalprocs)
if hasattr(self,'args'):
#sort_items(self.args,args=True)
pass | [
"def",
"sort",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'variables'",
")",
":",
"sort_items",
"(",
"self",
",",
"self",
".",
"variables",
")",
"if",
"hasattr",
"(",
"self",
",",
"'modules'",
")",
":",
"sort_items",
"(",
"self",
",",... | Sorts components of the object. | [
"Sorts",
"components",
"of",
"the",
"object",
"."
] | python | train |
pymc-devs/pymc | pymc/distributions.py | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L741-L751 | def arlognormal_like(x, a, sigma, rho):
R"""
Autoregressive lognormal log-likelihood.
.. math::
x_i & = a_i \exp(e_i) \\
e_i & = \rho e_{i-1} + \epsilon_i
where :math:`\epsilon_i \sim N(0,\sigma)`.
"""
return flib.arlognormal(x, np.log(a), sigma, rho, beta=1) | [
"def",
"arlognormal_like",
"(",
"x",
",",
"a",
",",
"sigma",
",",
"rho",
")",
":",
"return",
"flib",
".",
"arlognormal",
"(",
"x",
",",
"np",
".",
"log",
"(",
"a",
")",
",",
"sigma",
",",
"rho",
",",
"beta",
"=",
"1",
")"
] | R"""
Autoregressive lognormal log-likelihood.
.. math::
x_i & = a_i \exp(e_i) \\
e_i & = \rho e_{i-1} + \epsilon_i
where :math:`\epsilon_i \sim N(0,\sigma)`. | [
"R",
"Autoregressive",
"lognormal",
"log",
"-",
"likelihood",
"."
] | python | train |
jaredLunde/vital-tools | vital/tools/lists.py | https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/tools/lists.py#L91-L99 | def randrange(seq):
""" Yields random values from @seq until @seq is empty """
seq = seq.copy()
choose = rng().choice
remove = seq.remove
for x in range(len(seq)):
y = choose(seq)
remove(y)
yield y | [
"def",
"randrange",
"(",
"seq",
")",
":",
"seq",
"=",
"seq",
".",
"copy",
"(",
")",
"choose",
"=",
"rng",
"(",
")",
".",
"choice",
"remove",
"=",
"seq",
".",
"remove",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"seq",
")",
")",
":",
"y",
"="... | Yields random values from @seq until @seq is empty | [
"Yields",
"random",
"values",
"from"
] | python | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L396-L401 | def ToJsonString(self):
"""Converts FieldMask to string according to proto3 JSON spec."""
camelcase_paths = []
for path in self.paths:
camelcase_paths.append(_SnakeCaseToCamelCase(path))
return ','.join(camelcase_paths) | [
"def",
"ToJsonString",
"(",
"self",
")",
":",
"camelcase_paths",
"=",
"[",
"]",
"for",
"path",
"in",
"self",
".",
"paths",
":",
"camelcase_paths",
".",
"append",
"(",
"_SnakeCaseToCamelCase",
"(",
"path",
")",
")",
"return",
"','",
".",
"join",
"(",
"cam... | Converts FieldMask to string according to proto3 JSON spec. | [
"Converts",
"FieldMask",
"to",
"string",
"according",
"to",
"proto3",
"JSON",
"spec",
"."
] | python | train |
planetarypy/pvl | pvl/_collections.py | https://github.com/planetarypy/pvl/blob/ed92b284c4208439b033d28c9c176534c0faac0e/pvl/_collections.py#L322-L324 | def insert_before(self, key, new_item, instance=0):
"""Insert an item before a key"""
self._insert_item(key, new_item, instance, False) | [
"def",
"insert_before",
"(",
"self",
",",
"key",
",",
"new_item",
",",
"instance",
"=",
"0",
")",
":",
"self",
".",
"_insert_item",
"(",
"key",
",",
"new_item",
",",
"instance",
",",
"False",
")"
] | Insert an item before a key | [
"Insert",
"an",
"item",
"before",
"a",
"key"
] | python | train |
def add_remote_app(self, remote_app, name=None, **kwargs):
    """Register a remote application, customizing a copy when needed.

    If the requested registration name differs from the instance's own
    name, or attribute overrides are supplied, the instance is
    shallow-copied first so the original object stays untouched and can
    keep serving as a prototype.

    :param remote_app: the remote application instance
        (a :class:`BaseApplication` subclass instance).
    :param name: registration name; defaults to ``remote_app.name``.
    :param kwargs: attribute overrides applied to the (copied) instance.
    :returns: the instance that was actually registered.
    """
    if name is None:
        name = remote_app.name
    needs_prototype_copy = name != remote_app.name or bool(kwargs)
    if needs_prototype_copy:
        remote_app = copy.copy(remote_app)
        remote_app.name = name
        vars(remote_app).update(kwargs)
    # Fall back to the shared client cache when the app does not carry
    # its own `clients` mapping.
    if not hasattr(remote_app, 'clients'):
        remote_app.clients = cached_clients
    self.remote_apps[name] = remote_app
    return remote_app
"def",
"add_remote_app",
"(",
"self",
",",
"remote_app",
",",
"name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"remote_app",
".",
"name",
"if",
"name",
"!=",
"remote_app",
".",
"name",
"or",
"kwarg... | Adds remote application and applies custom attributes on it.
If the application instance's name is different from the argument
provided name, or the keyword arguments is not empty, then the
application instance will not be modified but be copied as a
prototype.
:param remote_app: the remote application instance.
:type remote_app: the subclasses of :class:`BaseApplication`
:params kwargs: the overriding attributes for the application instance. | [
"Adds",
"remote",
"application",
"and",
"applies",
"custom",
"attributes",
"on",
"it",
"."
] | python | test |
def CleanName(name):
    """Perform generic name cleaning.

    Produces a valid Python identifier: non-identifier characters become
    underscores, a leading digit gets an underscore prefix, keywords get a
    trailing underscore, and a ``__`` prefix is shielded with ``f`` so the
    result does not trigger Python's private-name mangling.
    """
    cleaned = re.sub('[^_A-Za-z0-9]', '_', name)
    if cleaned[0].isdigit():
        cleaned = '_' + cleaned
    while keyword.iskeyword(cleaned):
        cleaned = cleaned + '_'
    # A '__' prefix would run afoul of Python field renaming, so shield it.
    if cleaned.startswith('__'):
        cleaned = 'f' + cleaned
    return cleaned
"def",
"CleanName",
"(",
"name",
")",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"'[^_A-Za-z0-9]'",
",",
"'_'",
",",
"name",
")",
"if",
"name",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"name",
"=",
"'_%s'",
"%",
"name",
"while",
"keyword",
"."... | Perform generic name cleaning. | [
"Perform",
"generic",
"name",
"cleaning",
"."
] | python | train |
paramiko/paramiko | paramiko/client.py | https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/client.py#L110-L127 | def load_host_keys(self, filename):
    """
    Load host keys from a local host-key file. Host keys read with this
    method will be checked after keys loaded via `load_system_host_keys`,
    but will be saved back by `save_host_keys` (so they can be modified).
    The missing host key policy `.AutoAddPolicy` adds keys to this set and
    saves them, when connecting to a previously-unknown server.

    This method can be called multiple times. Each new set of host keys
    will be merged with the existing set (new replacing old if there are
    conflicts). When automatically saving, the last hostname is used.

    :param str filename: the filename to read

    :raises: ``IOError`` -- if the filename could not be read
    """
    # Remember the most recent path so `save_host_keys` writes the merged
    # key set back here; each call overwrites the save target.
    self._host_keys_filename = filename
    # Merges into the existing in-memory set ("new replacing old" per the
    # docstring) rather than clearing previously loaded keys.
    self._host_keys.load(filename)
"def",
"load_host_keys",
"(",
"self",
",",
"filename",
")",
":",
"self",
".",
"_host_keys_filename",
"=",
"filename",
"self",
".",
"_host_keys",
".",
"load",
"(",
"filename",
")"
] | Load host keys from a local host-key file. Host keys read with this
method will be checked after keys loaded via `load_system_host_keys`,
but will be saved back by `save_host_keys` (so they can be modified).
The missing host key policy `.AutoAddPolicy` adds keys to this set and
saves them, when connecting to a previously-unknown server.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts). When automatically saving, the last hostname is used.
:param str filename: the filename to read
:raises: ``IOError`` -- if the filename could not be read | [
"Load",
"host",
"keys",
"from",
"a",
"local",
"host",
"-",
"key",
"file",
".",
"Host",
"keys",
"read",
"with",
"this",
"method",
"will",
"be",
"checked",
"after",
"keys",
"loaded",
"via",
"load_system_host_keys",
"but",
"will",
"be",
"saved",
"back",
"by",... | python | train |
def olympusini_metadata(inistr):
    """Return OlympusSIS metadata from INI string.

    No official documentation of the format is available.

    Parameters
    ----------
    inistr : str
        Decoded content of an OlympusSIS INI block.

    Returns
    -------
    dict
        Mapping of INI section names to key/value dicts, plus synthesized
        'axes' and 'shape' entries when a 'Dimension' section is present.
    """
    def keyindex(key):
        # Split a key with a numeric suffix (e.g. 'Gamma2') into its name
        # and a zero-based index ('Gamma', 1); no suffix means index 0.
        index = 0
        i = len(key.rstrip('0123456789'))
        if i < len(key):
            index = int(key[i:]) - 1
            key = key[:i]
        return key, index

    result = {}
    bands = []
    zpos = None
    tpos = None
    for line in inistr.splitlines():
        line = line.strip()
        if line == '' or line[0] == ';':
            # Skip blank lines and ';' comment lines.
            continue
        if line[0] == '[' and line[-1] == ']':
            # Section header: open a new sub-dict and set up any
            # section-specific accumulators.
            section_name = line[1:-1]
            result[section_name] = section = {}
            if section_name == 'Dimension':
                result['axes'] = axes = []
                result['shape'] = shape = []
            elif section_name == 'ASD':
                result[section_name] = []
            elif section_name == 'Z':
                if 'Dimension' in result:
                    result[section_name]['ZPos'] = zpos = []
            elif section_name == 'Time':
                if 'Dimension' in result:
                    result[section_name]['TimePos'] = tpos = []
            elif section_name == 'Band':
                nbands = result['Dimension']['Band']
                bands = [{'LUT': []} for _ in range(nbands)]
                result[section_name] = bands
                iband = 0
        else:
            # Key=value line: empty values become None, comma-separated
            # values become tuples, scalars are converted by astype().
            key, value = line.split('=')
            if value.strip() == '':
                value = None
            elif ',' in value:
                value = tuple(astype(v) for v in value.split(','))
            else:
                value = astype(value)
            if section_name == 'Dimension':
                section[key] = value
                axes.append(key)
                shape.append(value)
            elif section_name == 'ASD':
                if key == 'Count':
                    # BUGFIX: use a comprehension so each entry is a
                    # distinct dict; '[{}] * value' aliases one shared
                    # dict, making every ASD entry receive every key.
                    result['ASD'] = [{} for _ in range(value)]
                else:
                    key, index = keyindex(key)
                    result['ASD'][index][key] = value
            elif section_name == 'Band':
                if key[:3] == 'LUT':
                    # Unpack the 32-bit color value into its three
                    # low-order bytes of the little-endian encoding.
                    lut = bands[iband]['LUT']
                    value = struct.pack('<I', value)
                    lut.append(
                        [ord(value[0:1]), ord(value[1:2]), ord(value[2:3])])
                else:
                    key, iband = keyindex(key)
                    bands[iband][key] = value
            elif key[:4] == 'ZPos' and zpos is not None:
                zpos.append(value)
            elif key[:7] == 'TimePos' and tpos is not None:
                tpos.append(value)
            else:
                section[key] = value

    if 'axes' in result:
        # Collapse the Dimension section into normalized axes/shape,
        # dropping singleton dimensions; 'Band' maps to the 'C' axis.
        sisaxes = {'Band': 'C'}
        axes = []
        shape = []
        for i, x in zip(result['shape'], result['axes']):
            if i > 1:
                axes.append(sisaxes.get(x, x[0].upper()))
                shape.append(i)
        result['axes'] = ''.join(axes)
        result['shape'] = tuple(shape)
    try:
        result['Z']['ZPos'] = numpy.array(
            result['Z']['ZPos'][:result['Dimension']['Z']], 'float64')
    except Exception:
        # Best effort: keep positions as plain lists when the sections
        # are missing or inconsistent.
        pass
    try:
        result['Time']['TimePos'] = numpy.array(
            result['Time']['TimePos'][:result['Dimension']['Time']], 'int32')
    except Exception:
        pass
    for band in bands:
        band['LUT'] = numpy.array(band['LUT'], 'uint8')
    return result
"def",
"olympusini_metadata",
"(",
"inistr",
")",
":",
"def",
"keyindex",
"(",
"key",
")",
":",
"# split key into name and index",
"index",
"=",
"0",
"i",
"=",
"len",
"(",
"key",
".",
"rstrip",
"(",
"'0123456789'",
")",
")",
"if",
"i",
"<",
"len",
"(",
... | Return OlympusSIS metadata from INI string.
No documentation is available. | [
"Return",
"OlympusSIS",
"metadata",
"from",
"INI",
"string",
"."
] | python | train |
def get_variables(text):
    """Extract templating variables declared inside ``text``.

    A variable is declared on a single line of the form ``~ var: value``:
    the ``~`` must start the line and be followed by at least one space;
    ``var`` is any sequence of characters without a ``:``; ``value`` is any
    sequence of characters. Every definition line is stripped from the
    returned text.

    RETURNS:
    text      -- str; text with all variable definitions removed
    variables -- dict; variable to value mappings
    """
    variables = dict(re_vars.findall(text))
    stripped = re_vars.sub('', text)
    return stripped, variables
"def",
"get_variables",
"(",
"text",
")",
":",
"variables",
"=",
"{",
"var",
":",
"value",
"for",
"var",
",",
"value",
"in",
"re_vars",
".",
"findall",
"(",
"text",
")",
"}",
"text",
"=",
"re_vars",
".",
"sub",
"(",
"''",
",",
"text",
")",
"return"... | Extracts variables that can be used in templating engines.
Each variable is defined on a single line in the following way:
~ var: text
The ~ must be at the start of a newline, followed by at least one
space. var can be any sequence of characters that does not contain
a ":". text can be any sequence of characters.
RETURNS:
text -- str; text with all variable definitions removed
variables -- dict; variable to value mappings | [
"Extracts",
"variables",
"that",
"can",
"be",
"used",
"in",
"templating",
"engines",
"."
] | python | train |
fastai/fastai | fastai/data_block.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L267-L274 | def label_from_df(self, cols:IntsOrStrs=1, label_cls:Callable=None, **kwargs):
    "Label `self.items` from the values in `cols` in `self.inner_df`."
    # Resolve column names/indices to positional indices and select the
    # label columns from the backing dataframe.
    labels = self.inner_df.iloc[:,df_names_to_idx(cols, self.inner_df)]
    # Labels must be complete; NaNs would silently corrupt the label set.
    assert labels.isna().sum().sum() == 0, f"You have NaN values in column(s) {cols} of your dataframe, please fix it."
    # Multiple label columns default to one-hot multi-label classification,
    # with the column names themselves used as the class names.
    if is_listy(cols) and len(cols) > 1 and (label_cls is None or label_cls == MultiCategoryList):
        new_kwargs,label_cls = dict(one_hot=True, classes= cols),MultiCategoryList
        # Caller-supplied kwargs win over the derived one-hot defaults.
        kwargs = {**new_kwargs, **kwargs}
    # _maybe_squeeze presumably drops a singleton column axis so a single
    # label column yields a flat sequence — confirm against its definition.
    return self._label_from_list(_maybe_squeeze(labels), label_cls=label_cls, **kwargs) | [
"def",
"label_from_df",
"(",
"self",
",",
"cols",
":",
"IntsOrStrs",
"=",
"1",
",",
"label_cls",
":",
"Callable",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"labels",
"=",
"self",
".",
"inner_df",
".",
"iloc",
"[",
":",
",",
"df_names_to_idx",
"... | Label `self.items` from the values in `cols` in `self.inner_df`. | [
"Label",
"self",
".",
"items",
"from",
"the",
"values",
"in",
"cols",
"in",
"self",
".",
"inner_df",
"."
] | python | train |
def get_stage_deployers(keys, stack):
    """Build the deployers that provision resources for one stage.

    For every key in ``keys`` that has entries in the stack config, one
    deployer (or more) per resource config is created: configs that carry
    a provider are handed to the cloud layer; anything else becomes a
    plain server deployment.

    :param keys: Top-level configuration keys to create deployers for.
    :type keys: :class:`~collections.Iterable`
    :param stack: A stack object.
    :type stack: :class:`~bang.stack.Stack`
    :rtype: :class:`list` of :class:`~bang.deployers.deployer.Deployer`
    """
    stack_config = stack.config
    creds = stack_config[A.DEPLOYER_CREDS]
    deployers = []
    for res_type in keys:
        configs = stack_config.get(res_type)
        if not configs:
            continue
        log.debug("Found config for resource type, %s" % res_type)
        for resource in configs:
            if A.PROVIDER in resource:
                new_deployers = cloud.get_deployers(
                    resource, res_type, stack, creds)
            else:
                new_deployers = [default.ServerDeployer(stack, resource)]
            if new_deployers:
                deployers.extend(new_deployers)
    return deployers
"def",
"get_stage_deployers",
"(",
"keys",
",",
"stack",
")",
":",
"config",
"=",
"stack",
".",
"config",
"creds",
"=",
"config",
"[",
"A",
".",
"DEPLOYER_CREDS",
"]",
"deployers",
"=",
"[",
"]",
"for",
"res_type",
"in",
"keys",
":",
"res_configs",
"=",
... | Returns a list of deployer objects that *create* cloud resources. Each
member of the list is responsible for provisioning a single stack resource
(e.g. a virtual server, a security group, a bucket, etc...).
:param keys: A list of top-level configuration keys for which to create
deployers.
:type keys: :class:`~collections.Iterable`
:param config: A stack object.
:type config: :class:`~bang.stack.Stack`
:rtype: :class:`list` of :class:`~bang.deployers.deployer.Deployer` | [
"Returns",
"a",
"list",
"of",
"deployer",
"objects",
"that",
"*",
"create",
"*",
"cloud",
"resources",
".",
"Each",
"member",
"of",
"the",
"list",
"is",
"responsible",
"for",
"provisioning",
"a",
"single",
"stack",
"resource",
"(",
"e",
".",
"g",
".",
"a... | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.