repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
rwl/pylon | pyreto/continuous/task.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/continuous/task.py#L87-L104 | def _getActorLimits(self):
""" Returns a list of 2-tuples, e.g. [(-3.14, 3.14), (-0.001, 0.001)],
one tuple per parameter, giving min and max for that parameter.
"""
actorLimits = []
for _ in range(self.env.numOffbids):
for _ in self.env.generators:
actorLimits.append((0.0, self.env.maxMarkup))
for _ in range(self.env.numOffbids):
for _ in self.env.generators:
if self.env.maxWithhold is not None:
actorLimits.append((0.0, self.env.maxWithhold))
logger.debug("Actor limits: %s" % actorLimits)
return actorLimits | [
"def",
"_getActorLimits",
"(",
"self",
")",
":",
"actorLimits",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"env",
".",
"numOffbids",
")",
":",
"for",
"_",
"in",
"self",
".",
"env",
".",
"generators",
":",
"actorLimits",
".",
"append"... | Returns a list of 2-tuples, e.g. [(-3.14, 3.14), (-0.001, 0.001)],
one tuple per parameter, giving min and max for that parameter. | [
"Returns",
"a",
"list",
"of",
"2",
"-",
"tuples",
"e",
".",
"g",
".",
"[",
"(",
"-",
"3",
".",
"14",
"3",
".",
"14",
")",
"(",
"-",
"0",
".",
"001",
"0",
".",
"001",
")",
"]",
"one",
"tuple",
"per",
"parameter",
"giving",
"min",
"and",
"max... | python | train |
chainer/chainerui | chainerui/models/result.py | https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/models/result.py#L48-L58 | def create(cls, path_name=None, name=None, project_id=None,
log_modified_at=None, crawlable=True):
"""Initialize an instance and save it to db."""
result = cls(path_name, name, project_id, log_modified_at, crawlable)
db.session.add(result)
db.session.commit()
crawl_result(result, True)
return result | [
"def",
"create",
"(",
"cls",
",",
"path_name",
"=",
"None",
",",
"name",
"=",
"None",
",",
"project_id",
"=",
"None",
",",
"log_modified_at",
"=",
"None",
",",
"crawlable",
"=",
"True",
")",
":",
"result",
"=",
"cls",
"(",
"path_name",
",",
"name",
"... | Initialize an instance and save it to db. | [
"Initialize",
"an",
"instance",
"and",
"save",
"it",
"to",
"db",
"."
] | python | train |
secdev/scapy | scapy/contrib/automotive/someip.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/automotive/someip.py#L190-L216 | def fragment(self, fragsize=1392):
"""Fragment SOME/IP-TP"""
fnb = 0
fl = self
lst = list()
while fl.underlayer is not None:
fnb += 1
fl = fl.underlayer
for p in fl:
s = raw(p[fnb].payload)
nb = (len(s) + fragsize) // fragsize
for i in range(nb):
q = p.copy()
del q[fnb].payload
q[fnb].len = SOMEIP.LEN_OFFSET_TP + \
len(s[i * fragsize:(i + 1) * fragsize])
q[fnb].more_seg = 1
if i == nb - 1:
q[fnb].more_seg = 0
q[fnb].offset += i * fragsize // 16
r = conf.raw_layer(load=s[i * fragsize:(i + 1) * fragsize])
r.overload_fields = p[fnb].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst | [
"def",
"fragment",
"(",
"self",
",",
"fragsize",
"=",
"1392",
")",
":",
"fnb",
"=",
"0",
"fl",
"=",
"self",
"lst",
"=",
"list",
"(",
")",
"while",
"fl",
".",
"underlayer",
"is",
"not",
"None",
":",
"fnb",
"+=",
"1",
"fl",
"=",
"fl",
".",
"under... | Fragment SOME/IP-TP | [
"Fragment",
"SOME",
"/",
"IP",
"-",
"TP"
] | python | train |
aws/sagemaker-python-sdk | src/sagemaker/chainer/estimator.py | https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/chainer/estimator.py#L99-L111 | def hyperparameters(self):
"""Return hyperparameters used by your custom Chainer code during training."""
hyperparameters = super(Chainer, self).hyperparameters()
additional_hyperparameters = {Chainer._use_mpi: self.use_mpi,
Chainer._num_processes: self.num_processes,
Chainer._process_slots_per_host: self.process_slots_per_host,
Chainer._additional_mpi_options: self.additional_mpi_options}
# remove unset keys.
additional_hyperparameters = {k: v for k, v in additional_hyperparameters.items() if v}
hyperparameters.update(Framework._json_encode_hyperparameters(additional_hyperparameters))
return hyperparameters | [
"def",
"hyperparameters",
"(",
"self",
")",
":",
"hyperparameters",
"=",
"super",
"(",
"Chainer",
",",
"self",
")",
".",
"hyperparameters",
"(",
")",
"additional_hyperparameters",
"=",
"{",
"Chainer",
".",
"_use_mpi",
":",
"self",
".",
"use_mpi",
",",
"Chain... | Return hyperparameters used by your custom Chainer code during training. | [
"Return",
"hyperparameters",
"used",
"by",
"your",
"custom",
"Chainer",
"code",
"during",
"training",
"."
] | python | train |
crgwbr/asymmetric-jwt-auth | src/asymmetric_jwt_auth/__init__.py | https://github.com/crgwbr/asymmetric-jwt-auth/blob/eae1a6474b6141edce4d6be2dc1746e18bbf5318/src/asymmetric_jwt_auth/__init__.py#L21-L49 | def generate_key_pair(size=2048, public_exponent=65537, as_string=True):
"""
Generate a public/private key pair.
:param size: Optional. Describes how many bits long the key should be, larger keys provide more security,
currently 1024 and below are considered breakable, and 2048 or 4096 are reasonable default
key sizes for new keys. Defaults to 2048.
:param public_exponent: Optional. Indicates what one mathematical property of the key generation will be.
65537 is the default and should almost always be used.
:param as_string: Optional. If True, return tuple of strings. If false, return tuple of RSA key objects.
Defaults to True.
:return: (PrivateKey<string>, PublicKey<string>)
:return: (
`RSAPrivateKey <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey>`_,
`RSAPublicKey <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey>`_)
"""
private = rsa.generate_private_key(
public_exponent=public_exponent,
key_size=size,
backend=default_backend()
)
public = private.public_key()
if not as_string:
return private, public
pem_private = private.private_bytes(Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()).decode(ENCODING)
pem_public = public.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode(ENCODING)
return pem_private, pem_public | [
"def",
"generate_key_pair",
"(",
"size",
"=",
"2048",
",",
"public_exponent",
"=",
"65537",
",",
"as_string",
"=",
"True",
")",
":",
"private",
"=",
"rsa",
".",
"generate_private_key",
"(",
"public_exponent",
"=",
"public_exponent",
",",
"key_size",
"=",
"size... | Generate a public/private key pair.
:param size: Optional. Describes how many bits long the key should be, larger keys provide more security,
currently 1024 and below are considered breakable, and 2048 or 4096 are reasonable default
key sizes for new keys. Defaults to 2048.
:param public_exponent: Optional. Indicates what one mathematical property of the key generation will be.
65537 is the default and should almost always be used.
:param as_string: Optional. If True, return tuple of strings. If false, return tuple of RSA key objects.
Defaults to True.
:return: (PrivateKey<string>, PublicKey<string>)
:return: (
`RSAPrivateKey <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey>`_,
`RSAPublicKey <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey>`_) | [
"Generate",
"a",
"public",
"/",
"private",
"key",
"pair",
"."
] | python | train |
maljovec/topopy | topopy/MergeTree.py | https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MergeTree.py#L103-L133 | def build(self, X, Y, w=None, edges=None):
""" Assigns data to this object and builds the Merge Tree
@ In, X, an m-by-n array of values specifying m
n-dimensional samples
@ In, Y, a m vector of values specifying the output
responses corresponding to the m samples specified by X
@ In, w, an optional m vector of values specifying the
weights associated to each of the m samples used. Default of
None means all points will be equally weighted
@ In, edges, an optional list of custom edges to use as a
starting point for pruning, or in place of a computed graph.
"""
super(MergeTree, self).build(X, Y, w, edges)
if self.debug:
sys.stdout.write("Merge Tree Computation: ")
start = time.clock()
self.__tree = MergeTreeFloat(
vectorFloat(self.Xnorm.flatten()),
vectorFloat(self.Y),
str(self.gradient),
self.graph_rep.full_graph(),
self.debug,
)
self._internal_build()
if self.debug:
end = time.clock()
sys.stdout.write("%f s\n" % (end - start)) | [
"def",
"build",
"(",
"self",
",",
"X",
",",
"Y",
",",
"w",
"=",
"None",
",",
"edges",
"=",
"None",
")",
":",
"super",
"(",
"MergeTree",
",",
"self",
")",
".",
"build",
"(",
"X",
",",
"Y",
",",
"w",
",",
"edges",
")",
"if",
"self",
".",
"deb... | Assigns data to this object and builds the Merge Tree
@ In, X, an m-by-n array of values specifying m
n-dimensional samples
@ In, Y, a m vector of values specifying the output
responses corresponding to the m samples specified by X
@ In, w, an optional m vector of values specifying the
weights associated to each of the m samples used. Default of
None means all points will be equally weighted
@ In, edges, an optional list of custom edges to use as a
starting point for pruning, or in place of a computed graph. | [
"Assigns",
"data",
"to",
"this",
"object",
"and",
"builds",
"the",
"Merge",
"Tree"
] | python | train |
Azure/msrestazure-for-python | msrestazure/tools.py | https://github.com/Azure/msrestazure-for-python/blob/5f99262305692525d03ca87d2c5356b05c5aa874/msrestazure/tools.py#L149-L162 | def _populate_alternate_kwargs(kwargs):
""" Translates the parsed arguments into a format used by generic ARM commands
such as the resource and lock commands.
"""
resource_namespace = kwargs['namespace']
resource_type = kwargs.get('child_type_{}'.format(kwargs['last_child_num'])) or kwargs['type']
resource_name = kwargs.get('child_name_{}'.format(kwargs['last_child_num'])) or kwargs['name']
_get_parents_from_parts(kwargs)
kwargs['resource_namespace'] = resource_namespace
kwargs['resource_type'] = resource_type
kwargs['resource_name'] = resource_name
return kwargs | [
"def",
"_populate_alternate_kwargs",
"(",
"kwargs",
")",
":",
"resource_namespace",
"=",
"kwargs",
"[",
"'namespace'",
"]",
"resource_type",
"=",
"kwargs",
".",
"get",
"(",
"'child_type_{}'",
".",
"format",
"(",
"kwargs",
"[",
"'last_child_num'",
"]",
")",
")",
... | Translates the parsed arguments into a format used by generic ARM commands
such as the resource and lock commands. | [
"Translates",
"the",
"parsed",
"arguments",
"into",
"a",
"format",
"used",
"by",
"generic",
"ARM",
"commands",
"such",
"as",
"the",
"resource",
"and",
"lock",
"commands",
"."
] | python | train |
Yipit/eventlib | eventlib/core.py | https://github.com/Yipit/eventlib/blob/0cf29e5251a59fcbfc727af5f5157a3bb03832e2/eventlib/core.py#L124-L155 | def process(event_name, data):
"""Iterates over the event handler registry and execute each found
handler.
It takes the event name and its its `data`, passing the return of
`ejson.loads(data)` to the found handlers.
"""
deserialized = loads(data)
event_cls = find_event(event_name)
event = event_cls(event_name, deserialized)
try:
event.clean()
except ValidationError as exc:
if os.environ.get('EVENTLIB_RAISE_ERRORS'):
raise
else:
logger.warning(
"The event system just got an exception while cleaning "
"data for the event '{}'\ndata: {}\nexc: {}".format(
event_name, data, str(exc)))
return
for handler in find_handlers(event_name):
try:
handler(deserialized)
except Exception as exc:
logger.warning(
(u'One of the handlers for the event "{}" has failed with the '
u'following exception: {}').format(event_name, str(exc)))
if getsetting('DEBUG'):
raise exc
event._broadcast() | [
"def",
"process",
"(",
"event_name",
",",
"data",
")",
":",
"deserialized",
"=",
"loads",
"(",
"data",
")",
"event_cls",
"=",
"find_event",
"(",
"event_name",
")",
"event",
"=",
"event_cls",
"(",
"event_name",
",",
"deserialized",
")",
"try",
":",
"event",... | Iterates over the event handler registry and execute each found
handler.
It takes the event name and its its `data`, passing the return of
`ejson.loads(data)` to the found handlers. | [
"Iterates",
"over",
"the",
"event",
"handler",
"registry",
"and",
"execute",
"each",
"found",
"handler",
"."
] | python | train |
Erotemic/utool | utool/util_dict.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dict.py#L1060-L1064 | def dict_assign(dict_, keys, vals):
""" simple method for assigning or setting values with a similar interface
to dict_take """
for key, val in zip(keys, vals):
dict_[key] = val | [
"def",
"dict_assign",
"(",
"dict_",
",",
"keys",
",",
"vals",
")",
":",
"for",
"key",
",",
"val",
"in",
"zip",
"(",
"keys",
",",
"vals",
")",
":",
"dict_",
"[",
"key",
"]",
"=",
"val"
] | simple method for assigning or setting values with a similar interface
to dict_take | [
"simple",
"method",
"for",
"assigning",
"or",
"setting",
"values",
"with",
"a",
"similar",
"interface",
"to",
"dict_take"
] | python | train |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/fields/helpers.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/fields/helpers.py#L36-L53 | def valid_choice(strvalue: str, choices: Iterable[Tuple[str, str]]) -> bool:
"""
Checks that value is one of the valid option in choices, where choices
is a list/tuple of 2-tuples (option, description).
Note that parameters sent by URLconf are always strings
(https://docs.djangoproject.com/en/1.8/topics/http/urls/)
but Python is happy with a string-to-integer-PK lookup, e.g.
.. code-block:: python
Study.objects.get(pk=1)
Study.objects.get(pk="1") # also works
Choices can be non-string, though, so we compare against a string version
of the choice.
"""
return strvalue in [str(x[0]) for x in choices] | [
"def",
"valid_choice",
"(",
"strvalue",
":",
"str",
",",
"choices",
":",
"Iterable",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
")",
"->",
"bool",
":",
"return",
"strvalue",
"in",
"[",
"str",
"(",
"x",
"[",
"0",
"]",
")",
"for",
"x",
"in",
... | Checks that value is one of the valid option in choices, where choices
is a list/tuple of 2-tuples (option, description).
Note that parameters sent by URLconf are always strings
(https://docs.djangoproject.com/en/1.8/topics/http/urls/)
but Python is happy with a string-to-integer-PK lookup, e.g.
.. code-block:: python
Study.objects.get(pk=1)
Study.objects.get(pk="1") # also works
Choices can be non-string, though, so we compare against a string version
of the choice. | [
"Checks",
"that",
"value",
"is",
"one",
"of",
"the",
"valid",
"option",
"in",
"choices",
"where",
"choices",
"is",
"a",
"list",
"/",
"tuple",
"of",
"2",
"-",
"tuples",
"(",
"option",
"description",
")",
"."
] | python | train |
tonioo/sievelib | sievelib/managesieve.py | https://github.com/tonioo/sievelib/blob/88822d1f1daf30ef3dd9ac74911301b0773ef3c8/sievelib/managesieve.py#L183-L217 | def __read_response(self, nblines=-1):
"""Read a response from the server.
In the usual case, we read lines until we find one that looks
like a response (OK|NO|BYE\s*(.+)?).
If *nblines* > 0, we read excactly nblines before returning.
:param nblines: number of lines to read (default : -1)
:rtype: tuple
:return: a tuple of the form (code, data, response). If
nblines is provided, code and data can be equal to None.
"""
resp, code, data = (b"", None, None)
cpt = 0
while True:
try:
line = self.__read_line()
except Response as inst:
code = inst.code
data = inst.data
break
except Literal as inst:
resp += self.__read_block(inst.value)
if not resp.endswith(CRLF):
resp += self.__read_line() + CRLF
continue
if not len(line):
continue
resp += line + CRLF
cpt += 1
if nblines != -1 and cpt == nblines:
break
return (code, data, resp) | [
"def",
"__read_response",
"(",
"self",
",",
"nblines",
"=",
"-",
"1",
")",
":",
"resp",
",",
"code",
",",
"data",
"=",
"(",
"b\"\"",
",",
"None",
",",
"None",
")",
"cpt",
"=",
"0",
"while",
"True",
":",
"try",
":",
"line",
"=",
"self",
".",
"__... | Read a response from the server.
In the usual case, we read lines until we find one that looks
like a response (OK|NO|BYE\s*(.+)?).
If *nblines* > 0, we read excactly nblines before returning.
:param nblines: number of lines to read (default : -1)
:rtype: tuple
:return: a tuple of the form (code, data, response). If
nblines is provided, code and data can be equal to None. | [
"Read",
"a",
"response",
"from",
"the",
"server",
"."
] | python | train |
fastai/fastai | fastai/vision/learner.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/learner.py#L53-L62 | def create_body(arch:Callable, pretrained:bool=True, cut:Optional[Union[int, Callable]]=None):
"Cut off the body of a typically pretrained `model` at `cut` (int) or cut the model as specified by `cut(model)` (function)."
model = arch(pretrained)
cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif isinstance(cut, Callable): return cut(model)
else: raise NamedError("cut must be either integer or a function") | [
"def",
"create_body",
"(",
"arch",
":",
"Callable",
",",
"pretrained",
":",
"bool",
"=",
"True",
",",
"cut",
":",
"Optional",
"[",
"Union",
"[",
"int",
",",
"Callable",
"]",
"]",
"=",
"None",
")",
":",
"model",
"=",
"arch",
"(",
"pretrained",
")",
... | Cut off the body of a typically pretrained `model` at `cut` (int) or cut the model as specified by `cut(model)` (function). | [
"Cut",
"off",
"the",
"body",
"of",
"a",
"typically",
"pretrained",
"model",
"at",
"cut",
"(",
"int",
")",
"or",
"cut",
"the",
"model",
"as",
"specified",
"by",
"cut",
"(",
"model",
")",
"(",
"function",
")",
"."
] | python | train |
J535D165/recordlinkage | recordlinkage/algorithms/nb_sklearn.py | https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/algorithms/nb_sklearn.py#L298-L302 | def _count(self, X, Y):
"""Count and smooth feature occurrences."""
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0) | [
"def",
"_count",
"(",
"self",
",",
"X",
",",
"Y",
")",
":",
"self",
".",
"feature_count_",
"+=",
"safe_sparse_dot",
"(",
"Y",
".",
"T",
",",
"X",
")",
"self",
".",
"class_count_",
"+=",
"Y",
".",
"sum",
"(",
"axis",
"=",
"0",
")"
] | Count and smooth feature occurrences. | [
"Count",
"and",
"smooth",
"feature",
"occurrences",
"."
] | python | train |
jaraco/jaraco.util | jaraco/util/numbers.py | https://github.com/jaraco/jaraco.util/blob/f21071c64f165a5cf844db15e39356e1a47f4b02/jaraco/util/numbers.py#L10-L37 | def coerce(value):
"""
coerce takes a value and attempts to convert it to a float,
or int.
If none of the conversions are successful, the original value is
returned.
>>> coerce('3')
3
>>> coerce('3.0')
3.0
>>> coerce('foo')
'foo'
>>> coerce({})
{}
>>> coerce('{}')
'{}'
"""
with contextlib2.suppress(Exception):
loaded = json.loads(value)
assert isinstance(loaded, numbers.Number)
return loaded
return value | [
"def",
"coerce",
"(",
"value",
")",
":",
"with",
"contextlib2",
".",
"suppress",
"(",
"Exception",
")",
":",
"loaded",
"=",
"json",
".",
"loads",
"(",
"value",
")",
"assert",
"isinstance",
"(",
"loaded",
",",
"numbers",
".",
"Number",
")",
"return",
"l... | coerce takes a value and attempts to convert it to a float,
or int.
If none of the conversions are successful, the original value is
returned.
>>> coerce('3')
3
>>> coerce('3.0')
3.0
>>> coerce('foo')
'foo'
>>> coerce({})
{}
>>> coerce('{}')
'{}' | [
"coerce",
"takes",
"a",
"value",
"and",
"attempts",
"to",
"convert",
"it",
"to",
"a",
"float",
"or",
"int",
"."
] | python | test |
CodyKochmann/strict_functions | strict_functions/trace3.py | https://github.com/CodyKochmann/strict_functions/blob/adaf78084c66929552d80c95f980e7e0c4331478/strict_functions/trace3.py#L42-L44 | def get_locals(f:Frame) -> str:
''' returns a formatted view of the local variables in a frame '''
return pformat({i:f.f_locals[i] for i in f.f_locals if not i.startswith('__')}) | [
"def",
"get_locals",
"(",
"f",
":",
"Frame",
")",
"->",
"str",
":",
"return",
"pformat",
"(",
"{",
"i",
":",
"f",
".",
"f_locals",
"[",
"i",
"]",
"for",
"i",
"in",
"f",
".",
"f_locals",
"if",
"not",
"i",
".",
"startswith",
"(",
"'__'",
")",
"}"... | returns a formatted view of the local variables in a frame | [
"returns",
"a",
"formatted",
"view",
"of",
"the",
"local",
"variables",
"in",
"a",
"frame"
] | python | train |
numenta/htmresearch | htmresearch/frameworks/layers/sequence_object_machine.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/sequence_object_machine.py#L199-L239 | def _getSDRPairs(self, pairs, noise=None, includeRandomLocation=False):
"""
This method takes a list of (location, feature) index pairs (one pair per
cortical column), and returns a sensation dict in the correct format,
adding noise if necessary.
"""
sensations = {}
for col in xrange(self.numColumns):
locationID, featureID = pairs[col]
# generate random location if requested
if includeRandomLocation:
locationID = random.randint(0, self.numLocations-1)
location = self.locations[col][locationID]
# generate union of locations if requested
elif isinstance(locationID, tuple):
location = set()
for idx in list(locationID):
location = location | self.locations[col][idx]
else:
location = self.locations[col][locationID]
# generate empty feature if requested
if featureID == -1:
feature = set()
# generate union of features if requested
elif isinstance(featureID, tuple):
feature = set()
for idx in list(featureID):
feature = feature | self.features[col][idx]
else:
feature = self.features[col][featureID]
if noise is not None:
location = self._addNoise(location, noise, self.externalInputSize)
feature = self._addNoise(feature, noise, self.sensorInputSize)
sensations[col] = (location, feature)
return sensations | [
"def",
"_getSDRPairs",
"(",
"self",
",",
"pairs",
",",
"noise",
"=",
"None",
",",
"includeRandomLocation",
"=",
"False",
")",
":",
"sensations",
"=",
"{",
"}",
"for",
"col",
"in",
"xrange",
"(",
"self",
".",
"numColumns",
")",
":",
"locationID",
",",
"... | This method takes a list of (location, feature) index pairs (one pair per
cortical column), and returns a sensation dict in the correct format,
adding noise if necessary. | [
"This",
"method",
"takes",
"a",
"list",
"of",
"(",
"location",
"feature",
")",
"index",
"pairs",
"(",
"one",
"pair",
"per",
"cortical",
"column",
")",
"and",
"returns",
"a",
"sensation",
"dict",
"in",
"the",
"correct",
"format",
"adding",
"noise",
"if",
... | python | train |
InQuest/python-sandboxapi | sandboxapi/falcon.py | https://github.com/InQuest/python-sandboxapi/blob/9bad73f453e25d7d23e7b4b1ae927f44a35a5bc3/sandboxapi/falcon.py#L45-L71 | def analyze(self, handle, filename):
"""Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: File hash as a string
"""
# multipart post files.
files = {"file" : (filename, handle)}
# ensure the handle is at offset 0.
handle.seek(0)
response = self._request("/submit/file", method='POST', files=files)
try:
if response.status_code == 201:
# good response
return response.json()['job_id']
else:
raise sandboxapi.SandboxError("api error in analyze: {r}".format(r=response.content.decode('utf-8')))
except (ValueError, KeyError) as e:
raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e)) | [
"def",
"analyze",
"(",
"self",
",",
"handle",
",",
"filename",
")",
":",
"# multipart post files.",
"files",
"=",
"{",
"\"file\"",
":",
"(",
"filename",
",",
"handle",
")",
"}",
"# ensure the handle is at offset 0.",
"handle",
".",
"seek",
"(",
"0",
")",
"re... | Submit a file for analysis.
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: File hash as a string | [
"Submit",
"a",
"file",
"for",
"analysis",
"."
] | python | train |
sdispater/orator | orator/query/builder.py | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/query/builder.py#L107-L125 | def select_raw(self, expression, bindings=None):
"""
Add a new raw select expression to the query
:param expression: The raw expression
:type expression: str
:param bindings: The expression bindings
:type bindings: list
:return: The current QueryBuilder instance
:rtype: QueryBuilder
"""
self.add_select(QueryExpression(expression))
if bindings:
self.add_binding(bindings, "select")
return self | [
"def",
"select_raw",
"(",
"self",
",",
"expression",
",",
"bindings",
"=",
"None",
")",
":",
"self",
".",
"add_select",
"(",
"QueryExpression",
"(",
"expression",
")",
")",
"if",
"bindings",
":",
"self",
".",
"add_binding",
"(",
"bindings",
",",
"\"select\... | Add a new raw select expression to the query
:param expression: The raw expression
:type expression: str
:param bindings: The expression bindings
:type bindings: list
:return: The current QueryBuilder instance
:rtype: QueryBuilder | [
"Add",
"a",
"new",
"raw",
"select",
"expression",
"to",
"the",
"query"
] | python | train |
neithere/argh | argh/constants.py | https://github.com/neithere/argh/blob/dcd3253f2994400a6a58a700c118c53765bc50a4/argh/constants.py#L55-L91 | def _expand_help(self, action):
"""
This method is copied verbatim from ArgumentDefaultsHelpFormatter with
a couple of lines added just before the end. Reason: we need to
`repr()` default values instead of simply inserting them as is.
This helps notice, for example, an empty string as the default value;
moreover, it prevents breaking argparse due to logical quirks inside
of its formatters.
Ideally this could be achieved by simply defining
:attr:`DEFAULT_ARGUMENT_TEMPLATE` as ``{default!r}`` but unfortunately
argparse only supports the old printf syntax.
"""
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is argparse.SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
# XXX this is added in Argh vs. argparse.ArgumentDefaultsHelpFormatter
# (avoiding empty strings, otherwise Argparse would die with
# an IndexError in _format_action)
#
if 'default' in params:
if params['default'] is None:
params['default'] = '-'
else:
params['default'] = repr(params['default'])
#
# /
return self._get_help_string(action) % params | [
"def",
"_expand_help",
"(",
"self",
",",
"action",
")",
":",
"params",
"=",
"dict",
"(",
"vars",
"(",
"action",
")",
",",
"prog",
"=",
"self",
".",
"_prog",
")",
"for",
"name",
"in",
"list",
"(",
"params",
")",
":",
"if",
"params",
"[",
"name",
"... | This method is copied verbatim from ArgumentDefaultsHelpFormatter with
a couple of lines added just before the end. Reason: we need to
`repr()` default values instead of simply inserting them as is.
This helps notice, for example, an empty string as the default value;
moreover, it prevents breaking argparse due to logical quirks inside
of its formatters.
Ideally this could be achieved by simply defining
:attr:`DEFAULT_ARGUMENT_TEMPLATE` as ``{default!r}`` but unfortunately
argparse only supports the old printf syntax. | [
"This",
"method",
"is",
"copied",
"verbatim",
"from",
"ArgumentDefaultsHelpFormatter",
"with",
"a",
"couple",
"of",
"lines",
"added",
"just",
"before",
"the",
"end",
".",
"Reason",
":",
"we",
"need",
"to",
"repr",
"()",
"default",
"values",
"instead",
"of",
... | python | test |
bykof/billomapy | billomapy/billomapy.py | https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L837-L850 | def get_all_tags_of_article(self, article_id):
"""
Get all tags of article
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param article_id: the article id
:return: list
"""
return self._iterate_through_pages(
get_function=self.get_tags_of_article_per_page,
resource=ARTICLE_TAGS,
**{'article_id': article_id}
) | [
"def",
"get_all_tags_of_article",
"(",
"self",
",",
"article_id",
")",
":",
"return",
"self",
".",
"_iterate_through_pages",
"(",
"get_function",
"=",
"self",
".",
"get_tags_of_article_per_page",
",",
"resource",
"=",
"ARTICLE_TAGS",
",",
"*",
"*",
"{",
"'article_... | Get all tags of article
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param article_id: the article id
:return: list | [
"Get",
"all",
"tags",
"of",
"article",
"This",
"will",
"iterate",
"over",
"all",
"pages",
"until",
"it",
"gets",
"all",
"elements",
".",
"So",
"if",
"the",
"rate",
"limit",
"exceeded",
"it",
"will",
"throw",
"an",
"Exception",
"and",
"you",
"will",
"get"... | python | train |
timothydmorton/VESPA | vespa/transit_basic.py | https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/transit_basic.py#L745-L753 | def fit_traptransit(ts,fs,p0):
"""
Fits trapezoid model to provided ts,fs
"""
pfit,success = leastsq(traptransit_resid,p0,args=(ts,fs))
if success not in [1,2,3,4]:
raise NoFitError
#logging.debug('success = {}'.format(success))
return pfit | [
"def",
"fit_traptransit",
"(",
"ts",
",",
"fs",
",",
"p0",
")",
":",
"pfit",
",",
"success",
"=",
"leastsq",
"(",
"traptransit_resid",
",",
"p0",
",",
"args",
"=",
"(",
"ts",
",",
"fs",
")",
")",
"if",
"success",
"not",
"in",
"[",
"1",
",",
"2",
... | Fits trapezoid model to provided ts,fs | [
"Fits",
"trapezoid",
"model",
"to",
"provided",
"ts",
"fs"
] | python | train |
jmgilman/Neolib | neolib/pyamf/util/pure.py | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/util/pure.py#L477-L485 | def read_utf8_string(self, length):
"""
Reads a UTF-8 string from the stream.
@rtype: C{unicode}
"""
s = struct.unpack("%s%ds" % (self.endian, length), self.read(length))[0]
return s.decode('utf-8') | [
"def",
"read_utf8_string",
"(",
"self",
",",
"length",
")",
":",
"s",
"=",
"struct",
".",
"unpack",
"(",
"\"%s%ds\"",
"%",
"(",
"self",
".",
"endian",
",",
"length",
")",
",",
"self",
".",
"read",
"(",
"length",
")",
")",
"[",
"0",
"]",
"return",
... | Reads a UTF-8 string from the stream.
@rtype: C{unicode} | [
"Reads",
"a",
"UTF",
"-",
"8",
"string",
"from",
"the",
"stream",
"."
] | python | train |
ChristianTremblay/BAC0 | BAC0/sql/sql.py | https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/sql/sql.py#L147-L153 | def his_from_sql(self, db_name, point):
"""
Retrive point histories from SQL database
"""
his = self._read_from_sql('select * from "%s"' % "history", db_name)
his.index = his["index"].apply(Timestamp)
return his.set_index("index")[point] | [
"def",
"his_from_sql",
"(",
"self",
",",
"db_name",
",",
"point",
")",
":",
"his",
"=",
"self",
".",
"_read_from_sql",
"(",
"'select * from \"%s\"'",
"%",
"\"history\"",
",",
"db_name",
")",
"his",
".",
"index",
"=",
"his",
"[",
"\"index\"",
"]",
".",
"a... | Retrive point histories from SQL database | [
"Retrive",
"point",
"histories",
"from",
"SQL",
"database"
] | python | train |
ffcalculator/fantasydata-python | fantasy_data/FantasyData.py | https://github.com/ffcalculator/fantasydata-python/blob/af90cac1e80d8356cffaa80621ee513201f6c661/fantasy_data/FantasyData.py#L161-L166 | def get_projected_player_game_stats_by_player(self, season, week, player_id):
"""
Projected Player Game Stats by Player
"""
result = self._method_call("PlayerGameProjectionStatsByPlayerID/{season}/{week}/{player_id}", "projections", season=season, week=week, player_id=player_id)
return result | [
"def",
"get_projected_player_game_stats_by_player",
"(",
"self",
",",
"season",
",",
"week",
",",
"player_id",
")",
":",
"result",
"=",
"self",
".",
"_method_call",
"(",
"\"PlayerGameProjectionStatsByPlayerID/{season}/{week}/{player_id}\"",
",",
"\"projections\"",
",",
"s... | Projected Player Game Stats by Player | [
"Projected",
"Player",
"Game",
"Stats",
"by",
"Player"
] | python | train |
nickmckay/LiPD-utilities | Python/lipd/timeseries.py | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L340-L369 | def _extract_table(table_data, current, pc, ts, tt):
"""
Use the given table data to create a time series entry for each column in the table.
:param dict table_data: Table data
:param dict current: LiPD root data
:param str pc: paleoData or chronData
:param list ts: Time series (so far)
:param bool summary: Summary Table or not
:return list ts: Time series (so far)
"""
current["tableType"] = tt
# Get root items for this table
current = _extract_table_root(table_data, current, pc)
# Add in modelNumber and tableNumber if this is "ens" or "summ" table
current = _extract_table_model(table_data, current, tt)
# Add age, depth, and year columns to root if available
_table_tmp = _extract_special(current, table_data)
try:
# Start creating entries using dictionary copies.
for _col_name, _col_data in table_data["columns"].items():
# Add column data onto root items. Copy so we don't ruin original data
_col_tmp = _extract_columns(_col_data, copy.deepcopy(_table_tmp), pc)
try:
ts.append(_col_tmp)
except Exception as e:
logger_ts.warn("extract_table: Unable to create ts entry, {}".format(e))
except Exception as e:
logger_ts.error("extract_table: {}".format(e))
return ts | [
"def",
"_extract_table",
"(",
"table_data",
",",
"current",
",",
"pc",
",",
"ts",
",",
"tt",
")",
":",
"current",
"[",
"\"tableType\"",
"]",
"=",
"tt",
"# Get root items for this table",
"current",
"=",
"_extract_table_root",
"(",
"table_data",
",",
"current",
... | Use the given table data to create a time series entry for each column in the table.
:param dict table_data: Table data
:param dict current: LiPD root data
:param str pc: paleoData or chronData
:param list ts: Time series (so far)
:param bool summary: Summary Table or not
:return list ts: Time series (so far) | [
"Use",
"the",
"given",
"table",
"data",
"to",
"create",
"a",
"time",
"series",
"entry",
"for",
"each",
"column",
"in",
"the",
"table",
"."
] | python | train |
hasgeek/coaster | coaster/sqlalchemy/statemanager.py | https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/sqlalchemy/statemanager.py#L709-L738 | def add_conditional_state(self, name, state, validator, class_validator=None, cache_for=None, label=None):
"""
Add a conditional state that combines an existing state with a validator
that must also pass. The validator receives the object on which the property
is present as a parameter.
:param str name: Name of the new state
:param ManagedState state: Existing state that this is based on
:param validator: Function that will be called with the host object as a parameter
:param class_validator: Function that will be called when the state is queried
on the class instead of the instance. Falls back to ``validator`` if not specified.
Receives the class as the parameter
:param cache_for: Integer or function that indicates how long ``validator``'s
result can be cached (not applicable to ``class_validator``). ``None`` implies
no cache, ``0`` implies indefinite cache (until invalidated by a transition)
and any other integer is the number of seconds for which to cache the assertion
:param label: Label for this state (string or 2-tuple)
TODO: `cache_for`'s implementation is currently pending a test case demonstrating
how it will be used.
"""
# We'll accept a ManagedState with grouped values, but not a ManagedStateGroup
if not isinstance(state, ManagedState):
raise TypeError("Not a managed state: %s" % repr(state))
elif state.statemanager != self:
raise ValueError("State %s is not associated with this state manager" % repr(state))
if isinstance(label, tuple) and len(label) == 2:
label = NameTitle(*label)
self._add_state_internal(name, state.value, label=label,
validator=validator, class_validator=class_validator, cache_for=cache_for) | [
"def",
"add_conditional_state",
"(",
"self",
",",
"name",
",",
"state",
",",
"validator",
",",
"class_validator",
"=",
"None",
",",
"cache_for",
"=",
"None",
",",
"label",
"=",
"None",
")",
":",
"# We'll accept a ManagedState with grouped values, but not a ManagedStat... | Add a conditional state that combines an existing state with a validator
that must also pass. The validator receives the object on which the property
is present as a parameter.
:param str name: Name of the new state
:param ManagedState state: Existing state that this is based on
:param validator: Function that will be called with the host object as a parameter
:param class_validator: Function that will be called when the state is queried
on the class instead of the instance. Falls back to ``validator`` if not specified.
Receives the class as the parameter
:param cache_for: Integer or function that indicates how long ``validator``'s
result can be cached (not applicable to ``class_validator``). ``None`` implies
no cache, ``0`` implies indefinite cache (until invalidated by a transition)
and any other integer is the number of seconds for which to cache the assertion
:param label: Label for this state (string or 2-tuple)
TODO: `cache_for`'s implementation is currently pending a test case demonstrating
how it will be used. | [
"Add",
"a",
"conditional",
"state",
"that",
"combines",
"an",
"existing",
"state",
"with",
"a",
"validator",
"that",
"must",
"also",
"pass",
".",
"The",
"validator",
"receives",
"the",
"object",
"on",
"which",
"the",
"property",
"is",
"present",
"as",
"a",
... | python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L10652-L10662 | def remote_log_data_block_send(self, target_system, target_component, seqno, data, force_mavlink1=False):
'''
Send a block of log data to remote location
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
seqno : log data block sequence number (uint32_t)
data : log data block (uint8_t)
'''
return self.send(self.remote_log_data_block_encode(target_system, target_component, seqno, data), force_mavlink1=force_mavlink1) | [
"def",
"remote_log_data_block_send",
"(",
"self",
",",
"target_system",
",",
"target_component",
",",
"seqno",
",",
"data",
",",
"force_mavlink1",
"=",
"False",
")",
":",
"return",
"self",
".",
"send",
"(",
"self",
".",
"remote_log_data_block_encode",
"(",
"targ... | Send a block of log data to remote location
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
seqno : log data block sequence number (uint32_t)
data : log data block (uint8_t) | [
"Send",
"a",
"block",
"of",
"log",
"data",
"to",
"remote",
"location"
] | python | train |
05bit/peewee-async | peewee_async.py | https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L193-L204 | async def get_or_create(self, model_, defaults=None, **kwargs):
"""Try to get an object or create it with the specified defaults.
Return 2-tuple containing the model instance and a boolean
indicating whether the instance was created.
"""
try:
return (await self.get(model_, **kwargs)), False
except model_.DoesNotExist:
data = defaults or {}
data.update({k: v for k, v in kwargs.items() if '__' not in k})
return (await self.create(model_, **data)), True | [
"async",
"def",
"get_or_create",
"(",
"self",
",",
"model_",
",",
"defaults",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"(",
"await",
"self",
".",
"get",
"(",
"model_",
",",
"*",
"*",
"kwargs",
")",
")",
",",
"False",
... | Try to get an object or create it with the specified defaults.
Return 2-tuple containing the model instance and a boolean
indicating whether the instance was created. | [
"Try",
"to",
"get",
"an",
"object",
"or",
"create",
"it",
"with",
"the",
"specified",
"defaults",
"."
] | python | train |
RedHatQE/Sentaku | examples/todo_example/ux.py | https://github.com/RedHatQE/Sentaku/blob/b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c/examples/todo_example/ux.py#L44-L50 | def get_by(self, name):
"""
find a todo list element by name
"""
item = self.controlled_list.get_by(name)
if item:
return TodoElementUX(parent=self, controlled_element=item) | [
"def",
"get_by",
"(",
"self",
",",
"name",
")",
":",
"item",
"=",
"self",
".",
"controlled_list",
".",
"get_by",
"(",
"name",
")",
"if",
"item",
":",
"return",
"TodoElementUX",
"(",
"parent",
"=",
"self",
",",
"controlled_element",
"=",
"item",
")"
] | find a todo list element by name | [
"find",
"a",
"todo",
"list",
"element",
"by",
"name"
] | python | train |
ontio/ontology-python-sdk | ontology/smart_contract/native_contract/ontid.py | https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/ontid.py#L110-L146 | def parse_ddo(ont_id: str, serialized_ddo: str or bytes) -> dict:
"""
This interface is used to deserialize a hexadecimal string into a DDO object in the from of dict.
:param ont_id: the unique ID for identity.
:param serialized_ddo: an serialized description object of ONT ID in form of str or bytes.
:return: a description object of ONT ID in the from of dict.
"""
if len(serialized_ddo) == 0:
return dict()
if isinstance(serialized_ddo, str):
stream = StreamManager.get_stream(bytearray.fromhex(serialized_ddo))
elif isinstance(serialized_ddo, bytes):
stream = StreamManager.get_stream(serialized_ddo)
else:
raise SDKException(ErrorCode.params_type_error('bytes or str parameter is required.'))
reader = BinaryReader(stream)
try:
public_key_bytes = reader.read_var_bytes()
except SDKException:
public_key_bytes = b''
try:
attribute_bytes = reader.read_var_bytes()
except SDKException:
attribute_bytes = b''
try:
recovery_bytes = reader.read_var_bytes()
except SDKException:
recovery_bytes = b''
if len(recovery_bytes) != 0:
b58_recovery = Address(recovery_bytes).b58encode()
else:
b58_recovery = ''
pub_keys = OntId.parse_pub_keys(ont_id, public_key_bytes)
attribute_list = OntId.parse_attributes(attribute_bytes)
ddo = dict(Owners=pub_keys, Attributes=attribute_list, Recovery=b58_recovery, OntId=ont_id)
return ddo | [
"def",
"parse_ddo",
"(",
"ont_id",
":",
"str",
",",
"serialized_ddo",
":",
"str",
"or",
"bytes",
")",
"->",
"dict",
":",
"if",
"len",
"(",
"serialized_ddo",
")",
"==",
"0",
":",
"return",
"dict",
"(",
")",
"if",
"isinstance",
"(",
"serialized_ddo",
","... | This interface is used to deserialize a hexadecimal string into a DDO object in the from of dict.
:param ont_id: the unique ID for identity.
:param serialized_ddo: an serialized description object of ONT ID in form of str or bytes.
:return: a description object of ONT ID in the from of dict. | [
"This",
"interface",
"is",
"used",
"to",
"deserialize",
"a",
"hexadecimal",
"string",
"into",
"a",
"DDO",
"object",
"in",
"the",
"from",
"of",
"dict",
"."
] | python | train |
swisscom/cleanerversion | versions/admin.py | https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/admin.py#L211-L224 | def will_not_clone(self, request, *args, **kwargs):
"""
Add save but not clone capability in the changeview
"""
paths = request.path_info.split('/')
index_of_object_id = paths.index("will_not_clone") - 1
object_id = paths[index_of_object_id]
self.change_view(request, object_id)
admin_wordInUrl = index_of_object_id - 3
# This gets the adminsite for the app, and the model name and joins
# together with /
path = '/' + '/'.join(paths[admin_wordInUrl:index_of_object_id])
return HttpResponseRedirect(path) | [
"def",
"will_not_clone",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"paths",
"=",
"request",
".",
"path_info",
".",
"split",
"(",
"'/'",
")",
"index_of_object_id",
"=",
"paths",
".",
"index",
"(",
"\"will_not_clone\... | Add save but not clone capability in the changeview | [
"Add",
"save",
"but",
"not",
"clone",
"capability",
"in",
"the",
"changeview"
] | python | train |
vatlab/SoS | src/sos/eval.py | https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/eval.py#L96-L98 | def SoS_eval(expr: str, extra_dict: dict = {}) -> Any:
'''Evaluate an expression with sos dict.'''
return eval(expr, env.sos_dict.dict(), extra_dict) | [
"def",
"SoS_eval",
"(",
"expr",
":",
"str",
",",
"extra_dict",
":",
"dict",
"=",
"{",
"}",
")",
"->",
"Any",
":",
"return",
"eval",
"(",
"expr",
",",
"env",
".",
"sos_dict",
".",
"dict",
"(",
")",
",",
"extra_dict",
")"
] | Evaluate an expression with sos dict. | [
"Evaluate",
"an",
"expression",
"with",
"sos",
"dict",
"."
] | python | train |
Arkq/flake8-requirements | src/flake8_requirements/checker.py | https://github.com/Arkq/flake8-requirements/blob/d7cb84af2429a63635528b531111a5da527bf2d1/src/flake8_requirements/checker.py#L322-L385 | def run(self):
"""Run checker."""
def split(module):
"""Split module into submodules."""
return tuple(module.split("."))
def modcmp(lib=(), test=()):
"""Compare import modules."""
if len(lib) > len(test):
return False
return all(a == b for a, b in zip(lib, test))
mods_1st_party = set()
mods_3rd_party = set()
# Get 1st party modules (used for absolute imports).
modules = [project2module(self.setup.keywords.get('name', ""))]
if modules[0] in self.known_modules:
modules = self.known_modules[modules[0]]
mods_1st_party.update(split(x) for x in modules)
requirements = self.requirements
if self.setup.redirected:
# Use requirements from setup if available.
requirements = self.setup.get_requirements(
setup=self.processing_setup_py,
tests=True,
)
# Get 3rd party module names based on requirements.
for requirement in requirements:
modules = [project2module(requirement.project_name)]
if modules[0] in KNOWN_3RD_PARTIES:
modules = KNOWN_3RD_PARTIES[modules[0]]
if modules[0] in self.known_modules:
modules = self.known_modules[modules[0]]
mods_3rd_party.update(split(x) for x in modules)
# When processing setup.py file, forcefully add setuptools to the
# project requirements. Setuptools might be required to build the
# project, even though it is not listed as a requirement - this
# package is required to run setup.py, so listing it as a setup
# requirement would be pointless.
if self.processing_setup_py:
mods_3rd_party.add(split("setuptools"))
for node in ImportVisitor(self.tree).imports:
_mod = split(node.mod)
_alt = split(node.alt)
if any([_mod[0] == x for x in STDLIB]):
continue
if any([modcmp(x, _mod) or modcmp(x, _alt)
for x in mods_1st_party]):
continue
if any([modcmp(x, _mod) or modcmp(x, _alt)
for x in mods_3rd_party]):
continue
yield (
node.line,
node.offset,
ERRORS['I900'].format(pkg=node.mod),
Flake8Checker,
) | [
"def",
"run",
"(",
"self",
")",
":",
"def",
"split",
"(",
"module",
")",
":",
"\"\"\"Split module into submodules.\"\"\"",
"return",
"tuple",
"(",
"module",
".",
"split",
"(",
"\".\"",
")",
")",
"def",
"modcmp",
"(",
"lib",
"=",
"(",
")",
",",
"test",
... | Run checker. | [
"Run",
"checker",
"."
] | python | train |
rwl/godot | godot/xdot_parser.py | https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/xdot_parser.py#L378-L384 | def _proc_bspline(self, tokens, filled):
""" Returns the components of a B-spline (Bezier curve). """
pts = [(p["x"], p["y"]) for p in tokens["points"]]
component = BSpline(pen=self.pen, points=pts, filled=filled)
return component | [
"def",
"_proc_bspline",
"(",
"self",
",",
"tokens",
",",
"filled",
")",
":",
"pts",
"=",
"[",
"(",
"p",
"[",
"\"x\"",
"]",
",",
"p",
"[",
"\"y\"",
"]",
")",
"for",
"p",
"in",
"tokens",
"[",
"\"points\"",
"]",
"]",
"component",
"=",
"BSpline",
"("... | Returns the components of a B-spline (Bezier curve). | [
"Returns",
"the",
"components",
"of",
"a",
"B",
"-",
"spline",
"(",
"Bezier",
"curve",
")",
"."
] | python | test |
aouyar/PyMunin | pysysinfo/postgresql.py | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/postgresql.py#L88-L101 | def _createStatsDict(self, headers, rows):
"""Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name.
"""
dbstats = {}
for row in rows:
dbstats[row[0]] = dict(zip(headers[1:], row[1:]))
return dbstats | [
"def",
"_createStatsDict",
"(",
"self",
",",
"headers",
",",
"rows",
")",
":",
"dbstats",
"=",
"{",
"}",
"for",
"row",
"in",
"rows",
":",
"dbstats",
"[",
"row",
"[",
"0",
"]",
"]",
"=",
"dict",
"(",
"zip",
"(",
"headers",
"[",
"1",
":",
"]",
",... | Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name. | [
"Utility",
"method",
"that",
"returns",
"database",
"stats",
"as",
"a",
"nested",
"dictionary",
"."
] | python | train |
Unidata/MetPy | metpy/io/_nexrad_msgs/parse_spec.py | https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/io/_nexrad_msgs/parse_spec.py#L126-L135 | def fix_var_name(var_name):
"""Clean up and apply standard formatting to variable names."""
name = var_name.strip()
for char in '(). /#,':
name = name.replace(char, '_')
name = name.replace('+', 'pos_')
name = name.replace('-', 'neg_')
if name.endswith('_'):
name = name[:-1]
return name | [
"def",
"fix_var_name",
"(",
"var_name",
")",
":",
"name",
"=",
"var_name",
".",
"strip",
"(",
")",
"for",
"char",
"in",
"'(). /#,'",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"char",
",",
"'_'",
")",
"name",
"=",
"name",
".",
"replace",
"(",
"... | Clean up and apply standard formatting to variable names. | [
"Clean",
"up",
"and",
"apply",
"standard",
"formatting",
"to",
"variable",
"names",
"."
] | python | train |
Chilipp/psy-simple | psy_simple/colors.py | https://github.com/Chilipp/psy-simple/blob/7d916406a6d3c3c27c0b7102f98fef07a4da0a61/psy_simple/colors.py#L155-L198 | def get_cmap(name, lut=None):
"""
Returns the specified colormap.
Parameters
----------
name: str or :class:`matplotlib.colors.Colormap`
If a colormap, it returned unchanged.
%(cmap_note)s
lut: int
An integer giving the number of entries desired in the lookup table
Returns
-------
matplotlib.colors.Colormap
The colormap specified by `name`
See Also
--------
show_colormaps: A function to display all available colormaps
Notes
-----
Different from the :func::`matpltolib.pyplot.get_cmap` function, this
function changes the number of colors if `name` is a
:class:`matplotlib.colors.Colormap` instance to match the given `lut`."""
if name in rcParams['colors.cmaps']:
colors = rcParams['colors.cmaps'][name]
lut = lut or len(colors)
return FixedColorMap.from_list(name=name, colors=colors, N=lut)
elif name in _cmapnames:
colors = _cmapnames[name]
lut = lut or len(colors)
return FixedColorMap.from_list(name=name, colors=colors, N=lut)
else:
cmap = mpl_get_cmap(name)
# Note: we could include the `lut` in the call of mpl_get_cmap, but
# this raises a ValueError for colormaps like 'viridis' in mpl version
# 1.5. Besides the mpl_get_cmap function does not modify the lut if
# it does not match
if lut is not None and cmap.N != lut:
cmap = FixedColorMap.from_list(
name=cmap.name, colors=cmap(np.linspace(0, 1, lut)), N=lut)
return cmap | [
"def",
"get_cmap",
"(",
"name",
",",
"lut",
"=",
"None",
")",
":",
"if",
"name",
"in",
"rcParams",
"[",
"'colors.cmaps'",
"]",
":",
"colors",
"=",
"rcParams",
"[",
"'colors.cmaps'",
"]",
"[",
"name",
"]",
"lut",
"=",
"lut",
"or",
"len",
"(",
"colors"... | Returns the specified colormap.
Parameters
----------
name: str or :class:`matplotlib.colors.Colormap`
If a colormap, it returned unchanged.
%(cmap_note)s
lut: int
An integer giving the number of entries desired in the lookup table
Returns
-------
matplotlib.colors.Colormap
The colormap specified by `name`
See Also
--------
show_colormaps: A function to display all available colormaps
Notes
-----
Different from the :func::`matpltolib.pyplot.get_cmap` function, this
function changes the number of colors if `name` is a
:class:`matplotlib.colors.Colormap` instance to match the given `lut`. | [
"Returns",
"the",
"specified",
"colormap",
"."
] | python | train |
bioasp/iggy | src/sif_parser.py | https://github.com/bioasp/iggy/blob/451dee74f277d822d64cf8f3859c94b2f2b6d4db/src/sif_parser.py#L99-L118 | def p_identlist(self, t):
'''identlist : IDENT
| NOT IDENT
| IDENT AND identlist
| NOT IDENT AND identlist
'''
if len(t)==5 :
#print(t[1],t[2],t[3],t[4])
t[0] = t[1]+t[2]+t[3]+t[4]
elif len(t)==4 :
#print(t[1],t[2],t[3])
t[0] = t[1]+t[2]+t[3]
elif len(t)==3 :
#print(t[1],t[2])
t[0] = t[1]+t[2]
elif len(t)==2 :
#print(t[0],t[1])
t[0]=t[1]
else:
print("Syntax error at '",str(t),"'") | [
"def",
"p_identlist",
"(",
"self",
",",
"t",
")",
":",
"if",
"len",
"(",
"t",
")",
"==",
"5",
":",
"#print(t[1],t[2],t[3],t[4])",
"t",
"[",
"0",
"]",
"=",
"t",
"[",
"1",
"]",
"+",
"t",
"[",
"2",
"]",
"+",
"t",
"[",
"3",
"]",
"+",
"t",
"[",
... | identlist : IDENT
| NOT IDENT
| IDENT AND identlist
| NOT IDENT AND identlist | [
"identlist",
":",
"IDENT",
"|",
"NOT",
"IDENT",
"|",
"IDENT",
"AND",
"identlist",
"|",
"NOT",
"IDENT",
"AND",
"identlist"
] | python | train |
ihgazni2/elist | elist/elist.py | https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L3318-L3370 | def remove_many(ol,values,seqs,**kwargs):
'''
from elist.elist import *
ol = [1,'a',3,'b',5,'a',6,'a',7,'b',8,'b',9]
id(ol)
new = remove_many(ol,['a','b'],[1,2])
ol
new
id(ol)
id(new)
####
ol = [1,'a',3,'b',5,'a',6,'a',7,'b',8,'b',9]
id(ol)
rslt = remove_many(ol,['a','b'],[1,2],mode="original")
ol
rslt
id(ol)
id(rslt)
'''
if('mode' in kwargs):
mode = kwargs["mode"]
else:
mode = "new"
values = copy.deepcopy(values)
seqs = copy.deepcopy(seqs)
cursors = [-1] * values.__len__()
new = []
length = ol.__len__()
cpol = copy.deepcopy(ol)
for i in range(0,length):
label = True
for j in range(0,cursors.__len__()):
which = seqs[j]
value = values[j]
if(cpol[i] == value):
cursors[j] = cursors[j] + 1
if(cursors[j] == which):
label = False
break
else:
pass
else:
pass
if(label):
new.append(cpol[i])
else:
pass
if(mode == "new"):
return(new)
else:
ol.clear()
ol.extend(new)
return(ol) | [
"def",
"remove_many",
"(",
"ol",
",",
"values",
",",
"seqs",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"'mode'",
"in",
"kwargs",
")",
":",
"mode",
"=",
"kwargs",
"[",
"\"mode\"",
"]",
"else",
":",
"mode",
"=",
"\"new\"",
"values",
"=",
"copy",
... | from elist.elist import *
ol = [1,'a',3,'b',5,'a',6,'a',7,'b',8,'b',9]
id(ol)
new = remove_many(ol,['a','b'],[1,2])
ol
new
id(ol)
id(new)
####
ol = [1,'a',3,'b',5,'a',6,'a',7,'b',8,'b',9]
id(ol)
rslt = remove_many(ol,['a','b'],[1,2],mode="original")
ol
rslt
id(ol)
id(rslt) | [
"from",
"elist",
".",
"elist",
"import",
"*",
"ol",
"=",
"[",
"1",
"a",
"3",
"b",
"5",
"a",
"6",
"a",
"7",
"b",
"8",
"b",
"9",
"]",
"id",
"(",
"ol",
")",
"new",
"=",
"remove_many",
"(",
"ol",
"[",
"a",
"b",
"]",
"[",
"1",
"2",
"]",
")",... | python | valid |
woolfson-group/isambard | isambard/optimisation/optimizer.py | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/optimisation/optimizer.py#L47-L67 | def buff_internal_eval(params):
"""Builds and evaluates BUFF internal energy of a model in parallelization
Parameters
----------
params: list
Tuple containing the specification to be built, the sequence
and the parameters for model building.
Returns
-------
model.bude_score: float
BUFF internal energy score to be assigned to particle fitness
value.
"""
specification, sequence, parsed_ind = params
model = specification(*parsed_ind)
model.build()
model.pack_new_sequences(sequence)
return model.buff_internal_energy.total_energy | [
"def",
"buff_internal_eval",
"(",
"params",
")",
":",
"specification",
",",
"sequence",
",",
"parsed_ind",
"=",
"params",
"model",
"=",
"specification",
"(",
"*",
"parsed_ind",
")",
"model",
".",
"build",
"(",
")",
"model",
".",
"pack_new_sequences",
"(",
"s... | Builds and evaluates BUFF internal energy of a model in parallelization
Parameters
----------
params: list
Tuple containing the specification to be built, the sequence
and the parameters for model building.
Returns
-------
model.bude_score: float
BUFF internal energy score to be assigned to particle fitness
value. | [
"Builds",
"and",
"evaluates",
"BUFF",
"internal",
"energy",
"of",
"a",
"model",
"in",
"parallelization"
] | python | train |
hugapi/hug | hug/output_format.py | https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/output_format.py#L233-L250 | def video(video_type, video_mime, doc=None):
"""Dynamically creates a video type handler for the specified video type"""
@on_valid(video_mime)
def video_handler(data, **kwargs):
if hasattr(data, 'read'):
return data
elif hasattr(data, 'save'):
output = stream()
data.save(output, format=video_type.upper())
output.seek(0)
return output
elif hasattr(data, 'render'):
return data.render()
elif os.path.isfile(data):
return open(data, 'rb')
video_handler.__doc__ = doc or "{0} formatted video".format(video_type)
return video_handler | [
"def",
"video",
"(",
"video_type",
",",
"video_mime",
",",
"doc",
"=",
"None",
")",
":",
"@",
"on_valid",
"(",
"video_mime",
")",
"def",
"video_handler",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"data",
",",
"'read'",
")"... | Dynamically creates a video type handler for the specified video type | [
"Dynamically",
"creates",
"a",
"video",
"type",
"handler",
"for",
"the",
"specified",
"video",
"type"
] | python | train |
bitesofcode/projexui | projexui/widgets/xwalkthroughwidget/xwalkthroughscene.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xwalkthroughwidget/xwalkthroughscene.py#L66-L81 | def findReference(self, name, cls=QtGui.QWidget):
"""
Looks up a reference from the widget based on its object name.
:param name | <str>
cls | <subclass of QtGui.QObject>
:return <QtGui.QObject> || None
"""
ref_widget = self._referenceWidget
if not ref_widget:
return None
if ref_widget.objectName() == name:
return ref_widget
return ref_widget.findChild(cls, name) | [
"def",
"findReference",
"(",
"self",
",",
"name",
",",
"cls",
"=",
"QtGui",
".",
"QWidget",
")",
":",
"ref_widget",
"=",
"self",
".",
"_referenceWidget",
"if",
"not",
"ref_widget",
":",
"return",
"None",
"if",
"ref_widget",
".",
"objectName",
"(",
")",
"... | Looks up a reference from the widget based on its object name.
:param name | <str>
cls | <subclass of QtGui.QObject>
:return <QtGui.QObject> || None | [
"Looks",
"up",
"a",
"reference",
"from",
"the",
"widget",
"based",
"on",
"its",
"object",
"name",
".",
":",
"param",
"name",
"|",
"<str",
">",
"cls",
"|",
"<subclass",
"of",
"QtGui",
".",
"QObject",
">",
":",
"return",
"<QtGui",
".",
"QObject",
">",
... | python | train |
agoragames/kairos | kairos/cassandra_backend.py | https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/cassandra_backend.py#L175-L185 | def _insert(self, name, value, timestamp, intervals, **kwargs):
'''
Insert the new value.
'''
if self._value_type in QUOTE_TYPES and not QUOTE_MATCH.match(value):
value = "'%s'"%(value)
for interval,config in self._intervals.items():
timestamps = self._normalize_timestamps(timestamp, intervals, config)
for tstamp in timestamps:
self._insert_data(name, value, tstamp, interval, config, **kwargs) | [
"def",
"_insert",
"(",
"self",
",",
"name",
",",
"value",
",",
"timestamp",
",",
"intervals",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_value_type",
"in",
"QUOTE_TYPES",
"and",
"not",
"QUOTE_MATCH",
".",
"match",
"(",
"value",
")",
":",
... | Insert the new value. | [
"Insert",
"the",
"new",
"value",
"."
] | python | train |
google/grr | grr/server/grr_response_server/client_index.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/client_index.py#L375-L406 | def LookupClients(self, keywords):
"""Returns a list of client URNs associated with keywords.
Args:
keywords: The list of keywords to search by.
Returns:
A list of client URNs.
Raises:
ValueError: A string (single keyword) was passed instead of an iterable.
"""
if isinstance(keywords, string_types):
raise ValueError(
"Keywords should be an iterable, not a string (got %s)." % keywords)
start_time, filtered_keywords = self._AnalyzeKeywords(keywords)
keyword_map = data_store.REL_DB.ListClientsForKeywords(
list(map(self._NormalizeKeyword, filtered_keywords)),
start_time=start_time)
results = itervalues(keyword_map)
relevant_set = set(next(results))
for hits in results:
relevant_set &= set(hits)
if not relevant_set:
return []
return sorted(relevant_set) | [
"def",
"LookupClients",
"(",
"self",
",",
"keywords",
")",
":",
"if",
"isinstance",
"(",
"keywords",
",",
"string_types",
")",
":",
"raise",
"ValueError",
"(",
"\"Keywords should be an iterable, not a string (got %s).\"",
"%",
"keywords",
")",
"start_time",
",",
"fi... | Returns a list of client URNs associated with keywords.
Args:
keywords: The list of keywords to search by.
Returns:
A list of client URNs.
Raises:
ValueError: A string (single keyword) was passed instead of an iterable. | [
"Returns",
"a",
"list",
"of",
"client",
"URNs",
"associated",
"with",
"keywords",
"."
] | python | train |
fossasia/knittingpattern | knittingpattern/Row.py | https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Row.py#L46-L54 | def _instructions_changed(self, change):
"""Call when there is a change in the instructions."""
if change.adds():
for index, instruction in change.items():
if isinstance(instruction, dict):
in_row = self._parser.instruction_in_row(self, instruction)
self.instructions[index] = in_row
else:
instruction.transfer_to_row(self) | [
"def",
"_instructions_changed",
"(",
"self",
",",
"change",
")",
":",
"if",
"change",
".",
"adds",
"(",
")",
":",
"for",
"index",
",",
"instruction",
"in",
"change",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"instruction",
",",
"dict",
")",... | Call when there is a change in the instructions. | [
"Call",
"when",
"there",
"is",
"a",
"change",
"in",
"the",
"instructions",
"."
] | python | valid |
greenbone/ospd | ospd/misc.py | https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L742-L759 | def port_list_compress(port_list):
""" Compress a port list and return a string. """
if not port_list or len(port_list) == 0:
LOGGER.info("Invalid or empty port list.")
return ''
port_list = sorted(set(port_list))
compressed_list = []
for key, group in itertools.groupby(enumerate(port_list),
lambda t: t[1] - t[0]):
group = list(group)
if group[0][1] == group[-1][1]:
compressed_list.append(str(group[0][1]))
else:
compressed_list.append(str(group[0][1]) + '-' + str(group[-1][1]))
return ','.join(compressed_list) | [
"def",
"port_list_compress",
"(",
"port_list",
")",
":",
"if",
"not",
"port_list",
"or",
"len",
"(",
"port_list",
")",
"==",
"0",
":",
"LOGGER",
".",
"info",
"(",
"\"Invalid or empty port list.\"",
")",
"return",
"''",
"port_list",
"=",
"sorted",
"(",
"set",... | Compress a port list and return a string. | [
"Compress",
"a",
"port",
"list",
"and",
"return",
"a",
"string",
"."
] | python | train |
ellmetha/django-machina | machina/apps/forum_conversation/views.py | https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/views.py#L209-L226 | def get_post_form_kwargs(self):
""" Returns the keyword arguments for instantiating the post form. """
kwargs = {
'user': self.request.user,
'forum': self.get_forum(),
'topic': self.get_topic(),
}
post = self.get_post()
if post:
kwargs.update({'instance': post})
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs | [
"def",
"get_post_form_kwargs",
"(",
"self",
")",
":",
"kwargs",
"=",
"{",
"'user'",
":",
"self",
".",
"request",
".",
"user",
",",
"'forum'",
":",
"self",
".",
"get_forum",
"(",
")",
",",
"'topic'",
":",
"self",
".",
"get_topic",
"(",
")",
",",
"}",
... | Returns the keyword arguments for instantiating the post form. | [
"Returns",
"the",
"keyword",
"arguments",
"for",
"instantiating",
"the",
"post",
"form",
"."
] | python | train |
ralphje/imagemounter | imagemounter/parser.py | https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L152-L160 | def get_volumes(self):
"""Gets a list of all volumes of all disks, concatenating :func:`Disk.get_volumes` of all disks.
:rtype: list"""
volumes = []
for disk in self.disks:
volumes.extend(disk.get_volumes())
return volumes | [
"def",
"get_volumes",
"(",
"self",
")",
":",
"volumes",
"=",
"[",
"]",
"for",
"disk",
"in",
"self",
".",
"disks",
":",
"volumes",
".",
"extend",
"(",
"disk",
".",
"get_volumes",
"(",
")",
")",
"return",
"volumes"
] | Gets a list of all volumes of all disks, concatenating :func:`Disk.get_volumes` of all disks.
:rtype: list | [
"Gets",
"a",
"list",
"of",
"all",
"volumes",
"of",
"all",
"disks",
"concatenating",
":",
"func",
":",
"Disk",
".",
"get_volumes",
"of",
"all",
"disks",
"."
] | python | train |
lebinh/aq | aq/engines.py | https://github.com/lebinh/aq/blob/eb366dd063db25598daa70a216170776e83383f4/aq/engines.py#L75-L89 | def load_table(self, table):
"""
Load resources as specified by given table into our db.
"""
region = table.database if table.database else self.default_region
resource_name, collection_name = table.table.split('_', 1)
# we use underscore "_" instead of dash "-" for region name but boto3 need dash
boto_region_name = region.replace('_', '-')
resource = self.boto3_session.resource(resource_name, region_name=boto_region_name)
if not hasattr(resource, collection_name):
raise QueryError(
'Unknown collection <{0}> of resource <{1}>'.format(collection_name, resource_name))
self.attach_region(region)
self.refresh_table(region, table.table, resource, getattr(resource, collection_name)) | [
"def",
"load_table",
"(",
"self",
",",
"table",
")",
":",
"region",
"=",
"table",
".",
"database",
"if",
"table",
".",
"database",
"else",
"self",
".",
"default_region",
"resource_name",
",",
"collection_name",
"=",
"table",
".",
"table",
".",
"split",
"("... | Load resources as specified by given table into our db. | [
"Load",
"resources",
"as",
"specified",
"by",
"given",
"table",
"into",
"our",
"db",
"."
] | python | train |
hannes-brt/hebel | hebel/pycuda_ops/cublas.py | https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L2363-L2374 | def cublasSger(handle, m, n, alpha, x, incx, y, incy, A, lda):
"""
Rank-1 operation on real general matrix.
"""
status = _libcublas.cublasSger_v2(handle,
m, n,
ctypes.byref(ctypes.c_float(alpha)),
int(x), incx,
int(y), incy, int(A), lda)
cublasCheckStatus(status) | [
"def",
"cublasSger",
"(",
"handle",
",",
"m",
",",
"n",
",",
"alpha",
",",
"x",
",",
"incx",
",",
"y",
",",
"incy",
",",
"A",
",",
"lda",
")",
":",
"status",
"=",
"_libcublas",
".",
"cublasSger_v2",
"(",
"handle",
",",
"m",
",",
"n",
",",
"ctyp... | Rank-1 operation on real general matrix. | [
"Rank",
"-",
"1",
"operation",
"on",
"real",
"general",
"matrix",
"."
] | python | train |
RLBot/RLBot | src/main/python/rlbot/agents/base_agent.py | https://github.com/RLBot/RLBot/blob/3f9b6bec8b9baf4dcfff0f6cf3103c8744ac6234/src/main/python/rlbot/agents/base_agent.py#L96-L105 | def send_quick_chat(self, team_only, quick_chat):
"""
Sends a quick chat to the other bots.
If it is QuickChats.CHAT_NONE or None it does not send a quick chat to other bots.
:param team_only: either True or False, this says if the quick chat should only go to team members.
:param quick_chat: The quick chat selection, available chats are defined in quick_chats.py
"""
if quick_chat == QuickChats.CHAT_NONE or quick_chat is None:
return
self.__quick_chat_func(team_only, quick_chat) | [
"def",
"send_quick_chat",
"(",
"self",
",",
"team_only",
",",
"quick_chat",
")",
":",
"if",
"quick_chat",
"==",
"QuickChats",
".",
"CHAT_NONE",
"or",
"quick_chat",
"is",
"None",
":",
"return",
"self",
".",
"__quick_chat_func",
"(",
"team_only",
",",
"quick_cha... | Sends a quick chat to the other bots.
If it is QuickChats.CHAT_NONE or None it does not send a quick chat to other bots.
:param team_only: either True or False, this says if the quick chat should only go to team members.
:param quick_chat: The quick chat selection, available chats are defined in quick_chats.py | [
"Sends",
"a",
"quick",
"chat",
"to",
"the",
"other",
"bots",
".",
"If",
"it",
"is",
"QuickChats",
".",
"CHAT_NONE",
"or",
"None",
"it",
"does",
"not",
"send",
"a",
"quick",
"chat",
"to",
"other",
"bots",
".",
":",
"param",
"team_only",
":",
"either",
... | python | train |
ftao/python-ifcfg | src/ifcfg/__init__.py | https://github.com/ftao/python-ifcfg/blob/724a4a103088fee7dc2bc2f63b0b9006a614e1d0/src/ifcfg/__init__.py#L18-L39 | def get_parser_class():
"""
Returns the parser according to the system platform
"""
global distro
if distro == 'Linux':
Parser = parser.LinuxParser
if not os.path.exists(Parser.get_command()[0]):
Parser = parser.UnixIPParser
elif distro in ['Darwin', 'MacOSX']:
Parser = parser.MacOSXParser
elif distro == 'Windows':
# For some strange reason, Windows will always be win32, see:
# https://stackoverflow.com/a/2145582/405682
Parser = parser.WindowsParser
else:
Parser = parser.NullParser
Log.error("Unknown distro type '%s'." % distro)
Log.debug("Distro detected as '%s'" % distro)
Log.debug("Using '%s'" % Parser)
return Parser | [
"def",
"get_parser_class",
"(",
")",
":",
"global",
"distro",
"if",
"distro",
"==",
"'Linux'",
":",
"Parser",
"=",
"parser",
".",
"LinuxParser",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"Parser",
".",
"get_command",
"(",
")",
"[",
"0",
"]",
... | Returns the parser according to the system platform | [
"Returns",
"the",
"parser",
"according",
"to",
"the",
"system",
"platform"
] | python | train |
GeorgeArgyros/symautomata | symautomata/pdacnf.py | https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/pdacnf.py#L106-L117 | def get(self, statediag):
"""
Args:
statediag (list): The states of the PDA
Returns:
list: A reduced list of states using BFS
"""
if len(statediag) < 1:
print 'PDA is empty and can not be reduced'
return statediag
newstatediag = self.bfs(statediag, statediag[0])
return newstatediag | [
"def",
"get",
"(",
"self",
",",
"statediag",
")",
":",
"if",
"len",
"(",
"statediag",
")",
"<",
"1",
":",
"print",
"'PDA is empty and can not be reduced'",
"return",
"statediag",
"newstatediag",
"=",
"self",
".",
"bfs",
"(",
"statediag",
",",
"statediag",
"[... | Args:
statediag (list): The states of the PDA
Returns:
list: A reduced list of states using BFS | [
"Args",
":",
"statediag",
"(",
"list",
")",
":",
"The",
"states",
"of",
"the",
"PDA",
"Returns",
":",
"list",
":",
"A",
"reduced",
"list",
"of",
"states",
"using",
"BFS"
] | python | train |
twisted/axiom | benchmark/benchlib.py | https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/benchmark/benchlib.py#L13-L22 | def itemTypeWithSomeAttributes(attributeTypes):
"""
Create a new L{Item} subclass with L{numAttributes} integers in its
schema.
"""
class SomeItem(Item):
typeName = 'someitem_' + str(typeNameCounter())
for i, attributeType in enumerate(attributeTypes):
locals()['attr_' + str(i)] = attributeType()
return SomeItem | [
"def",
"itemTypeWithSomeAttributes",
"(",
"attributeTypes",
")",
":",
"class",
"SomeItem",
"(",
"Item",
")",
":",
"typeName",
"=",
"'someitem_'",
"+",
"str",
"(",
"typeNameCounter",
"(",
")",
")",
"for",
"i",
",",
"attributeType",
"in",
"enumerate",
"(",
"at... | Create a new L{Item} subclass with L{numAttributes} integers in its
schema. | [
"Create",
"a",
"new",
"L",
"{",
"Item",
"}",
"subclass",
"with",
"L",
"{",
"numAttributes",
"}",
"integers",
"in",
"its",
"schema",
"."
] | python | train |
deepmind/pysc2 | pysc2/lib/stopwatch.py | https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/stopwatch.py#L242-L252 | def parse(s):
"""Parse the output below to create a new StopWatch."""
stopwatch = StopWatch()
for line in s.splitlines():
if line.strip():
parts = line.split(None)
name = parts[0]
if name != "%": # ie not the header line
rest = (float(v) for v in parts[2:])
stopwatch.times[parts[0]].merge(Stat.build(*rest))
return stopwatch | [
"def",
"parse",
"(",
"s",
")",
":",
"stopwatch",
"=",
"StopWatch",
"(",
")",
"for",
"line",
"in",
"s",
".",
"splitlines",
"(",
")",
":",
"if",
"line",
".",
"strip",
"(",
")",
":",
"parts",
"=",
"line",
".",
"split",
"(",
"None",
")",
"name",
"=... | Parse the output below to create a new StopWatch. | [
"Parse",
"the",
"output",
"below",
"to",
"create",
"a",
"new",
"StopWatch",
"."
] | python | train |
danielhrisca/asammdf | benchmarks/gen_images.py | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/benchmarks/gen_images.py#L8-L94 | def generate_graphs(result, topic, aspect, for_doc=False):
""" genrate graphs from result file
Parameters
----------
result : str
path to result file
topic : str
benchmark topic; for example "Open file" or "Save file"
aspect : str
performance indiitemsor; can be "ram" (RAM memory usage) or "time" (elapsed time)
for_doc : bool
wether the source code is used inside the documentation
"""
result = result
topic = topic
aspect = aspect
for_doc = for_doc
with open(result, 'r') as f:
lines = f.readlines()
platform = 'x86' if '32 bit' in lines[2] else 'x64'
idx = [i for i, line in enumerate(lines) if line.startswith('==')]
table_spans = {'open': [idx[1] + 1, idx[2]],
'save': [idx[4] + 1, idx[5]],
'get': [idx[7] + 1, idx[8]],
'convert' : [idx[10] + 1, idx[11]],
'merge' : [idx[13] + 1, idx[14]]}
start, stop = table_spans[topic.lower()]
items = [l[:50].strip(' \t\r\n\0*') for l in lines[start: stop]]
time = np.array([int(l[50:61].strip(' \t\r\n\0*')) for l in lines[start: stop]])
ram = np.array([int(l[61:].strip(' \t\r\n\0*')) for l in lines[start: stop]])
if aspect == 'ram':
array = ram
else:
array = time
y_pos = list(range(len(items)))
fig, ax = plt.subplots()
fig.set_size_inches(15, 3.8 / 12 * len(items) + 1.2)
asam_pos = [i for i, c in enumerate(items) if c.startswith('asam')]
mdfreader_pos = [i for i, c in enumerate(items) if c.startswith('mdfreader')]
ax.barh(asam_pos, array[asam_pos], color='green', ecolor='green')
ax.barh(mdfreader_pos, array[mdfreader_pos], color='blue', ecolor='black')
ax.set_yticks(y_pos)
ax.set_yticklabels(items)
ax.invert_yaxis()
ax.set_xlabel('Time [ms]' if aspect == 'time' else 'RAM [MB]')
if topic == 'Get':
ax.set_title('Get all channels (36424 calls) - {}'
.format('time' if aspect == 'time' else 'ram usage'))
else:
ax.set_title('{} test file - {}'
.format(topic, 'time' if aspect == 'time' else 'ram usage'))
ax.xaxis.grid()
fig.subplots_adjust(bottom=0.72/fig.get_figheight(),
top=1-0.48/fig.get_figheight(),
left=0.4,
right=0.9)
if aspect == 'time':
if topic == 'Get':
name = '{}_get_all_channels.png'.format(platform)
else:
name = '{}_{}.png'.format(platform, topic.lower())
else:
if topic == 'Get':
name = '{}_get_all_channels_ram_usage.png'.format(platform)
else:
name = '{}_{}_ram_usage.png'.format(platform, topic.lower())
if for_doc:
plt.show()
else:
plt.savefig(name, dpi=300) | [
"def",
"generate_graphs",
"(",
"result",
",",
"topic",
",",
"aspect",
",",
"for_doc",
"=",
"False",
")",
":",
"result",
"=",
"result",
"topic",
"=",
"topic",
"aspect",
"=",
"aspect",
"for_doc",
"=",
"for_doc",
"with",
"open",
"(",
"result",
",",
"'r'",
... | genrate graphs from result file
Parameters
----------
result : str
path to result file
topic : str
benchmark topic; for example "Open file" or "Save file"
aspect : str
performance indiitemsor; can be "ram" (RAM memory usage) or "time" (elapsed time)
for_doc : bool
wether the source code is used inside the documentation | [
"genrate",
"graphs",
"from",
"result",
"file"
] | python | train |
hubo1016/vlcp | vlcp/event/core.py | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L303-L518 | def main(self, installsignal = True, sendinit = True):
'''
Start main loop
'''
if installsignal:
sigterm = signal(SIGTERM, self._quitsignal)
sigint = signal(SIGINT, self._quitsignal)
try:
from signal import SIGUSR1
sigusr1 = signal(SIGUSR1, self._tracesignal)
except Exception:
pass
try:
if sendinit:
self.queue.append(SystemControlEvent(SystemControlEvent.INIT), True)
def processSyscall():
while self.syscallfunc is not None:
r = getattr(self, 'syscallrunnable', None)
if r is None:
self.syscallfunc = None
break
try:
try:
retvalue = self.syscallfunc(self, processEvent)
except Exception:
(t, v, tr) = sys.exc_info()
self.syscallfunc = None
self.syscallrunnable = None
r.send((SyscallReturnEvent(exception=(t, v, tr)), self.syscallmatcher))
else:
self.syscallfunc = None
self.syscallrunnable = None
r.send((SyscallReturnEvent(retvalue=retvalue), self.syscallmatcher))
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('processing syscall failed with exception')
self.unregisterall(r)
def processEvent(event, emptys = ()):
if self.debugging:
self.logger.debug('Processing event %s', repr(event))
runnables = self.matchtree.matchesWithMatchers(event)
for r, m in runnables:
try:
self.syscallfunc = None
self.syscallrunnable = None
if self.debugging:
self.logger.debug('Send event to %r, matched with %r', r, m)
r.send((event, m))
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('processing event %s failed with exception', repr(event))
self.unregisterall(r)
processSyscall()
if not event.canignore and not event.canignorenow():
self.eventtree.insert(event)
self.queue.block(event, emptys)
else:
for e in emptys:
processEvent(e)
def processQueueEvent(event):
"""
Optimized with queue events
"""
if self.debugging:
self.logger.debug('Processing event %s', repr(event))
is_valid = event.is_valid()
if is_valid is None:
processEvent(event)
else:
while event.is_valid():
result = self.matchtree.matchfirstwithmatcher(event)
if result is None:
break
r, m = result
try:
self.syscallfunc = None
self.syscallrunnable = None
if self.debugging:
self.logger.debug('Send event to %r, matched with %r', r, m)
r.send((event, m))
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('processing event %s failed with exception', repr(event))
self.unregisterall(r)
processSyscall()
def processYields():
while self._pending_runnables:
i = 0
while i < len(self._pending_runnables):
r = self._pending_runnables[i]
try:
next(r)
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('Resuming %r failed with exception', r)
self.unregisterall(r)
processSyscall()
i += 1
del self._pending_runnables[:i]
canquit = False
self.logger.info('Main loop started')
current_time = self.current_time = time()
processYields()
quitMatcher = SystemControlEvent.createMatcher(type=SystemControlEvent.QUIT)
while len(self.registerIndex) > len(self.daemons):
if self.debugging:
self.logger.debug('Blocked events: %d', len(self.queue.blockEvents))
self.logger.debug('Blocked events list: %r', list(self.queue.blockEvents.keys()))
if self.quitting:
self.logger.debug('Routines still not quit: %r', list(self.registerIndex.keys()))
if self.quitsignal:
self.quit()
if canquit and not self.queue.canPop() and not self.timers:
if self.quitting:
break
else:
self.quit(True)
self.queue.append(SystemControlLowPriorityEvent(SystemControlLowPriorityEvent.LOOP), True)
processedEvents = 0
while self.queue.canPop() and (self.processevents is None or processedEvents < self.processevents):
e, qes, emptys = self.queue.pop()
# Queue events will not enqueue again
if not e.canignore and not e.canignorenow():
# The event might block, must process it first
processEvent(e, emptys)
for qe in qes:
processQueueEvent(qe)
else:
for qe in qes:
processQueueEvent(qe)
processEvent(e, emptys)
processYields()
if quitMatcher.isMatch(e):
if e.daemononly:
runnables = list(self.daemons)
else:
runnables = list(self.registerIndex.keys())
for r in runnables:
try:
r.throw(QuitException)
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('Runnable quit failed with exception')
self.unregisterall(r)
processSyscall()
processYields()
if self.quitsignal:
self.quit()
processedEvents += 1
if len(self.registerIndex) <= len(self.daemons):
break
end_time = time()
if end_time - current_time > 1:
self.logger.warning("An iteration takes %r seconds to process", end_time - current_time)
if self.generatecontinue or self.queue.canPop():
wait = 0
elif not self.timers:
wait = None
else:
wait = self.timers.top().timestamp - end_time
if wait < 0:
wait = 0
events, canquit = self.polling.pollEvents(wait)
for e in events:
self.queue.append(e, True)
current_time = self.current_time = time()
now = current_time + 0.1
while self.timers and self.timers.topPriority() < now:
t = self.timers.top()
if t.interval is not None:
t.timestamp += t.interval
self.timers.setpriority(t, t.timestamp)
else:
self.timers.pop()
self.queue.append(TimerEvent(t), True)
if self.generatecontinue:
self.queue.append(SystemControlEvent(SystemControlEvent.CONTINUE), True)
self.generatecontinue = False
if self.registerIndex:
if len(self.registerIndex) > len(self.daemons):
self.logger.warning('Some runnables are not quit, doing cleanup')
self.logger.warning('Runnables list: %r', set(self.registerIndex.keys()).difference(self.daemons))
for r in list(self.registerIndex.keys()):
try:
r.close()
except Exception:
self.logger.exception('Runnable quit failed with exception')
finally:
self.unregisterall(r)
self.logger.info('Main loop quit normally')
finally:
if installsignal:
signal(SIGTERM, sigterm)
signal(SIGINT, sigint)
try:
signal(SIGUSR1, sigusr1)
except Exception:
pass | [
"def",
"main",
"(",
"self",
",",
"installsignal",
"=",
"True",
",",
"sendinit",
"=",
"True",
")",
":",
"if",
"installsignal",
":",
"sigterm",
"=",
"signal",
"(",
"SIGTERM",
",",
"self",
".",
"_quitsignal",
")",
"sigint",
"=",
"signal",
"(",
"SIGINT",
"... | Start main loop | [
"Start",
"main",
"loop"
] | python | train |
shi-cong/PYSTUDY | PYSTUDY/middleware/rabbitmqlib.py | https://github.com/shi-cong/PYSTUDY/blob/c8da7128ea18ecaa5849f2066d321e70d6f97f70/PYSTUDY/middleware/rabbitmqlib.py#L107-L114 | def store_data(self, data):
"""
存储数据到存储队列
:param data: 数据
:return:
"""
self.ch.basic_publish(exchange='', routing_key=self.store_queue,
properties=pika.BasicProperties(delivery_mode=2), body=data) | [
"def",
"store_data",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"ch",
".",
"basic_publish",
"(",
"exchange",
"=",
"''",
",",
"routing_key",
"=",
"self",
".",
"store_queue",
",",
"properties",
"=",
"pika",
".",
"BasicProperties",
"(",
"delivery_mode",... | 存储数据到存储队列
:param data: 数据
:return: | [
"存储数据到存储队列",
":",
"param",
"data",
":",
"数据",
":",
"return",
":"
] | python | train |
honzamach/pydgets | pydgets/widgets.py | https://github.com/honzamach/pydgets/blob/5ca4ce19fc2d9b5f41441fb9163810f8ca502e79/pydgets/widgets.py#L1159-L1176 | def fmt_row(self, columns, dimensions, row, **settings):
"""
Format single table row.
"""
cells = []
i = 0
for column in columns:
cells.append(self.fmt_cell(
row[i],
dimensions[i],
column,
**settings[self.SETTING_TEXT_FORMATING]
)
)
i += 1
return self.bchar('v', 'm', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING]) + \
self.bchar('v', 'm', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING]).join(cells) + \
self.bchar('v', 'm', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING]) | [
"def",
"fmt_row",
"(",
"self",
",",
"columns",
",",
"dimensions",
",",
"row",
",",
"*",
"*",
"settings",
")",
":",
"cells",
"=",
"[",
"]",
"i",
"=",
"0",
"for",
"column",
"in",
"columns",
":",
"cells",
".",
"append",
"(",
"self",
".",
"fmt_cell",
... | Format single table row. | [
"Format",
"single",
"table",
"row",
"."
] | python | train |
odlgroup/odl | odl/contrib/datasets/images/cambridge.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/images/cambridge.py#L32-L59 | def convert(image, shape, gray=False, dtype='float64', normalize='max'):
"""Convert image to standardized format.
Several properties of the input image may be changed including the shape,
data type and maximal value of the image. In addition, this function may
convert the image into an ODL object and/or a gray scale image.
"""
image = image.astype(dtype)
if gray:
image[..., 0] *= 0.2126
image[..., 1] *= 0.7152
image[..., 2] *= 0.0722
image = np.sum(image, axis=2)
if shape is not None:
image = skimage.transform.resize(image, shape, mode='constant')
image = image.astype(dtype)
if normalize == 'max':
image /= image.max()
elif normalize == 'sum':
image /= image.sum()
else:
assert False
return image | [
"def",
"convert",
"(",
"image",
",",
"shape",
",",
"gray",
"=",
"False",
",",
"dtype",
"=",
"'float64'",
",",
"normalize",
"=",
"'max'",
")",
":",
"image",
"=",
"image",
".",
"astype",
"(",
"dtype",
")",
"if",
"gray",
":",
"image",
"[",
"...",
",",... | Convert image to standardized format.
Several properties of the input image may be changed including the shape,
data type and maximal value of the image. In addition, this function may
convert the image into an ODL object and/or a gray scale image. | [
"Convert",
"image",
"to",
"standardized",
"format",
"."
] | python | train |
theonion/django-bulbs | bulbs/content/serializers.py | https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/serializers.py#L24-L28 | def to_internal_value(self, value):
"""Convert to integer id."""
natural_key = value.split("_")
content_type = ContentType.objects.get_by_natural_key(*natural_key)
return content_type.id | [
"def",
"to_internal_value",
"(",
"self",
",",
"value",
")",
":",
"natural_key",
"=",
"value",
".",
"split",
"(",
"\"_\"",
")",
"content_type",
"=",
"ContentType",
".",
"objects",
".",
"get_by_natural_key",
"(",
"*",
"natural_key",
")",
"return",
"content_type"... | Convert to integer id. | [
"Convert",
"to",
"integer",
"id",
"."
] | python | train |
agoragames/haigha | haigha/reader.py | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/reader.py#L123-L145 | def read_bits(self, num):
'''
Read several bits packed into the same field. Will return as a list.
The bit field itself is little-endian, though the order of the
returned array looks big-endian for ease of decomposition.
Reader('\x02').read_bits(2) -> [False,True]
Reader('\x08').read_bits(2) ->
[False,True,False,False,False,False,False,False]
first_field, second_field = Reader('\x02').read_bits(2)
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise ValueError if num < 0 or num > 9
'''
# Perform a faster check on underflow
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
if num < 0 or num >= 9:
raise ValueError("8 bits per field")
field = ord(self._input[self._pos])
result = map(lambda x: field >> x & 1, xrange(num))
self._pos += 1
return result | [
"def",
"read_bits",
"(",
"self",
",",
"num",
")",
":",
"# Perform a faster check on underflow",
"if",
"self",
".",
"_pos",
">=",
"self",
".",
"_end_pos",
":",
"raise",
"self",
".",
"BufferUnderflow",
"(",
")",
"if",
"num",
"<",
"0",
"or",
"num",
">=",
"9... | Read several bits packed into the same field. Will return as a list.
The bit field itself is little-endian, though the order of the
returned array looks big-endian for ease of decomposition.
Reader('\x02').read_bits(2) -> [False,True]
Reader('\x08').read_bits(2) ->
[False,True,False,False,False,False,False,False]
first_field, second_field = Reader('\x02').read_bits(2)
Will raise BufferUnderflow if there's not enough bytes in the buffer.
Will raise ValueError if num < 0 or num > 9 | [
"Read",
"several",
"bits",
"packed",
"into",
"the",
"same",
"field",
".",
"Will",
"return",
"as",
"a",
"list",
".",
"The",
"bit",
"field",
"itself",
"is",
"little",
"-",
"endian",
"though",
"the",
"order",
"of",
"the",
"returned",
"array",
"looks",
"big"... | python | train |
TangoAlpha/liffylights | liffylights.py | https://github.com/TangoAlpha/liffylights/blob/7ae9ed947ecf039734014d98b6e18de0f26fa1d3/liffylights.py#L130-L153 | def _gen_header(self, sequence, payloadtype):
""" Create packet header. """
protocol = bytearray.fromhex("00 34")
source = bytearray.fromhex("42 52 4b 52")
target = bytearray.fromhex("00 00 00 00 00 00 00 00")
reserved1 = bytearray.fromhex("00 00 00 00 00 00")
sequence = pack("<B", sequence)
ack = pack(">B", 3)
reserved2 = bytearray.fromhex("00 00 00 00 00 00 00 00")
packet_type = pack("<H", payloadtype)
reserved3 = bytearray.fromhex("00 00")
# assemble header
header = bytearray(protocol)
header.extend(source)
header.extend(target)
header.extend(reserved1)
header.extend(ack)
header.extend(sequence)
header.extend(reserved2)
header.extend(packet_type)
header.extend(reserved3)
return header | [
"def",
"_gen_header",
"(",
"self",
",",
"sequence",
",",
"payloadtype",
")",
":",
"protocol",
"=",
"bytearray",
".",
"fromhex",
"(",
"\"00 34\"",
")",
"source",
"=",
"bytearray",
".",
"fromhex",
"(",
"\"42 52 4b 52\"",
")",
"target",
"=",
"bytearray",
".",
... | Create packet header. | [
"Create",
"packet",
"header",
"."
] | python | train |
seung-lab/cloud-volume | cloudvolume/cacheservice.py | https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/cacheservice.py#L177-L233 | def check_info_validity(self):
"""
ValueError if cache differs at all from source data layer with
an excepton for volume_size which prints a warning.
"""
cache_info = self.get_json('info')
if not cache_info:
return
fresh_info = self.vol._fetch_info()
mismatch_error = ValueError("""
Data layer info file differs from cache. Please check whether this
change invalidates your cache.
If VALID do one of:
1) Manually delete the cache (see location below)
2) Refresh your on-disk cache as follows:
vol = CloudVolume(..., cache=False) # refreshes from source
vol.cache = True
vol.commit_info() # writes to disk
If INVALID do one of:
1) Delete the cache manually (see cache location below)
2) Instantiate as follows:
vol = CloudVolume(..., cache=False) # refreshes info from source
vol.flush_cache() # deletes cache
vol.cache = True
vol.commit_info() # writes info to disk
CACHED: {cache}
SOURCE: {source}
CACHE LOCATION: {path}
""".format(
cache=cache_info,
source=fresh_info,
path=self.path
))
try:
fresh_sizes = [ scale['size'] for scale in fresh_info['scales'] ]
cache_sizes = [ scale['size'] for scale in cache_info['scales'] ]
except KeyError:
raise mismatch_error
for scale in fresh_info['scales']:
del scale['size']
for scale in cache_info['scales']:
del scale['size']
if fresh_info != cache_info:
raise mismatch_error
if fresh_sizes != cache_sizes:
warn("WARNING: Data layer bounding box differs in cache.\nCACHED: {}\nSOURCE: {}\nCACHE LOCATION:{}".format(
cache_sizes, fresh_sizes, self.path
)) | [
"def",
"check_info_validity",
"(",
"self",
")",
":",
"cache_info",
"=",
"self",
".",
"get_json",
"(",
"'info'",
")",
"if",
"not",
"cache_info",
":",
"return",
"fresh_info",
"=",
"self",
".",
"vol",
".",
"_fetch_info",
"(",
")",
"mismatch_error",
"=",
"Valu... | ValueError if cache differs at all from source data layer with
an excepton for volume_size which prints a warning. | [
"ValueError",
"if",
"cache",
"differs",
"at",
"all",
"from",
"source",
"data",
"layer",
"with",
"an",
"excepton",
"for",
"volume_size",
"which",
"prints",
"a",
"warning",
"."
] | python | train |
mehcode/python-saml | saml/signature.py | https://github.com/mehcode/python-saml/blob/33ed62018efa9ec15b551f309429de510fa44321/saml/signature.py#L113-L166 | def verify(xml, stream):
"""
Verify the signaure of an XML document with the given certificate.
Returns `True` if the document is signed with a valid signature.
Returns `False` if the document is not signed or if the signature is
invalid.
:param lxml.etree._Element xml: The document to sign
:param file stream: The private key to sign the document with
:rtype: Boolean
"""
# Import xmlsec here to delay initializing the C library in
# case we don't need it.
import xmlsec
# Find the <Signature/> node.
signature_node = xmlsec.tree.find_node(xml, xmlsec.Node.SIGNATURE)
if signature_node is None:
# No `signature` node found; we cannot verify
return False
# Create a digital signature context (no key manager is needed).
ctx = xmlsec.SignatureContext()
# Register <Response/> and <Assertion/>
ctx.register_id(xml)
for assertion in xml.xpath("//*[local-name()='Assertion']"):
ctx.register_id(assertion)
# Load the public key.
key = None
for fmt in [
xmlsec.KeyFormat.PEM,
xmlsec.KeyFormat.CERT_PEM]:
stream.seek(0)
try:
key = xmlsec.Key.from_memory(stream, fmt)
break
except ValueError:
# xmlsec now throws when it can't load the key
pass
# Set the key on the context.
ctx.key = key
# Verify the signature.
try:
ctx.verify(signature_node)
return True
except Exception:
return False | [
"def",
"verify",
"(",
"xml",
",",
"stream",
")",
":",
"# Import xmlsec here to delay initializing the C library in",
"# case we don't need it.",
"import",
"xmlsec",
"# Find the <Signature/> node.",
"signature_node",
"=",
"xmlsec",
".",
"tree",
".",
"find_node",
"(",
"xml",
... | Verify the signaure of an XML document with the given certificate.
Returns `True` if the document is signed with a valid signature.
Returns `False` if the document is not signed or if the signature is
invalid.
:param lxml.etree._Element xml: The document to sign
:param file stream: The private key to sign the document with
:rtype: Boolean | [
"Verify",
"the",
"signaure",
"of",
"an",
"XML",
"document",
"with",
"the",
"given",
"certificate",
".",
"Returns",
"True",
"if",
"the",
"document",
"is",
"signed",
"with",
"a",
"valid",
"signature",
".",
"Returns",
"False",
"if",
"the",
"document",
"is",
"... | python | valid |
wandb/client | wandb/vendor/prompt_toolkit/shortcuts.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/shortcuts.py#L664-L671 | def confirm(message='Confirm (y or n) '):
"""
Display a confirmation prompt.
"""
assert isinstance(message, text_type)
app = create_confirm_application(message)
return run_application(app) | [
"def",
"confirm",
"(",
"message",
"=",
"'Confirm (y or n) '",
")",
":",
"assert",
"isinstance",
"(",
"message",
",",
"text_type",
")",
"app",
"=",
"create_confirm_application",
"(",
"message",
")",
"return",
"run_application",
"(",
"app",
")"
] | Display a confirmation prompt. | [
"Display",
"a",
"confirmation",
"prompt",
"."
] | python | train |
twilio/twilio-python | twilio/rest/api/v2010/account/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/__init__.py#L474-L483 | def new_keys(self):
"""
Access the new_keys
:returns: twilio.rest.api.v2010.account.new_key.NewKeyList
:rtype: twilio.rest.api.v2010.account.new_key.NewKeyList
"""
if self._new_keys is None:
self._new_keys = NewKeyList(self._version, account_sid=self._solution['sid'], )
return self._new_keys | [
"def",
"new_keys",
"(",
"self",
")",
":",
"if",
"self",
".",
"_new_keys",
"is",
"None",
":",
"self",
".",
"_new_keys",
"=",
"NewKeyList",
"(",
"self",
".",
"_version",
",",
"account_sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"r... | Access the new_keys
:returns: twilio.rest.api.v2010.account.new_key.NewKeyList
:rtype: twilio.rest.api.v2010.account.new_key.NewKeyList | [
"Access",
"the",
"new_keys"
] | python | train |
PyMLGame/pymlgame | game_example.py | https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/game_example.py#L134-L144 | def gameloop(self):
"""
A game loop that circles through the methods.
"""
try:
while True:
self.handle_events()
self.update()
self.render()
except KeyboardInterrupt:
pass | [
"def",
"gameloop",
"(",
"self",
")",
":",
"try",
":",
"while",
"True",
":",
"self",
".",
"handle_events",
"(",
")",
"self",
".",
"update",
"(",
")",
"self",
".",
"render",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"pass"
] | A game loop that circles through the methods. | [
"A",
"game",
"loop",
"that",
"circles",
"through",
"the",
"methods",
"."
] | python | train |
apache/incubator-heron | heron/tools/common/src/python/access/heron_api.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/common/src/python/access/heron_api.py#L772-L799 | def fetch_max(self, cluster, metric, topology, component, instance, timerange, environ=None):
'''
:param cluster:
:param metric:
:param topology:
:param component:
:param instance:
:param timerange:
:param environ:
:return:
'''
components = [component] if component != "*" else (yield get_comps(cluster, environ, topology))
result = {}
futures = []
for comp in components:
query = self.get_query(metric, comp, instance)
max_query = "MAX(%s)" % query
future = get_metrics(cluster, environ, topology, timerange, max_query)
futures.append(future)
results = yield futures
data = self.compute_max(results)
result = self.get_metric_response(timerange, data, True)
raise tornado.gen.Return(result) | [
"def",
"fetch_max",
"(",
"self",
",",
"cluster",
",",
"metric",
",",
"topology",
",",
"component",
",",
"instance",
",",
"timerange",
",",
"environ",
"=",
"None",
")",
":",
"components",
"=",
"[",
"component",
"]",
"if",
"component",
"!=",
"\"*\"",
"else... | :param cluster:
:param metric:
:param topology:
:param component:
:param instance:
:param timerange:
:param environ:
:return: | [
":",
"param",
"cluster",
":",
":",
"param",
"metric",
":",
":",
"param",
"topology",
":",
":",
"param",
"component",
":",
":",
"param",
"instance",
":",
":",
"param",
"timerange",
":",
":",
"param",
"environ",
":",
":",
"return",
":"
] | python | valid |
lpantano/seqcluster | seqcluster/libs/thinkbayes.py | https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1633-L1649 | def GaussianCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the gaussian distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
x = ROOT2 * erfinv(2 * p - 1)
return mu + x * sigma | [
"def",
"GaussianCdfInverse",
"(",
"p",
",",
"mu",
"=",
"0",
",",
"sigma",
"=",
"1",
")",
":",
"x",
"=",
"ROOT2",
"*",
"erfinv",
"(",
"2",
"*",
"p",
"-",
"1",
")",
"return",
"mu",
"+",
"x",
"*",
"sigma"
] | Evaluates the inverse CDF of the gaussian distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float | [
"Evaluates",
"the",
"inverse",
"CDF",
"of",
"the",
"gaussian",
"distribution",
"."
] | python | train |
mikusjelly/apkutils | apkutils/elf/elfparser.py | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/elf/elfparser.py#L87-L100 | def _section_from_spec(self, spec):
'''
Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
'''
try:
num = int(spec)
if num < self.elf_file.num_sections():
return self.elf_file.get_section(num)
else:
return None
except ValueError:
# Not a number. Must be a name then
return self.elf_file.get_section_by_name(spec) | [
"def",
"_section_from_spec",
"(",
"self",
",",
"spec",
")",
":",
"try",
":",
"num",
"=",
"int",
"(",
"spec",
")",
"if",
"num",
"<",
"self",
".",
"elf_file",
".",
"num_sections",
"(",
")",
":",
"return",
"self",
".",
"elf_file",
".",
"get_section",
"(... | Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file. | [
"Retrieve",
"a",
"section",
"given",
"a",
"spec",
"(",
"either",
"number",
"or",
"name",
")",
".",
"Return",
"None",
"if",
"no",
"such",
"section",
"exists",
"in",
"the",
"file",
"."
] | python | train |
mabuchilab/QNET | src/qnet/algebra/core/circuit_algebra.py | https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/circuit_algebra.py#L130-L152 | def get_blocks(self, block_structure=None):
"""For a reducible circuit, get a sequence of subblocks that when
concatenated again yield the original circuit. The block structure
given has to be compatible with the circuits actual block structure,
i.e. it can only be more coarse-grained.
Args:
block_structure (tuple): The block structure according to which the
subblocks are generated (default = ``None``, corresponds to the
circuit's own block structure)
Returns:
A tuple of subblocks that the circuit consists of.
Raises:
.IncompatibleBlockStructures
"""
if block_structure is None:
block_structure = self.block_structure
try:
return self._get_blocks(block_structure)
except IncompatibleBlockStructures as e:
raise e | [
"def",
"get_blocks",
"(",
"self",
",",
"block_structure",
"=",
"None",
")",
":",
"if",
"block_structure",
"is",
"None",
":",
"block_structure",
"=",
"self",
".",
"block_structure",
"try",
":",
"return",
"self",
".",
"_get_blocks",
"(",
"block_structure",
")",
... | For a reducible circuit, get a sequence of subblocks that when
concatenated again yield the original circuit. The block structure
given has to be compatible with the circuits actual block structure,
i.e. it can only be more coarse-grained.
Args:
block_structure (tuple): The block structure according to which the
subblocks are generated (default = ``None``, corresponds to the
circuit's own block structure)
Returns:
A tuple of subblocks that the circuit consists of.
Raises:
.IncompatibleBlockStructures | [
"For",
"a",
"reducible",
"circuit",
"get",
"a",
"sequence",
"of",
"subblocks",
"that",
"when",
"concatenated",
"again",
"yield",
"the",
"original",
"circuit",
".",
"The",
"block",
"structure",
"given",
"has",
"to",
"be",
"compatible",
"with",
"the",
"circuits"... | python | train |
cloudmesh/cloudmesh-common | cloudmesh/common/BaseConfigDict.py | https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/BaseConfigDict.py#L393-L399 | def yaml(self):
"""
returns the yaml output of the dict.
"""
return ordered_dump(OrderedDict(self),
Dumper=yaml.SafeDumper,
default_flow_style=False) | [
"def",
"yaml",
"(",
"self",
")",
":",
"return",
"ordered_dump",
"(",
"OrderedDict",
"(",
"self",
")",
",",
"Dumper",
"=",
"yaml",
".",
"SafeDumper",
",",
"default_flow_style",
"=",
"False",
")"
] | returns the yaml output of the dict. | [
"returns",
"the",
"yaml",
"output",
"of",
"the",
"dict",
"."
] | python | train |
gabstopper/smc-python | smc/core/engine.py | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/engine.py#L1007-L1024 | def refresh(self, timeout=3, wait_for_finish=False, **kw):
"""
Refresh existing policy on specified device. This is an asynchronous
call that will return a 'follower' link that can be queried to
determine the status of the task.
::
poller = engine.refresh()
while not poller.done():
poller.wait(5)
print('Percentage complete {}%'.format(poller.task.progress))
:param int timeout: timeout between queries
:raises TaskRunFailed: refresh failed, possibly locked policy
:rtype: TaskOperationPoller
"""
return Task.execute(self, 'refresh',
timeout=timeout, wait_for_finish=wait_for_finish, **kw) | [
"def",
"refresh",
"(",
"self",
",",
"timeout",
"=",
"3",
",",
"wait_for_finish",
"=",
"False",
",",
"*",
"*",
"kw",
")",
":",
"return",
"Task",
".",
"execute",
"(",
"self",
",",
"'refresh'",
",",
"timeout",
"=",
"timeout",
",",
"wait_for_finish",
"=",
... | Refresh existing policy on specified device. This is an asynchronous
call that will return a 'follower' link that can be queried to
determine the status of the task.
::
poller = engine.refresh()
while not poller.done():
poller.wait(5)
print('Percentage complete {}%'.format(poller.task.progress))
:param int timeout: timeout between queries
:raises TaskRunFailed: refresh failed, possibly locked policy
:rtype: TaskOperationPoller | [
"Refresh",
"existing",
"policy",
"on",
"specified",
"device",
".",
"This",
"is",
"an",
"asynchronous",
"call",
"that",
"will",
"return",
"a",
"follower",
"link",
"that",
"can",
"be",
"queried",
"to",
"determine",
"the",
"status",
"of",
"the",
"task",
".",
... | python | train |
bd808/python-iptools | iptools/__init__.py | https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/__init__.py#L58-L65 | def _address2long(address):
"""
Convert an address string to a long.
"""
parsed = ipv4.ip2long(address)
if parsed is None:
parsed = ipv6.ip2long(address)
return parsed | [
"def",
"_address2long",
"(",
"address",
")",
":",
"parsed",
"=",
"ipv4",
".",
"ip2long",
"(",
"address",
")",
"if",
"parsed",
"is",
"None",
":",
"parsed",
"=",
"ipv6",
".",
"ip2long",
"(",
"address",
")",
"return",
"parsed"
] | Convert an address string to a long. | [
"Convert",
"an",
"address",
"string",
"to",
"a",
"long",
"."
] | python | train |
Telefonica/toolium | toolium/pageelements/page_element.py | https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/pageelements/page_element.py#L103-L111 | def scroll_element_into_view(self):
"""Scroll element into view
:returns: page element instance
"""
x = self.web_element.location['x']
y = self.web_element.location['y']
self.driver.execute_script('window.scrollTo({0}, {1})'.format(x, y))
return self | [
"def",
"scroll_element_into_view",
"(",
"self",
")",
":",
"x",
"=",
"self",
".",
"web_element",
".",
"location",
"[",
"'x'",
"]",
"y",
"=",
"self",
".",
"web_element",
".",
"location",
"[",
"'y'",
"]",
"self",
".",
"driver",
".",
"execute_script",
"(",
... | Scroll element into view
:returns: page element instance | [
"Scroll",
"element",
"into",
"view"
] | python | train |
Contraz/demosys-py | demosys/loaders/scene/gltf.py | https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/loaders/scene/gltf.py#L474-L476 | def interleaves(self, info):
"""Does the buffer interleave with this one?"""
return info.byte_offset == self.component_type.size * self.components | [
"def",
"interleaves",
"(",
"self",
",",
"info",
")",
":",
"return",
"info",
".",
"byte_offset",
"==",
"self",
".",
"component_type",
".",
"size",
"*",
"self",
".",
"components"
] | Does the buffer interleave with this one? | [
"Does",
"the",
"buffer",
"interleave",
"with",
"this",
"one?"
] | python | valid |
ctuning/ck | ck/kernel.py | https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L3118-L3261 | def perform_remote_action(i):
"""
Input: { See 'perform_action' function }
Output: { See 'perform_action' function }
"""
# Import modules compatible with Python 2.x and 3.x
import urllib
try: import urllib.request as urllib2
except: import urllib2 # pragma: no cover
try: from urllib.parse import urlencode
except: from urllib import urlencode # pragma: no cover
rr={'return':0}
# Get action
act=i.get('action','')
# Check output
o=i.get('out','')
if o=='con':
# out('Initiating remote access ...')
# out('')
i['out']='con'
i['quiet']='yes'
if act=='pull':
i['out']='json'
else:
i['out']='json'
# # Clean up input
# if o!='json_file':
# rr['out']='json' # Decided to return json to show that it's remote ...
if 'cid' in i:
del(i['cid']) # already processed
# Get URL
url=i.get('remote_server_url','')
# Process i
if 'remote_server_url' in i: del(i['remote_server_url'])
# Pre process if push file ...
if act=='push':
# Check file
fn=i.get('filename','')
if fn=='':
x=i.get('cids',[])
if len(x)>0:
fn=x[0]
if fn=='':
return {'return':1, 'error':'filename is empty'}
if not os.path.isfile(fn):
return {'return':1, 'error':'file '+fn+' not found'}
rx=convert_file_to_upload_string({'filename':fn})
if rx['return']>0: return rx
i['file_content_base64']=rx['file_content_base64']
# Leave only filename without path
i['filename']=os.path.basename(fn)
# Prepare post variables
r=dumps_json({'dict':i, 'skip_indent':'yes'})
if r['return']>0: return r
s=r['string'].encode('utf8')
post=urlencode({'ck_json':s})
if sys.version_info[0]>2: post=post.encode('utf8')
# If auth
au=i.get('remote_server_user','')
if au!='':
del(i['remote_server_user'])
ap=i.get('remote_server_pass','')
if ap!='':
del(i['remote_server_pass'])
auth = urllib2.HTTPPasswordMgrWithDefaultRealm()
auth.add_password(None, url, au, ap)
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPBasicAuthHandler(auth)))
# Prepare request
request = urllib2.Request(url, post)
# Connect
try:
f=urllib2.urlopen(request)
except Exception as e:
return {'return':1, 'error':'Access to remote CK repository failed ('+format(e)+')'}
# Read from Internet
try:
s=f.read()
f.close()
except Exception as e:
return {'return':1, 'error':'Failed reading stream from remote CK web service ('+format(e)+')'}
# Check output
try: s=s.decode('utf8')
except Exception as e: pass
if o=='con' and act!='pull':
out(s.rstrip())
else:
# Try to convert output to dictionary
r=convert_json_str_to_dict({'str':s, 'skip_quote_replacement':'yes'})
if r['return']>0:
return {'return':1, 'error':'can\'t parse output from remote CK server ('+r['error']+'):\n'+s[:256]+'\n\n...)'}
d=r['dict']
if 'return' in d: d['return']=int(d['return']) # Fix for some strange behavior when 'return' is not integer - should check why ...
if d.get('return',0)>0:
return d
# Post process if pull file ...
if act=='pull':
if o!='json' and o!='json_file':
# Convert encoded file to real file ...
x=d.get('file_content_base64','')
fn=d.get('filename','')
if fn=='': fn=cfg['default_archive_name']
r=convert_upload_string_to_file({'file_content_base64':x, 'filename':fn})
if r['return']>0: return r
if 'file_content_base64' in d: del(d['file_content_base64'])
rr.update(d)
# Restore original output
i['out']=o
return rr | [
"def",
"perform_remote_action",
"(",
"i",
")",
":",
"# Import modules compatible with Python 2.x and 3.x",
"import",
"urllib",
"try",
":",
"import",
"urllib",
".",
"request",
"as",
"urllib2",
"except",
":",
"import",
"urllib2",
"# pragma: no cover",
"try",
":",
"from"... | Input: { See 'perform_action' function }
Output: { See 'perform_action' function } | [
"Input",
":",
"{",
"See",
"perform_action",
"function",
"}",
"Output",
":",
"{",
"See",
"perform_action",
"function",
"}"
] | python | train |
tensorflow/tensor2tensor | tensor2tensor/utils/learning_rate.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/learning_rate.py#L105-L114 | def _global_step(hparams):
"""Adjust global step if a multi-step optimizer is used."""
step = tf.to_float(tf.train.get_or_create_global_step())
multiplier = hparams.optimizer_multistep_accumulate_steps
if not multiplier:
return step
tf.logging.info("Dividing global step by %d for multi-step optimizer."
% multiplier)
return step / tf.to_float(multiplier) | [
"def",
"_global_step",
"(",
"hparams",
")",
":",
"step",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"train",
".",
"get_or_create_global_step",
"(",
")",
")",
"multiplier",
"=",
"hparams",
".",
"optimizer_multistep_accumulate_steps",
"if",
"not",
"multiplier",
... | Adjust global step if a multi-step optimizer is used. | [
"Adjust",
"global",
"step",
"if",
"a",
"multi",
"-",
"step",
"optimizer",
"is",
"used",
"."
] | python | train |
scanny/python-pptx | pptx/api.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/api.py#L20-L36 | def Presentation(pptx=None):
"""
Return a |Presentation| object loaded from *pptx*, where *pptx* can be
either a path to a ``.pptx`` file (a string) or a file-like object. If
*pptx* is missing or ``None``, the built-in default presentation
"template" is loaded.
"""
if pptx is None:
pptx = _default_pptx_path()
presentation_part = Package.open(pptx).main_document_part
if not _is_pptx_package(presentation_part):
tmpl = "file '%s' is not a PowerPoint file, content type is '%s'"
raise ValueError(tmpl % (pptx, presentation_part.content_type))
return presentation_part.presentation | [
"def",
"Presentation",
"(",
"pptx",
"=",
"None",
")",
":",
"if",
"pptx",
"is",
"None",
":",
"pptx",
"=",
"_default_pptx_path",
"(",
")",
"presentation_part",
"=",
"Package",
".",
"open",
"(",
"pptx",
")",
".",
"main_document_part",
"if",
"not",
"_is_pptx_p... | Return a |Presentation| object loaded from *pptx*, where *pptx* can be
either a path to a ``.pptx`` file (a string) or a file-like object. If
*pptx* is missing or ``None``, the built-in default presentation
"template" is loaded. | [
"Return",
"a",
"|Presentation|",
"object",
"loaded",
"from",
"*",
"pptx",
"*",
"where",
"*",
"pptx",
"*",
"can",
"be",
"either",
"a",
"path",
"to",
"a",
".",
"pptx",
"file",
"(",
"a",
"string",
")",
"or",
"a",
"file",
"-",
"like",
"object",
".",
"I... | python | train |
jupyterhub/kubespawner | kubespawner/spawner.py | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1839-L1873 | def options_from_form(self, formdata):
"""get the option selected by the user on the form
This only constructs the user_options dict,
it should not actually load any options.
That is done later in `.load_user_options()`
Args:
formdata: user selection returned by the form
To access to the value, you can use the `get` accessor and the name of the html element,
for example::
formdata.get('profile',[0])
to get the value of the form named "profile", as defined in `form_template`::
<select class="form-control" name="profile"...>
</select>
Returns:
user_options (dict): the selected profile in the user_options form,
e.g. ``{"profile": "8 CPUs"}``
"""
if not self.profile_list or self._profile_list is None:
return formdata
# Default to first profile if somehow none is provided
try:
selected_profile = int(formdata.get('profile', [0])[0])
options = self._profile_list[selected_profile]
except (TypeError, IndexError, ValueError):
raise web.HTTPError(400, "No such profile: %i", formdata.get('profile', None))
return {
'profile': options['display_name']
} | [
"def",
"options_from_form",
"(",
"self",
",",
"formdata",
")",
":",
"if",
"not",
"self",
".",
"profile_list",
"or",
"self",
".",
"_profile_list",
"is",
"None",
":",
"return",
"formdata",
"# Default to first profile if somehow none is provided",
"try",
":",
"selected... | get the option selected by the user on the form
This only constructs the user_options dict,
it should not actually load any options.
That is done later in `.load_user_options()`
Args:
formdata: user selection returned by the form
To access to the value, you can use the `get` accessor and the name of the html element,
for example::
formdata.get('profile',[0])
to get the value of the form named "profile", as defined in `form_template`::
<select class="form-control" name="profile"...>
</select>
Returns:
user_options (dict): the selected profile in the user_options form,
e.g. ``{"profile": "8 CPUs"}`` | [
"get",
"the",
"option",
"selected",
"by",
"the",
"user",
"on",
"the",
"form"
] | python | train |
Alignak-monitoring/alignak | alignak/external_command.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2195-L2208 | def disable_host_notifications(self, host):
"""Disable notifications for a host
Format of the line that triggers function call::
DISABLE_HOST_NOTIFICATIONS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
if host.notifications_enabled:
host.modified_attributes |= DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
host.notifications_enabled = False
self.send_an_element(host.get_update_status_brok()) | [
"def",
"disable_host_notifications",
"(",
"self",
",",
"host",
")",
":",
"if",
"host",
".",
"notifications_enabled",
":",
"host",
".",
"modified_attributes",
"|=",
"DICT_MODATTR",
"[",
"\"MODATTR_NOTIFICATIONS_ENABLED\"",
"]",
".",
"value",
"host",
".",
"notificatio... | Disable notifications for a host
Format of the line that triggers function call::
DISABLE_HOST_NOTIFICATIONS;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None | [
"Disable",
"notifications",
"for",
"a",
"host",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
] | python | train |
polyaxon/polyaxon-cli | polyaxon_cli/cli/job.py | https://github.com/polyaxon/polyaxon-cli/blob/a7f5eed74d4d909cad79059f3c21c58606881449/polyaxon_cli/cli/job.py#L131-L169 | def update(ctx, name, description, tags):
"""Update job.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon job -j 2 update --description="new description for my job"
```
"""
user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job'))
update_dict = {}
if name:
update_dict['name'] = name
if description:
update_dict['description'] = description
tags = validate_tags(tags)
if tags:
update_dict['tags'] = tags
if not update_dict:
Printer.print_warning('No argument was provided to update the job.')
sys.exit(0)
try:
response = PolyaxonClient().job.update_job(
user, project_name, _job, update_dict)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not update job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Job updated.")
get_job_details(response) | [
"def",
"update",
"(",
"ctx",
",",
"name",
",",
"description",
",",
"tags",
")",
":",
"user",
",",
"project_name",
",",
"_job",
"=",
"get_job_or_local",
"(",
"ctx",
".",
"obj",
".",
"get",
"(",
"'project'",
")",
",",
"ctx",
".",
"obj",
".",
"get",
"... | Update job.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon job -j 2 update --description="new description for my job"
``` | [
"Update",
"job",
"."
] | python | valid |
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L216-L251 | def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error) | [
"def",
"dispatch",
"(",
"self",
",",
"method",
",",
"url",
",",
"auth",
"=",
"None",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"Request",
"(",
"method",
"=",
"method",
",",
"url",
"=",
"url",
",",
"auth",
"=",
"au... | Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success. | [
"Send",
"HTTP",
"request",
"with",
"given",
"method",
"credentials",
"and",
"data",
"to",
"the",
"given",
"URL",
"and",
"return",
"the",
"success",
"and",
"the",
"result",
"on",
"success",
"."
] | python | train |
Robpol86/sphinxcontrib-versioning | sphinxcontrib/versioning/__main__.py | https://github.com/Robpol86/sphinxcontrib-versioning/blob/920edec0ac764081b583a2ecf4e6952762b9dbf2/sphinxcontrib/versioning/__main__.py#L80-L89 | def invoke(self, ctx):
"""Inject overflow arguments into context state.
:param click.core.Context ctx: Click context.
:return: super() return value.
"""
if self.overflow:
ctx.ensure_object(Config).update(dict(overflow=self.overflow))
return super(ClickGroup, self).invoke(ctx) | [
"def",
"invoke",
"(",
"self",
",",
"ctx",
")",
":",
"if",
"self",
".",
"overflow",
":",
"ctx",
".",
"ensure_object",
"(",
"Config",
")",
".",
"update",
"(",
"dict",
"(",
"overflow",
"=",
"self",
".",
"overflow",
")",
")",
"return",
"super",
"(",
"C... | Inject overflow arguments into context state.
:param click.core.Context ctx: Click context.
:return: super() return value. | [
"Inject",
"overflow",
"arguments",
"into",
"context",
"state",
"."
] | python | train |
lreis2415/PyGeoC | pygeoc/vector.py | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/vector.py#L94-L117 | def write_line_shp(line_list, out_shp):
"""Export ESRI Shapefile -- Line feature"""
print('Write line shapefile: %s' % out_shp)
driver = ogr_GetDriverByName(str('ESRI Shapefile'))
if driver is None:
print('ESRI Shapefile driver not available.')
sys.exit(1)
if os.path.exists(out_shp):
driver.DeleteDataSource(out_shp)
ds = driver.CreateDataSource(out_shp.rpartition(os.sep)[0])
if ds is None:
print('ERROR Output: Creation of output file failed.')
sys.exit(1)
lyr = ds.CreateLayer(str(out_shp.rpartition(os.sep)[2].split('.')[0]), None, wkbLineString)
for l in line_list:
line = ogr_Geometry(wkbLineString)
for i in l:
line.AddPoint(i[0], i[1])
templine = ogr_CreateGeometryFromJson(line.ExportToJson())
feature = ogr_Feature(lyr.GetLayerDefn())
feature.SetGeometry(templine)
lyr.CreateFeature(feature)
feature.Destroy()
ds.Destroy() | [
"def",
"write_line_shp",
"(",
"line_list",
",",
"out_shp",
")",
":",
"print",
"(",
"'Write line shapefile: %s'",
"%",
"out_shp",
")",
"driver",
"=",
"ogr_GetDriverByName",
"(",
"str",
"(",
"'ESRI Shapefile'",
")",
")",
"if",
"driver",
"is",
"None",
":",
"print... | Export ESRI Shapefile -- Line feature | [
"Export",
"ESRI",
"Shapefile",
"--",
"Line",
"feature"
] | python | train |
spdx/tools-python | spdx/parsers/tagvalue.py | https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/tagvalue.py#L400-L409 | def p_file_contrib_1(self, p):
"""file_contrib : FILE_CONTRIB LINE"""
try:
if six.PY2:
value = p[2].decode(encoding='utf-8')
else:
value = p[2]
self.builder.add_file_contribution(self.document, value)
except OrderError:
self.order_error('FileContributor', 'FileName', p.lineno(1)) | [
"def",
"p_file_contrib_1",
"(",
"self",
",",
"p",
")",
":",
"try",
":",
"if",
"six",
".",
"PY2",
":",
"value",
"=",
"p",
"[",
"2",
"]",
".",
"decode",
"(",
"encoding",
"=",
"'utf-8'",
")",
"else",
":",
"value",
"=",
"p",
"[",
"2",
"]",
"self",
... | file_contrib : FILE_CONTRIB LINE | [
"file_contrib",
":",
"FILE_CONTRIB",
"LINE"
] | python | valid |
quantopian/alphalens | alphalens/utils.py | https://github.com/quantopian/alphalens/blob/d43eac871bb061e956df936794d3dd514da99e44/alphalens/utils.py#L830-L853 | def std_conversion(period_std, base_period):
"""
one_period_len standard deviation (or standard error) approximation
Parameters
----------
period_std: pd.DataFrame
DataFrame containing standard deviation or standard error values
with column headings representing the return period.
base_period: string
The base period length used in the conversion
It must follow pandas.Timedelta constructor format (e.g. '1 days',
'1D', '30m', '3h', '1D1h', etc)
Returns
-------
pd.DataFrame
DataFrame in same format as input but with one-period
standard deviation/error values.
"""
period_len = period_std.name
conversion_factor = (pd.Timedelta(period_len) /
pd.Timedelta(base_period))
return period_std / np.sqrt(conversion_factor) | [
"def",
"std_conversion",
"(",
"period_std",
",",
"base_period",
")",
":",
"period_len",
"=",
"period_std",
".",
"name",
"conversion_factor",
"=",
"(",
"pd",
".",
"Timedelta",
"(",
"period_len",
")",
"/",
"pd",
".",
"Timedelta",
"(",
"base_period",
")",
")",
... | one_period_len standard deviation (or standard error) approximation
Parameters
----------
period_std: pd.DataFrame
DataFrame containing standard deviation or standard error values
with column headings representing the return period.
base_period: string
The base period length used in the conversion
It must follow pandas.Timedelta constructor format (e.g. '1 days',
'1D', '30m', '3h', '1D1h', etc)
Returns
-------
pd.DataFrame
DataFrame in same format as input but with one-period
standard deviation/error values. | [
"one_period_len",
"standard",
"deviation",
"(",
"or",
"standard",
"error",
")",
"approximation"
] | python | train |
def stdrepr_short(self, obj, *, cls=None, tag='span'):
    """
    Render the standard short representation of an object: its type name
    wrapped in angle brackets, e.g. ``<MyClass>``.  Used for objects at a
    depth exceeding ``hrepr_object.config.max_depth``; objects can override
    this via ``__hrepr_short__`` or ``hrepr_object.type_handlers_short``.

    Args:
        obj: The object to represent.
        cls (optional): CSS class for the representation; defaults to
            ``'hrepr-short-' + obj.__class__.__name__``.
        tag (optional): Tag to emit, defaults to ``'span'``.
    """
    type_name = obj.__class__.__name__
    css_class = cls if cls is not None else f'hrepr-short-{type_name}'
    builder = getattr(self.H, tag)
    return builder[css_class](f'<{type_name}>')
"def",
"stdrepr_short",
"(",
"self",
",",
"obj",
",",
"*",
",",
"cls",
"=",
"None",
",",
"tag",
"=",
"'span'",
")",
":",
"cls_name",
"=",
"obj",
".",
"__class__",
".",
"__name__",
"if",
"cls",
"is",
"None",
":",
"cls",
"=",
"f'hrepr-short-{cls_name}'",... | Standard short representation for objects, used for objects at
a depth that exceeds ``hrepr_object.config.max_depth``. That
representation is just the object's type between ``<>``s, e.g.
``<MyClass>``.
This behavior can be overriden with a ``__hrepr_short__`` method
on the object, or an entry in ``hrepr_object.type_handlers_short``.
Args:
obj: The object to represent.
cls (optional): The class name for the representation. If None,
stdrepr will use ``'hrepr-' + obj.__class__.___name__``
tag (optional): The tag for the representation, defaults to
'span'. | [
"Standard",
"short",
"representation",
"for",
"objects",
"used",
"for",
"objects",
"at",
"a",
"depth",
"that",
"exceeds",
"hrepr_object",
".",
"config",
".",
"max_depth",
".",
"That",
"representation",
"is",
"just",
"the",
"object",
"s",
"type",
"between",
"<"... | python | train |
def get_return_val(self, state, is_fp=None, size=None, stack_base=None):
    """
    Read the function's return value out of the given state.

    :param state:      the state to read the value from
    :param is_fp:      optional hint that the value is floating-point
    :param size:       unused; kept for interface compatibility
    :param stack_base: optional stack base forwarded to the location
    """
    ty = None if self.func_ty is None else self.func_ty.returnty
    # Pick the location holding the return value: an explicit override
    # wins, then the caller's fp hint, then the declared return type.
    if self.ret_val is not None:
        loc = self.ret_val
    elif is_fp is not None:
        loc = self.FP_RETURN_VAL if is_fp else self.RETURN_VAL
    elif ty is not None and isinstance(ty, SimTypeFloat):
        loc = self.FP_RETURN_VAL
    else:
        loc = self.RETURN_VAL
    if loc is None:
        raise NotImplementedError("This SimCC doesn't know how to get this value - should be implemented")
    nbytes = None if ty is None else ty.size // state.arch.byte_width
    val = loc.get_value(state, stack_base=stack_base, size=nbytes)
    # Reinterpret the raw bits as a float when anything marks it as fp.
    if self.is_fp_arg(loc) or self.is_fp_value(val) or isinstance(ty, SimTypeFloat):
        val = val.raw_to_fp()
    return val
"def",
"get_return_val",
"(",
"self",
",",
"state",
",",
"is_fp",
"=",
"None",
",",
"size",
"=",
"None",
",",
"stack_base",
"=",
"None",
")",
":",
"ty",
"=",
"self",
".",
"func_ty",
".",
"returnty",
"if",
"self",
".",
"func_ty",
"is",
"not",
"None",
... | Get the return value out of the given state | [
"Get",
"the",
"return",
"value",
"out",
"of",
"the",
"given",
"state"
] | python | train |
hyperledger/indy-plenum | plenum/server/catchup/catchup_rep_service.py | https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/catchup/catchup_rep_service.py#L374-L425 | def _has_valid_catchup_replies(self, seq_no: int, txns_to_process: List[Tuple[int, Any]]) -> Tuple[bool, str, int]:
        """
        Check whether the catchup reply covering ``seq_no`` passes
        consistency-proof verification against the catchup target
        (transforms transactions for the ledger as part of the check).

        Args:
            seq_no: sequence number of the first transaction in
                ``txns_to_process``.
            txns_to_process: (seq_no, txn) pairs pending processing.

        Returns:
            Tuple of:
                - whether the catchup reply corresponding to ``seq_no``
                  verified successfully
                - name of the node from which the transactions came
                - number of transactions ready to be processed
        """
        # TODO: Remove after stop passing seqNo here
        assert seq_no == txns_to_process[0][0]
        # Here seqNo has to be the seqNo of first transaction of
        # `catchupReplies`
        # Get the transactions in the catchup reply which has sequence
        # number `seqNo`
        node_name, catchup_rep = self._find_catchup_reply_for_seq_no(seq_no)
        txns = catchup_rep.txns
        # Add only those transaction in the temporary tree from the above
        # batch which are not present in the ledger
        # Integer keys being converted to strings when marshaled to JSON
        # NOTE(review): membership test uses str(s) — presumably because the
        # reply's txns mapping is keyed by stringified seq numbers after
        # JSON round-tripping; confirm against the wire format.
        txns = [self._provider.transform_txn_for_ledger(txn)
                for s, txn in txns_to_process[:len(txns)]
                if str(s) in txns]
        # Creating a temporary tree which will be used to verify consistency
        # proof, by inserting transactions. Duplicating a merkle tree is not
        # expensive since we are using a compact merkle tree.
        temp_tree = self._ledger.treeWithAppliedTxns(txns)
        proof = catchup_rep.consProof
        final_size = self._catchup_till.final_size
        final_hash = self._catchup_till.final_hash
        try:
            logger.info("{} verifying proof for {}, {}, {}, {}, {}".
                        format(self, temp_tree.tree_size, final_size,
                               temp_tree.root_hash, final_hash, proof))
            # Verify that the temporary tree (current ledger + new txns) is a
            # consistent prefix of the catchup target tree.
            verified = self._provider.verifier(self._ledger_id).verify_tree_consistency(
                temp_tree.tree_size,
                final_size,
                temp_tree.root_hash,
                Ledger.strToHash(final_hash),
                [Ledger.strToHash(p) for p in proof]
            )
        except Exception as ex:
            # Any verification failure (bad proof, malformed hashes, ...) is
            # treated as "not verified" rather than propagated.
            logger.info("{} could not verify catchup reply {} since {}".format(self, catchup_rep, ex))
            verified = False
        return bool(verified), node_name, len(txns) | [
"def",
"_has_valid_catchup_replies",
"(",
"self",
",",
"seq_no",
":",
"int",
",",
"txns_to_process",
":",
"List",
"[",
"Tuple",
"[",
"int",
",",
"Any",
"]",
"]",
")",
"->",
"Tuple",
"[",
"bool",
",",
"str",
",",
"int",
"]",
":",
"# TODO: Remove after sto... | Transforms transactions for ledger!
Returns:
Whether catchup reply corresponding to seq_no
Name of node from which txns came
Number of transactions ready to be processed | [
"Transforms",
"transactions",
"for",
"ledger!"
] | python | train |
def generate_static(self, path):
    """
    Build a cache-busted URL for an asset in the project's public folder.

    An empty/falsy path is returned as ``""``; an absolute path only gains
    the version query string; any other path is prefixed with the
    configured static root.
    """
    if not path:
        return ""
    version_suffix = "?v=%s" % self.version
    if path.startswith('/'):
        return path + version_suffix
    return "%s/%s" % (self.static, path) + version_suffix
"def",
"generate_static",
"(",
"self",
",",
"path",
")",
":",
"if",
"not",
"path",
":",
"return",
"\"\"",
"if",
"path",
"[",
"0",
"]",
"==",
"'/'",
":",
"return",
"\"%s?v=%s\"",
"%",
"(",
"path",
",",
"self",
".",
"version",
")",
"return",
"\"%s/%s?v... | This method generates a valid path to the public folder of the running project | [
"This",
"method",
"generates",
"a",
"valid",
"path",
"to",
"the",
"public",
"folder",
"of",
"the",
"running",
"project"
] | python | train |
def openconfig_lacp(device_name=None):
    '''
    .. versionadded:: 2019.2.0

    Return LACP configuration data for aggregate interfaces, structured as
    standardised in the `openconfig-lacp
    <http://ops.openconfig.net/branches/master/openconfig-lacp.html>`_
    YANG model.

    .. note::
        The ``interval`` and ``lacp_mode`` keys are always emitted as
        ``SLOW`` and ``ACTIVE`` respectively, as this data is not currently
        available in Netbox; these are the defaults defined in the
        standard.

    device_name: ``None``
        The name of the device to query the LACP information for. If not
        provided, will use the Minion ID.

    CLI Example:

    .. code-block:: bash

        salt '*' netbox.openconfig_lacp
        salt '*' netbox.openconfig_lacp device_name=cr1.thn.lon
    '''
    bundles = {}
    for iface in get_interfaces(device_name=device_name):
        lag = iface['lag']
        # Only LAG members contribute to the LACP tree.
        if not lag:
            continue
        member_if, _unit = _if_name_unit(iface['name'])
        bundle = bundles.setdefault(lag['name'], {
            'config': {
                'name': lag['name'],
                'interval': 'SLOW',
                'lacp_mode': 'ACTIVE'
            },
            'members': {
                'member': {}
            }
        })
        bundle['members']['member'][member_if] = {}
    return {
        'lacp': {
            'interfaces': {
                'interface': bundles
            }
        }
    }
"def",
"openconfig_lacp",
"(",
"device_name",
"=",
"None",
")",
":",
"oc_lacp",
"=",
"{",
"}",
"interfaces",
"=",
"get_interfaces",
"(",
"device_name",
"=",
"device_name",
")",
"for",
"interface",
"in",
"interfaces",
":",
"if",
"not",
"interface",
"[",
"'lag... | .. versionadded:: 2019.2.0
Return a dictionary structured as standardised in the
`openconfig-lacp <http://ops.openconfig.net/branches/master/openconfig-lacp.html>`_
YANG model, with configuration data for Link Aggregation Control Protocol
(LACP) for aggregate interfaces.
.. note::
The ``interval`` and ``lacp_mode`` keys have the values set as ``SLOW``
and ``ACTIVE`` respectively, as this data is not currently available
in Netbox, therefore defaulting to the values defined in the standard.
See `interval <http://ops.openconfig.net/branches/master/docs/openconfig-lacp.html#lacp-interfaces-interface-config-interval>`_
and `lacp-mode <http://ops.openconfig.net/branches/master/docs/openconfig-lacp.html#lacp-interfaces-interface-config-lacp-mode>`_
for further details.
device_name: ``None``
The name of the device to query the LACP information for. If not provided,
will use the Minion ID.
CLI Example:
.. code-block:: bash
salt '*' netbox.openconfig_lacp
salt '*' netbox.openconfig_lacp device_name=cr1.thn.lon | [
"..",
"versionadded",
"::",
"2019",
".",
"2",
".",
"0"
] | python | train |
def connected_channel(self):
    """Return the voice channel this player is connected to, or ``None``."""
    channel_id = self.channel_id
    if channel_id:
        return self._lavalink.bot.get_channel(int(channel_id))
    return None
"def",
"connected_channel",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"channel_id",
":",
"return",
"None",
"return",
"self",
".",
"_lavalink",
".",
"bot",
".",
"get_channel",
"(",
"int",
"(",
"self",
".",
"channel_id",
")",
")"
] | Returns the voice channel the player is connected to. | [
"Returns",
"the",
"voice",
"channel",
"the",
"player",
"is",
"connected",
"to",
"."
] | python | valid |
def add_click_commands(module, cli, command_dict, namespaced):
    """Register every click command found in *module* on the *cli* group.

    Per-command overrides (name, help text) come from
    ``command_dict['config']``; the registered name is optionally prefixed
    with an explicit namespace or with the module's own name.
    """
    commands = [(attr, value) for attr, value in getmembers(module)
                if isinstance(value, BaseCommand)]
    overrides_by_attr = command_dict.get('config', {})
    namespace = command_dict.get('namespace')
    for attr_name, command in commands:
        overrides = overrides_by_attr.get(attr_name, {})
        command_name = overrides.get('name', getattr(command, 'name', attr_name))
        if namespace:
            command_name = '{}_{}'.format(namespace, command_name)
        elif namespaced:
            # Fall back to the trailing module name as the namespace.
            command_name = '{}_{}'.format(module.__name__.split('.')[-1],
                                          command_name)
        command.short_help = overrides.get('help_text', command.short_help)
        cli.add_command(command, name=command_name)
"def",
"add_click_commands",
"(",
"module",
",",
"cli",
",",
"command_dict",
",",
"namespaced",
")",
":",
"module_commands",
"=",
"[",
"item",
"for",
"item",
"in",
"getmembers",
"(",
"module",
")",
"if",
"isinstance",
"(",
"item",
"[",
"1",
"]",
",",
"Ba... | Loads all click commands | [
"Loads",
"all",
"click",
"commands"
] | python | train |
def profile_func(filename=None):
    '''
    Decorator for adding profiling to a nested function in Salt.

    The wrapped function runs under ``cProfile`` and the collected stats
    are dumped to ``filename`` (or ``<funcname>_func.profile`` when no
    filename is given).  The wrapped function's return value is passed
    through unchanged.

    :param filename: optional path for the profile dump file.
    '''
    def proffunc(fun):
        def profiled_func(*args, **kwargs):
            logging.info('Profiling function %s', fun.__name__)
            profiler = cProfile.Profile()
            # Run the target OUTSIDE the try block: previously an IOError
            # raised by the profiled function itself was caught here, which
            # both mislabelled it as a profile-file failure and then crashed
            # with UnboundLocalError on `return retval`.
            retval = profiler.runcall(fun, *args, **kwargs)
            try:
                profiler.dump_stats((filename or '{0}_func.profile'
                                     .format(fun.__name__)))
            except IOError:
                # Failing to write the profile must not break the call.
                logging.exception('Could not open profile file %s', filename)
            return retval
        return profiled_func
    return proffunc
"def",
"profile_func",
"(",
"filename",
"=",
"None",
")",
":",
"def",
"proffunc",
"(",
"fun",
")",
":",
"def",
"profiled_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"logging",
".",
"info",
"(",
"'Profiling function %s'",
",",
"fun",
"."... | Decorator for adding profiling to a nested function in Salt | [
"Decorator",
"for",
"adding",
"profiling",
"to",
"a",
"nested",
"function",
"in",
"Salt"
] | python | train |
def safe_quotes(text, escape_single_quotes=False):
    """Escape quotes in *text* so it can be embedded in HTML/JS templates.

    Double quotes become ``&quot;`` and, when *escape_single_quotes* is
    set, single quotes are backslash-escaped.  ``'True'`` is rewritten to
    the JavaScript literal ``'true'``.  Non-string input is returned
    unchanged.
    """
    if isinstance(text, str):
        safe_text = text.replace('"', '&quot;')
        if escape_single_quotes:
            # Must produce a literal backslash + quote; the previous
            # replacement string "\'" was just "'" — a no-op.
            safe_text = safe_text.replace("'", "\\'")
        return safe_text.replace('True', 'true')
    return text
"def",
"safe_quotes",
"(",
"text",
",",
"escape_single_quotes",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"str",
")",
":",
"safe_text",
"=",
"text",
".",
"replace",
"(",
"'\"'",
",",
"\""\"",
")",
"if",
"escape_single_quotes",
":",... | htmlify string | [
"htmlify",
"string"
] | python | train |
def eval_advs(self, x, y, preds_adv, X_test, Y_test, att_type):
    """
    Evaluate model accuracy on adversarial examples and log the result.

    :param x: symbolic input to the model.
    :param y: symbolic variable for the label.
    :param preds_adv: symbolic prediction on an adversarial example.
    :param X_test: NumPy array of test set inputs.
    :param Y_test: NumPy array of test set labels.
    :param att_type: name of the attack (used in the logged metric name).
    """
    batch = self.batch_size
    # Trim the test set to a whole number of batches.
    end = (len(X_test) // batch) * batch
    if self.hparams.fast_tests:
        end = 10 * batch
    acc = model_eval(self.sess, x, y, preds_adv,
                     X_test[:end], Y_test[:end], args=self.eval_params)
    self.log_value('test_accuracy_%s' % att_type, acc,
                   'Test accuracy on adversarial examples')
    return acc
"def",
"eval_advs",
"(",
"self",
",",
"x",
",",
"y",
",",
"preds_adv",
",",
"X_test",
",",
"Y_test",
",",
"att_type",
")",
":",
"end",
"=",
"(",
"len",
"(",
"X_test",
")",
"//",
"self",
".",
"batch_size",
")",
"*",
"self",
".",
"batch_size",
"if",
... | Evaluate the accuracy of the model on adversarial examples
:param x: symbolic input to model.
:param y: symbolic variable for the label.
:param preds_adv: symbolic variable for the prediction on an
adversarial example.
:param X_test: NumPy array of test set inputs.
:param Y_test: NumPy array of test set labels.
:param att_type: name of the attack. | [
"Evaluate",
"the",
"accuracy",
"of",
"the",
"model",
"on",
"adversarial",
"examples"
] | python | train |
def record_leaving(self, time, code, frame_key, parent_stats):
    """Record that the call identified by ``(code, frame_key)`` returned."""
    try:
        child_stats = parent_stats.get_child(code)
        entered_at = self._times_entered.pop((code, frame_key))
    except KeyError:
        # No matching entry event (or unknown code) — ignore the leave.
        return
    elapsed = time - entered_at
    # Clock skew can make elapsed negative; never subtract time.
    if elapsed > 0:
        child_stats.deep_time += elapsed
"def",
"record_leaving",
"(",
"self",
",",
"time",
",",
"code",
",",
"frame_key",
",",
"parent_stats",
")",
":",
"try",
":",
"stats",
"=",
"parent_stats",
".",
"get_child",
"(",
"code",
")",
"time_entered",
"=",
"self",
".",
"_times_entered",
".",
"pop",
... | Left from a function call. | [
"Left",
"from",
"a",
"function",
"call",
"."
] | python | train |
wavycloud/pyboto3 | pyboto3/mturk.py | https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/mturk.py#L195-L442 | def create_hit(MaxAssignments=None, AutoApprovalDelayInSeconds=None, LifetimeInSeconds=None, AssignmentDurationInSeconds=None, Reward=None, Title=None, Keywords=None, Description=None, Question=None, RequesterAnnotation=None, QualificationRequirements=None, UniqueRequestToken=None, AssignmentReviewPolicy=None, HITReviewPolicy=None, HITLayoutId=None, HITLayoutParameters=None):
"""
The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT , a new HIT is created for you, with a new HITTypeID . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters .
See also: AWS API Documentation
:example: response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: [REQUIRED]
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: [REQUIRED]
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: [REQUIRED]
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
(dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
(integer) --
LocaleValues (list) --The locale value to compare against the Qualification's value. The local value must be a valid ISO 3166 country code or supports ISO 3166-2 subdivisions. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
(dict) --The Locale data structure represents a geographical region or location.
Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.
:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return a AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
Note
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Polices.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Polices.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
(dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
Name (string) --The name of the parameter in the HITLayout.
Value (string) --The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:return: {
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
:returns:
(integer) --
"""
pass | [
"def",
"create_hit",
"(",
"MaxAssignments",
"=",
"None",
",",
"AutoApprovalDelayInSeconds",
"=",
"None",
",",
"LifetimeInSeconds",
"=",
"None",
",",
"AssignmentDurationInSeconds",
"=",
"None",
",",
"Reward",
"=",
"None",
",",
"Title",
"=",
"None",
",",
"Keywords... | The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT , a new HIT is created for you, with a new HITTypeID . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters .
See also: AWS API Documentation
:example: response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: [REQUIRED]
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: [REQUIRED]
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: [REQUIRED]
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
(dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
(integer) --
LocaleValues (list) --The locale value to compare against the Qualification's value. The local value must be a valid ISO 3166 country code or supports ISO 3166-2 subdivisions. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
(dict) --The Locale data structure represents a geographical region or location.
Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.
:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return a AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
Note
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Polices.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Polices.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
(dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
Name (string) --The name of the parameter in the HITLayout.
Value (string) --The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:return: {
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
:returns:
(integer) -- | [
"The",
"CreateHIT",
"operation",
"creates",
"a",
"new",
"Human",
"Intelligence",
"Task",
"(",
"HIT",
")",
".",
"The",
"new",
"HIT",
"is",
"made",
"available",
"for",
"Workers",
"to",
"find",
"and",
"accept",
"on",
"the",
"Amazon",
"Mechanical",
"Turk",
"we... | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.