nwo stringlengths 5 106 | sha stringlengths 40 40 | path stringlengths 4 174 | language stringclasses 1
value | identifier stringlengths 1 140 | parameters stringlengths 0 87.7k | argument_list stringclasses 1
value | return_statement stringlengths 0 426k | docstring stringlengths 0 64.3k | docstring_summary stringlengths 0 26.3k | docstring_tokens list | function stringlengths 18 4.83M | function_tokens list | url stringlengths 83 304 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
edfungus/Crouton | ada98b3930192938a48909072b45cb84b945f875 | clients/python_clients/cf_demo_client/cf_env/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py | python | Page.__init__ | (self, data, url) | Initialise an instance with the Unicode page contents and the URL they
came from. | Initialise an instance with the Unicode page contents and the URL they
came from. | [
"Initialise",
"an",
"instance",
"with",
"the",
"Unicode",
"page",
"contents",
"and",
"the",
"URL",
"they",
"came",
"from",
"."
] | def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1) | [
"def",
"__init__",
"(",
"self",
",",
"data",
",",
"url",
")",
":",
"self",
".",
"data",
"=",
"data",
"self",
".",
"base_url",
"=",
"self",
".",
"url",
"=",
"url",
"m",
"=",
"self",
".",
"_base",
".",
"search",
"(",
"self",
".",
"data",
")",
"if... | https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/python_clients/cf_demo_client/cf_env/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py#L512-L521 | ||
aneisch/home-assistant-config | 86e381fde9609cb8871c439c433c12989e4e225d | custom_components/monitor_docker/helpers.py | python | DockerContainerAPI.set_name | (self, name) | Set the container name. | Set the container name. | [
"Set",
"the",
"container",
"name",
"."
] | def set_name(self, name):
"""Set the container name."""
self._name = name | [
"def",
"set_name",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"_name",
"=",
"name"
] | https://github.com/aneisch/home-assistant-config/blob/86e381fde9609cb8871c439c433c12989e4e225d/custom_components/monitor_docker/helpers.py#L1273-L1275 | ||
Chaffelson/nipyapi | d3b186fd701ce308c2812746d98af9120955e810 | nipyapi/nifi/apis/versions_api.py | python | VersionsApi.get_update_request | (self, id, **kwargs) | Returns the Update Request with the given ID
Returns the Update Request with the given ID. Once an Update Request has been created by performing a POST to /versions/update-requests/process-groups/{id}, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_update_request(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Update Request (required)
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread. | Returns the Update Request with the given ID
Returns the Update Request with the given ID. Once an Update Request has been created by performing a POST to /versions/update-requests/process-groups/{id}, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_update_request(id, callback=callback_function) | [
"Returns",
"the",
"Update",
"Request",
"with",
"the",
"given",
"ID",
"Returns",
"the",
"Update",
"Request",
"with",
"the",
"given",
"ID",
".",
"Once",
"an",
"Update",
"Request",
"has",
"been",
"created",
"by",
"performing",
"a",
"POST",
"to",
"/",
"version... | def get_update_request(self, id, **kwargs):
"""
Returns the Update Request with the given ID
Returns the Update Request with the given ID. Once an Update Request has been created by performing a POST to /versions/update-requests/process-groups/{id}, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures. Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_update_request(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Update Request (required)
:return: VersionedFlowUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_update_request_with_http_info(id, **kwargs)
else:
(data) = self.get_update_request_with_http_info(id, **kwargs)
return data | [
"def",
"get_update_request",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"get_update_request_with_http... | https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/apis/versions_api.py#L691-L715 | ||
hacktoolkit/django-htk | 902f3780630f1308aa97a70b9b62a5682239ff2d | lib/stripe_lib/utils.py | python | get_event_type | (event) | return event_type | Gets the event type
`event` can either be a StripeEvent object or just a JSON dictionary | Gets the event type | [
"Gets",
"the",
"event",
"type"
] | def get_event_type(event):
"""Gets the event type
`event` can either be a StripeEvent object or just a JSON dictionary
"""
if type(event) == dict:
event_type = event.get('type', None)
else:
event_type = event.type
return event_type | [
"def",
"get_event_type",
"(",
"event",
")",
":",
"if",
"type",
"(",
"event",
")",
"==",
"dict",
":",
"event_type",
"=",
"event",
".",
"get",
"(",
"'type'",
",",
"None",
")",
"else",
":",
"event_type",
"=",
"event",
".",
"type",
"return",
"event_type"
] | https://github.com/hacktoolkit/django-htk/blob/902f3780630f1308aa97a70b9b62a5682239ff2d/lib/stripe_lib/utils.py#L208-L217 | |
securityclippy/elasticintel | aa08d3e9f5ab1c000128e95161139ce97ff0e334 | ingest_feed_lambda/numpy/core/defchararray.py | python | less | (x1, x2) | return compare_chararrays(x1, x2, '<', True) | Return (x1 < x2) element-wise.
Unlike `numpy.greater`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, not_equal, greater_equal, less_equal, greater | Return (x1 < x2) element-wise. | [
"Return",
"(",
"x1",
"<",
"x2",
")",
"element",
"-",
"wise",
"."
] | def less(x1, x2):
"""
Return (x1 < x2) element-wise.
Unlike `numpy.greater`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, not_equal, greater_equal, less_equal, greater
"""
return compare_chararrays(x1, x2, '<', True) | [
"def",
"less",
"(",
"x1",
",",
"x2",
")",
":",
"return",
"compare_chararrays",
"(",
"x1",
",",
"x2",
",",
"'<'",
",",
"True",
")"
] | https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/numpy/core/defchararray.py#L219-L241 | |
facebookresearch/pyrobot | 27ffd64bbb7ce3ff6ec4b2122d84b438d5641d0f | src/pyrobot/core.py | python | Arm._callback_joint_states | (self, msg) | ROS subscriber callback for arm joint state (position, velocity)
:param msg: Contains message published in topic
:type msg: sensor_msgs/JointState | ROS subscriber callback for arm joint state (position, velocity) | [
"ROS",
"subscriber",
"callback",
"for",
"arm",
"joint",
"state",
"(",
"position",
"velocity",
")"
] | def _callback_joint_states(self, msg):
"""
ROS subscriber callback for arm joint state (position, velocity)
:param msg: Contains message published in topic
:type msg: sensor_msgs/JointState
"""
self.joint_state_lock.acquire()
for idx, name in enumerate(msg.name):
if name in self.arm_joint_names:
if idx < len(msg.position):
self._joint_angles[name] = msg.position[idx]
if idx < len(msg.velocity):
self._joint_velocities[name] = msg.velocity[idx]
if idx < len(msg.effort):
self._joint_efforts[name] = msg.effort[idx]
self.joint_state_lock.release() | [
"def",
"_callback_joint_states",
"(",
"self",
",",
"msg",
")",
":",
"self",
".",
"joint_state_lock",
".",
"acquire",
"(",
")",
"for",
"idx",
",",
"name",
"in",
"enumerate",
"(",
"msg",
".",
"name",
")",
":",
"if",
"name",
"in",
"self",
".",
"arm_joint_... | https://github.com/facebookresearch/pyrobot/blob/27ffd64bbb7ce3ff6ec4b2122d84b438d5641d0f/src/pyrobot/core.py#L1150-L1166 | ||
gammapy/gammapy | 735b25cd5bbed35e2004d633621896dcd5295e8b | gammapy/maps/axes.py | python | TimeMapAxis._init_copy | (self, **kwargs) | return self.__class__(**kwargs) | Init map axis instance by copying missing init arguments from self. | Init map axis instance by copying missing init arguments from self. | [
"Init",
"map",
"axis",
"instance",
"by",
"copying",
"missing",
"init",
"arguments",
"from",
"self",
"."
] | def _init_copy(self, **kwargs):
"""Init map axis instance by copying missing init arguments from self."""
argnames = inspect.getfullargspec(self.__init__).args
argnames.remove("self")
for arg in argnames:
value = getattr(self, "_" + arg)
kwargs.setdefault(arg, copy.deepcopy(value))
return self.__class__(**kwargs) | [
"def",
"_init_copy",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"argnames",
"=",
"inspect",
".",
"getfullargspec",
"(",
"self",
".",
"__init__",
")",
".",
"args",
"argnames",
".",
"remove",
"(",
"\"self\"",
")",
"for",
"arg",
"in",
"argnames",
":"... | https://github.com/gammapy/gammapy/blob/735b25cd5bbed35e2004d633621896dcd5295e8b/gammapy/maps/axes.py#L2354-L2363 | |
openstack/swift | b8d7c3dcb817504dcc0959ba52cc4ed2cf66c100 | swift/common/middleware/s3api/s3request.py | python | SigV4Mixin._parse_header_authentication | (self) | return cred_param['access'], sig | Parse v4 header authentication
- version 4:
'X-Amz-Credential' and 'X-Amz-Signature' should be in param
:raises: AccessDenied
:raises: AuthorizationHeaderMalformed | Parse v4 header authentication
- version 4:
'X-Amz-Credential' and 'X-Amz-Signature' should be in param
:raises: AccessDenied
:raises: AuthorizationHeaderMalformed | [
"Parse",
"v4",
"header",
"authentication",
"-",
"version",
"4",
":",
"X",
"-",
"Amz",
"-",
"Credential",
"and",
"X",
"-",
"Amz",
"-",
"Signature",
"should",
"be",
"in",
"param",
":",
"raises",
":",
"AccessDenied",
":",
"raises",
":",
"AuthorizationHeaderMa... | def _parse_header_authentication(self):
"""
Parse v4 header authentication
- version 4:
'X-Amz-Credential' and 'X-Amz-Signature' should be in param
:raises: AccessDenied
:raises: AuthorizationHeaderMalformed
"""
auth_str = swob.wsgi_to_str(self.headers['Authorization'])
cred_param = self._parse_credential(auth_str.partition(
"Credential=")[2].split(',')[0])
sig = auth_str.partition("Signature=")[2].split(',')[0]
if not sig:
raise AccessDenied()
signed_headers = auth_str.partition(
"SignedHeaders=")[2].split(',', 1)[0]
if not signed_headers:
# TODO: make sure if is it Malformed?
raise AuthorizationHeaderMalformed()
invalid_messages = {
'date': 'Invalid credential date "%s". This date is not the same '
'as X-Amz-Date: "%s".',
'region': "The authorization header is malformed; the region '%s' "
"is wrong; expecting '%s'",
'service': 'The authorization header is malformed; incorrect '
'service "%s". This endpoint belongs to "%s".',
'terminal': 'The authorization header is malformed; incorrect '
'terminal "%s". This endpoint uses "%s".',
}
for key in ('date', 'region', 'service', 'terminal'):
if cred_param[key] != self.scope[key]:
kwargs = {}
if key == 'region':
# Allow lowercase region name
# for AWS .NET SDK compatibility
if not self.scope[key].islower() and \
cred_param[key] == self.scope[key].lower():
self.location = self.location.lower()
continue
kwargs = {'region': self.scope['region']}
raise AuthorizationHeaderMalformed(
invalid_messages[key] % (cred_param[key], self.scope[key]),
**kwargs)
self._signed_headers = set(signed_headers.split(';'))
return cred_param['access'], sig | [
"def",
"_parse_header_authentication",
"(",
"self",
")",
":",
"auth_str",
"=",
"swob",
".",
"wsgi_to_str",
"(",
"self",
".",
"headers",
"[",
"'Authorization'",
"]",
")",
"cred_param",
"=",
"self",
".",
"_parse_credential",
"(",
"auth_str",
".",
"partition",
"(... | https://github.com/openstack/swift/blob/b8d7c3dcb817504dcc0959ba52cc4ed2cf66c100/swift/common/middleware/s3api/s3request.py#L300-L348 | |
jgagneastro/coffeegrindsize | 22661ebd21831dba4cf32bfc6ba59fe3d49f879c | App/venv/lib/python3.7/site-packages/pip/_vendor/distlib/database.py | python | InstalledDistribution._get_records | (self) | return results | Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376). | Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376). | [
"Get",
"the",
"list",
"of",
"installed",
"files",
"for",
"the",
"distribution",
":",
"return",
":",
"A",
"list",
"of",
"tuples",
"of",
"path",
"hash",
"and",
"size",
".",
"Note",
"that",
"hash",
"and",
"size",
"might",
"be",
"None",
"for",
"some",
"ent... | def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results | [
"def",
"_get_records",
"(",
"self",
")",
":",
"results",
"=",
"[",
"]",
"r",
"=",
"self",
".",
"get_distinfo_resource",
"(",
"'RECORD'",
")",
"with",
"contextlib",
".",
"closing",
"(",
"r",
".",
"as_stream",
"(",
")",
")",
"as",
"stream",
":",
"with",
... | https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/venv/lib/python3.7/site-packages/pip/_vendor/distlib/database.py#L580-L601 | |
Matheus-Garbelini/sweyntooth_bluetooth_low_energy_attacks | 40c985b9a9ff1189ddf278462440b120cf96b196 | libs/scapy/layers/x509.py | python | X509_TBSCertificate.get_issuer_str | (self) | return name_str | Returns a one-line string containing every type/value
in a rather specific order. sorted() built-in ensures unicity. | Returns a one-line string containing every type/value
in a rather specific order. sorted() built-in ensures unicity. | [
"Returns",
"a",
"one",
"-",
"line",
"string",
"containing",
"every",
"type",
"/",
"value",
"in",
"a",
"rather",
"specific",
"order",
".",
"sorted",
"()",
"built",
"-",
"in",
"ensures",
"unicity",
"."
] | def get_issuer_str(self):
"""
Returns a one-line string containing every type/value
in a rather specific order. sorted() built-in ensures unicity.
"""
name_str = ""
attrsDict = self.get_issuer()
for attrType, attrSymbol in _attrName_mapping:
if attrType in attrsDict:
name_str += "/" + attrSymbol + "="
name_str += attrsDict[attrType]
for attrType in sorted(attrsDict):
if attrType not in _attrName_specials:
name_str += "/" + attrType + "="
name_str += attrsDict[attrType]
return name_str | [
"def",
"get_issuer_str",
"(",
"self",
")",
":",
"name_str",
"=",
"\"\"",
"attrsDict",
"=",
"self",
".",
"get_issuer",
"(",
")",
"for",
"attrType",
",",
"attrSymbol",
"in",
"_attrName_mapping",
":",
"if",
"attrType",
"in",
"attrsDict",
":",
"name_str",
"+=",
... | https://github.com/Matheus-Garbelini/sweyntooth_bluetooth_low_energy_attacks/blob/40c985b9a9ff1189ddf278462440b120cf96b196/libs/scapy/layers/x509.py#L966-L981 | |
xiepaup/dbatools | 8549f2571aaee6a39f5c6f32179ac9c5d301a9aa | mysqlTools/mysql_utilities/mysql/utilities/common/console.py | python | _Command.get_command | (self) | return self.command | Return the current command.
Returns string - the current command | Return the current command.
Returns string - the current command | [
"Return",
"the",
"current",
"command",
".",
"Returns",
"string",
"-",
"the",
"current",
"command"
] | def get_command(self):
"""Return the current command.
Returns string - the current command
"""
return self.command | [
"def",
"get_command",
"(",
"self",
")",
":",
"return",
"self",
".",
"command"
] | https://github.com/xiepaup/dbatools/blob/8549f2571aaee6a39f5c6f32179ac9c5d301a9aa/mysqlTools/mysql_utilities/mysql/utilities/common/console.py#L209-L214 | |
zatosource/zato | 2a9d273f06f9d776fbfeb53e73855af6e40fa208 | code/zato-zmq/src/zato/zmq_/mdp/__init__.py | python | EventClientReply.serialize | (self) | return [self.recipient, b'', const.v01.client, self.service, self.body] | Serializes this message on behalf of a worker sending it to a broker. | Serializes this message on behalf of a worker sending it to a broker. | [
"Serializes",
"this",
"message",
"on",
"behalf",
"of",
"a",
"worker",
"sending",
"it",
"to",
"a",
"broker",
"."
] | def serialize(self):
""" Serializes this message on behalf of a worker sending it to a broker.
"""
return [self.recipient, b'', const.v01.client, self.service, self.body] | [
"def",
"serialize",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"recipient",
",",
"b''",
",",
"const",
".",
"v01",
".",
"client",
",",
"self",
".",
"service",
",",
"self",
".",
"body",
"]"
] | https://github.com/zatosource/zato/blob/2a9d273f06f9d776fbfeb53e73855af6e40fa208/code/zato-zmq/src/zato/zmq_/mdp/__init__.py#L219-L222 | |
johnolafenwa/TorchFusion | 8837ca2863e2d62192ed44e43b1827a7b56c30f8 | torchfusion/initializers/initializers.py | python | Kaiming_Normal.__init__ | (self,neg_slope=0,mode="fan_in",non_linearity="leaky_relu") | :param neg_slope:
:param mode:
:param non_linearity: | [] | def __init__(self,neg_slope=0,mode="fan_in",non_linearity="leaky_relu"):
"""
:param neg_slope:
:param mode:
:param non_linearity:
"""
self.neg_slope = neg_slope
self.mode = mode
self.non_linearity = non_linearity | [
"def",
"__init__",
"(",
"self",
",",
"neg_slope",
"=",
"0",
",",
"mode",
"=",
"\"fan_in\"",
",",
"non_linearity",
"=",
"\"leaky_relu\"",
")",
":",
"self",
".",
"neg_slope",
"=",
"neg_slope",
"self",
".",
"mode",
"=",
"mode",
"self",
".",
"non_linearity",
... | https://github.com/johnolafenwa/TorchFusion/blob/8837ca2863e2d62192ed44e43b1827a7b56c30f8/torchfusion/initializers/initializers.py#L79-L88 | |||
makerbot/ReplicatorG | d6f2b07785a5a5f1e172fb87cb4303b17c575d5d | skein_engines/skeinforge-47/skeinforge_application/skeinforge_plugins/craft_plugins/feed.py | python | main | () | Display the feed dialog. | Display the feed dialog. | [
"Display",
"the",
"feed",
"dialog",
"."
] | def main():
'Display the feed dialog.'
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository()) | [
"def",
"main",
"(",
")",
":",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
">",
"1",
":",
"writeOutput",
"(",
"' '",
".",
"join",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
")",
"else",
":",
"settings",
".",
"startMainLoopFromConstructor",
"... | https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-47/skeinforge_application/skeinforge_plugins/craft_plugins/feed.py#L175-L180 | ||
dmnfarrell/tkintertable | f3fc8950aaa0f087de100d671ce13c24006d9639 | tkintertable/Tables.py | python | TableCanvas.drawRowHeader | (self) | return | User has clicked to select a cell | User has clicked to select a cell | [
"User",
"has",
"clicked",
"to",
"select",
"a",
"cell"
] | def drawRowHeader(self):
"""User has clicked to select a cell"""
self.delete('rowheader')
x_start=self.x_start
y_start=self.y_start
h=self.rowheight
rowpos=0
for row in self.rowrange:
x1,y1,x2,y2 = self.getCellCoords(rowpos,0)
self.create_rectangle(0,y1,x_start-2,y2,
fill='gray75',
outline='white',
width=1,
tag='rowheader')
self.create_text(x_start/2,y1+h/2,
text=row+1,
fill='black',
font=self.thefont,
tag='rowheader')
rowpos+=1
return | [
"def",
"drawRowHeader",
"(",
"self",
")",
":",
"self",
".",
"delete",
"(",
"'rowheader'",
")",
"x_start",
"=",
"self",
".",
"x_start",
"y_start",
"=",
"self",
".",
"y_start",
"h",
"=",
"self",
".",
"rowheight",
"rowpos",
"=",
"0",
"for",
"row",
"in",
... | https://github.com/dmnfarrell/tkintertable/blob/f3fc8950aaa0f087de100d671ce13c24006d9639/tkintertable/Tables.py#L1537-L1557 | |
pgmpy/pgmpy | 24279929a28082ea994c52f3d165ca63fc56b02b | pgmpy/factors/discrete/DiscreteFactor.py | python | DiscreteFactor.scope | (self) | return self.variables | Returns the scope of the factor i.e. the variables on which the factor is defined.
Returns
-------
Scope of the factor: list
List of variables on which the factor is defined.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12))
>>> phi.scope()
['x1', 'x2', 'x3'] | Returns the scope of the factor i.e. the variables on which the factor is defined. | [
"Returns",
"the",
"scope",
"of",
"the",
"factor",
"i",
".",
"e",
".",
"the",
"variables",
"on",
"which",
"the",
"factor",
"is",
"defined",
"."
] | def scope(self):
"""
Returns the scope of the factor i.e. the variables on which the factor is defined.
Returns
-------
Scope of the factor: list
List of variables on which the factor is defined.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12))
>>> phi.scope()
['x1', 'x2', 'x3']
"""
return self.variables | [
"def",
"scope",
"(",
"self",
")",
":",
"return",
"self",
".",
"variables"
] | https://github.com/pgmpy/pgmpy/blob/24279929a28082ea994c52f3d165ca63fc56b02b/pgmpy/factors/discrete/DiscreteFactor.py#L110-L126 | |
demisto/content | 5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07 | Packs/VMwareWorkspaceONEUEM/Integrations/VMwareWorkspaceONEUEM/VMwareWorkspaceONEUEM.py | python | test_module | (client: Client) | return 'ok' | Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param client: client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str`` | Tests API connectivity and authentication' | [
"Tests",
"API",
"connectivity",
"and",
"authentication"
] | def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param client: client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
client.http_request(method='GET', url_suffix='devices/search')
return 'ok' | [
"def",
"test_module",
"(",
"client",
":",
"Client",
")",
"->",
"str",
":",
"client",
".",
"http_request",
"(",
"method",
"=",
"'GET'",
",",
"url_suffix",
"=",
"'devices/search'",
")",
"return",
"'ok'"
] | https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/VMwareWorkspaceONEUEM/Integrations/VMwareWorkspaceONEUEM/VMwareWorkspaceONEUEM.py#L432-L447 | |
thaines/helit | 04bd36ee0fb6b762c63d746e2cd8813641dceda9 | handwriting/hst/texture_cache.py | python | TextureCache.__getitem__ | (self, fn) | return ret | Converts a filename into a numpy array of the texture, or returns None if there is no such file. | Converts a filename into a numpy array of the texture, or returns None if there is no such file. | [
"Converts",
"a",
"filename",
"into",
"a",
"numpy",
"array",
"of",
"the",
"texture",
"or",
"returns",
"None",
"if",
"there",
"is",
"no",
"such",
"file",
"."
] | def __getitem__(self, fn):
"""Converts a filename into a numpy array of the texture, or returns None if there is no such file."""
# Handle it already being in the cache...
if fn in self.cache:
ret = self.cache[fn]
del self.cache[fn]
self.cache[fn] = ret # Put it to the back of the list.
return ret
# Load the file and convert it into a numpy array...
alt_fn = os.path.splitext(fn)[0] + '_alpha.png'
if os.path.exists(alt_fn):
pixbuf = GdkPixbuf.Pixbuf.new_from_file(alt_fn)
elif os.path.exists(fn):
pixbuf = GdkPixbuf.Pixbuf.new_from_file(fn)
else:
return None
texture = cairo.ImageSurface(cairo.FORMAT_ARGB32, pixbuf.get_width(), pixbuf.get_height())
ctx = cairo.Context(texture)
ctx.set_operator(cairo.OPERATOR_SOURCE)
Gdk.cairo_set_source_pixbuf(ctx, pixbuf, 0, 0)
ctx.paint()
del pixbuf
ret = numpy.fromstring(texture.get_data(), dtype=numpy.uint8)
ret = ret.reshape((texture.get_height(), texture.get_width(), -1))
# Handle cache expiry and return...
self.cache[fn] = ret
if len(self.cache) > self.limit:
self.cache.popitem(False)
return ret | [
"def",
"__getitem__",
"(",
"self",
",",
"fn",
")",
":",
"# Handle it already being in the cache...",
"if",
"fn",
"in",
"self",
".",
"cache",
":",
"ret",
"=",
"self",
".",
"cache",
"[",
"fn",
"]",
"del",
"self",
".",
"cache",
"[",
"fn",
"]",
"self",
"."... | https://github.com/thaines/helit/blob/04bd36ee0fb6b762c63d746e2cd8813641dceda9/handwriting/hst/texture_cache.py#L34-L68 | |
Pyomo/pyomo | dbd4faee151084f343b893cc2b0c04cf2b76fd92 | pyomo/contrib/pynumero/sparse/block_vector.py | python | BlockVector.argmax | (self, axis=None, out=None) | return self.flatten().argmax(axis=axis, out=out) | Returns the index of the larges element. | Returns the index of the larges element. | [
"Returns",
"the",
"index",
"of",
"the",
"larges",
"element",
"."
] | def argmax(self, axis=None, out=None):
"""
Returns the index of the larges element.
"""
assert_block_structure(self)
return self.flatten().argmax(axis=axis, out=out) | [
"def",
"argmax",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"out",
"=",
"None",
")",
":",
"assert_block_structure",
"(",
"self",
")",
"return",
"self",
".",
"flatten",
"(",
")",
".",
"argmax",
"(",
"axis",
"=",
"axis",
",",
"out",
"=",
"out",
")"
... | https://github.com/Pyomo/pyomo/blob/dbd4faee151084f343b893cc2b0c04cf2b76fd92/pyomo/contrib/pynumero/sparse/block_vector.py#L616-L621 | |
mne-tools/mne-python | f90b303ce66a8415e64edd4605b09ac0179c1ebf | mne/externals/tqdm/_tqdm/std.py | python | tqdm.write | (cls, s, file=None, end="\n", nolock=False) | Print a message via tqdm (without overlap with bars). | Print a message via tqdm (without overlap with bars). | [
"Print",
"a",
"message",
"via",
"tqdm",
"(",
"without",
"overlap",
"with",
"bars",
")",
"."
] | def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end) | [
"def",
"write",
"(",
"cls",
",",
"s",
",",
"file",
"=",
"None",
",",
"end",
"=",
"\"\\n\"",
",",
"nolock",
"=",
"False",
")",
":",
"fp",
"=",
"file",
"if",
"file",
"is",
"not",
"None",
"else",
"sys",
".",
"stdout",
"with",
"cls",
".",
"external_w... | https://github.com/mne-tools/mne-python/blob/f90b303ce66a8415e64edd4605b09ac0179c1ebf/mne/externals/tqdm/_tqdm/std.py#L570-L576 | ||
deepmind/spriteworld | ace9e186ee9a819e8f4de070bd11cf27e2265b63 | spriteworld/configs/examples/goal_finding_embodied.py | python | get_config | (mode=None) | return config | Generate environment config.
Args:
mode: Unused task mode.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment. | Generate environment config. | [
"Generate",
"environment",
"config",
"."
] | def get_config(mode=None):
"""Generate environment config.
Args:
mode: Unused task mode.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
del mode
shared_factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('shape', ['square', 'triangle', 'circle']),
distribs.Discrete('scale', [0.13]),
distribs.Continuous('c1', 0.3, 1.),
distribs.Continuous('c2', 0.9, 1.),
])
target_hue = distribs.Continuous('c0', 0., 0.4)
distractor_hue = distribs.Continuous('c0', 0.5, 0.9)
target_factors = distribs.Product([
target_hue,
shared_factors,
])
distractor_factors = distribs.Product([
distractor_hue,
shared_factors,
])
target_sprite_gen = sprite_generators.generate_sprites(
target_factors, num_sprites=NUM_TARGETS)
distractor_sprite_gen = sprite_generators.generate_sprites(
distractor_factors, num_sprites=NUM_DISTRACTORS)
sprite_gen = sprite_generators.chain_generators(target_sprite_gen,
distractor_sprite_gen)
# Randomize sprite ordering to eliminate any task information from occlusions
sprite_gen = sprite_generators.shuffle(sprite_gen)
# Create the agent body
agent_body_factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('shape', ['circle']),
distribs.Discrete('scale', [0.07]),
distribs.Discrete('c0', [1.]),
distribs.Discrete('c1', [0.]),
distribs.Discrete('c2', [1.]),
])
agent_body_gen = sprite_generators.generate_sprites(
agent_body_factors, num_sprites=1)
sprite_gen = sprite_generators.chain_generators(sprite_gen, agent_body_gen)
task = tasks.FindGoalPosition(
filter_distrib=target_hue, terminate_distance=TERMINATE_DISTANCE)
renderers = {
'image':
spriteworld_renderers.PILRenderer(
image_size=(64, 64),
anti_aliasing=5,
color_to_rgb=spriteworld_renderers.color_maps.hsv_to_rgb)
}
config = {
'task': task,
'action_space': action_spaces.Embodied(step_size=0.05),
'renderers': renderers,
'init_sprites': sprite_gen,
'max_episode_length': 50,
'metadata': {
'name': os.path.basename(__file__),
}
}
return config | [
"def",
"get_config",
"(",
"mode",
"=",
"None",
")",
":",
"del",
"mode",
"shared_factors",
"=",
"distribs",
".",
"Product",
"(",
"[",
"distribs",
".",
"Continuous",
"(",
"'x'",
",",
"0.1",
",",
"0.9",
")",
",",
"distribs",
".",
"Continuous",
"(",
"'y'",... | https://github.com/deepmind/spriteworld/blob/ace9e186ee9a819e8f4de070bd11cf27e2265b63/spriteworld/configs/examples/goal_finding_embodied.py#L41-L116 | |
ninthDevilHAUNSTER/ArknightsAutoHelper | a27a930502d6e432368d9f62595a1d69a992f4e6 | vendor/penguin_client/penguin_client/models/item_quantity.py | python | ItemQuantity.quantity | (self, quantity) | Sets the quantity of this ItemQuantity.
The number of times this item has dropped # noqa: E501
:param quantity: The quantity of this ItemQuantity. # noqa: E501
:type: int | Sets the quantity of this ItemQuantity. | [
"Sets",
"the",
"quantity",
"of",
"this",
"ItemQuantity",
"."
] | def quantity(self, quantity):
"""Sets the quantity of this ItemQuantity.
The number of times this item has dropped # noqa: E501
:param quantity: The quantity of this ItemQuantity. # noqa: E501
:type: int
"""
self._quantity = quantity | [
"def",
"quantity",
"(",
"self",
",",
"quantity",
")",
":",
"self",
".",
"_quantity",
"=",
"quantity"
] | https://github.com/ninthDevilHAUNSTER/ArknightsAutoHelper/blob/a27a930502d6e432368d9f62595a1d69a992f4e6/vendor/penguin_client/penguin_client/models/item_quantity.py#L88-L97 | ||
floooh/fips | 5ce5aebfc7c69778cab03ef5f8830928f2bad6d4 | mod/tools/cmake_gui.py | python | check_exists | (fips_dir) | test if cmake-gui is in the path
:returns: True if cmake-gui is in the path | test if cmake-gui is in the path
:returns: True if cmake-gui is in the path | [
"test",
"if",
"cmake",
"-",
"gui",
"is",
"in",
"the",
"path",
":",
"returns",
":",
"True",
"if",
"cmake",
"-",
"gui",
"is",
"in",
"the",
"path"
] | def check_exists(fips_dir) :
"""test if cmake-gui is in the path
:returns: True if cmake-gui is in the path
"""
try:
out = subprocess.check_output(['cmake-gui', '--version'])
return True
except (OSError, subprocess.CalledProcessError) :
return False; | [
"def",
"check_exists",
"(",
"fips_dir",
")",
":",
"try",
":",
"out",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'cmake-gui'",
",",
"'--version'",
"]",
")",
"return",
"True",
"except",
"(",
"OSError",
",",
"subprocess",
".",
"CalledProcessError",
")",... | https://github.com/floooh/fips/blob/5ce5aebfc7c69778cab03ef5f8830928f2bad6d4/mod/tools/cmake_gui.py#L10-L19 | ||
Yelp/paasta | 6c08c04a577359509575c794b973ea84d72accf9 | paasta_tools/marathon_tools.py | python | get_expected_instance_count_for_namespace | (
service: str,
namespace: str,
cluster: str = None,
instance_type_class: Type[LongRunningServiceConfig] = MarathonServiceConfig,
soa_dir: str = DEFAULT_SOA_DIR,
) | return total_expected | Get the number of expected instances for a namespace, based on the number
of instances set to run on that namespace as specified in Marathon service
configuration files.
:param service: The service's name
:param namespace: The namespace for that service to check
instance_type_class: The type of the instance, options are MarathonServiceConfig and KubernetesDeploymentConfig,
:param soa_dir: The SOA configuration directory to read from
:returns: An integer value of the # of expected instances for the namespace | Get the number of expected instances for a namespace, based on the number
of instances set to run on that namespace as specified in Marathon service
configuration files. | [
"Get",
"the",
"number",
"of",
"expected",
"instances",
"for",
"a",
"namespace",
"based",
"on",
"the",
"number",
"of",
"instances",
"set",
"to",
"run",
"on",
"that",
"namespace",
"as",
"specified",
"in",
"Marathon",
"service",
"configuration",
"files",
"."
] | def get_expected_instance_count_for_namespace(
service: str,
namespace: str,
cluster: str = None,
instance_type_class: Type[LongRunningServiceConfig] = MarathonServiceConfig,
soa_dir: str = DEFAULT_SOA_DIR,
) -> int:
"""Get the number of expected instances for a namespace, based on the number
of instances set to run on that namespace as specified in Marathon service
configuration files.
:param service: The service's name
:param namespace: The namespace for that service to check
instance_type_class: The type of the instance, options are MarathonServiceConfig and KubernetesDeploymentConfig,
:param soa_dir: The SOA configuration directory to read from
:returns: An integer value of the # of expected instances for the namespace"""
total_expected = 0
if not cluster:
cluster = load_system_paasta_config().get_cluster()
pscl = PaastaServiceConfigLoader(
service=service, soa_dir=soa_dir, load_deployments=False
)
for job_config in pscl.instance_configs(
cluster=cluster, instance_type_class=instance_type_class
):
if f"{service}.{namespace}" in job_config.get_registrations():
total_expected += job_config.get_instances()
return total_expected | [
"def",
"get_expected_instance_count_for_namespace",
"(",
"service",
":",
"str",
",",
"namespace",
":",
"str",
",",
"cluster",
":",
"str",
"=",
"None",
",",
"instance_type_class",
":",
"Type",
"[",
"LongRunningServiceConfig",
"]",
"=",
"MarathonServiceConfig",
",",
... | https://github.com/Yelp/paasta/blob/6c08c04a577359509575c794b973ea84d72accf9/paasta_tools/marathon_tools.py#L1355-L1383 | |
IronLanguages/ironpython3 | 7a7bb2a872eeab0d1009fc8a6e24dca43f65b693 | Src/StdLib/Lib/binhex.py | python | HexBin._readheader | (self) | [] | def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1 + 4 + 4 + 2 + 4 + 4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER | [
"def",
"_readheader",
"(",
"self",
")",
":",
"len",
"=",
"self",
".",
"_read",
"(",
"1",
")",
"fname",
"=",
"self",
".",
"_read",
"(",
"ord",
"(",
"len",
")",
")",
"rest",
"=",
"self",
".",
"_read",
"(",
"1",
"+",
"4",
"+",
"4",
"+",
"2",
"... | https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/binhex.py#L389-L407 | ||||
oilshell/oil | 94388e7d44a9ad879b12615f6203b38596b5a2d3 | Python-2.7.13/Lib/htmllib.py | python | HTMLParser.do_isindex | (self, attrs) | [] | def do_isindex(self, attrs):
self.isindex = 1 | [
"def",
"do_isindex",
"(",
"self",
",",
"attrs",
")",
":",
"self",
".",
"isindex",
"=",
"1"
] | https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/htmllib.py#L161-L162 | ||||
wireservice/leather | a10c27e0c073a6307133a7a2ad4db0268f02bf4b | leather/svg.py | python | translate | (x, y) | return 'translate(%i %i)' % (x, y) | Generate an SVG transform statement representing a simple translation. | Generate an SVG transform statement representing a simple translation. | [
"Generate",
"an",
"SVG",
"transform",
"statement",
"representing",
"a",
"simple",
"translation",
"."
] | def translate(x, y):
"""
Generate an SVG transform statement representing a simple translation.
"""
return 'translate(%i %i)' % (x, y) | [
"def",
"translate",
"(",
"x",
",",
"y",
")",
":",
"return",
"'translate(%i %i)'",
"%",
"(",
"x",
",",
"y",
")"
] | https://github.com/wireservice/leather/blob/a10c27e0c073a6307133a7a2ad4db0268f02bf4b/leather/svg.py#L32-L36 | |
bruderstein/PythonScript | df9f7071ddf3a079e3a301b9b53a6dc78cf1208f | PythonLib/full/argparse.py | python | _ActionsContainer.add_argument_group | (self, *args, **kwargs) | return group | [] | def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group | [
"def",
"add_argument_group",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"group",
"=",
"_ArgumentGroup",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_action_groups",
".",
"append",
"(",
"group",
")... | https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/argparse.py#L1439-L1442 | |||
numba/numba | bf480b9e0da858a65508c2b17759a72ee6a44c51 | numba/core/decorators.py | python | cfunc | (sig, locals={}, cache=False, pipeline_class=None, **options) | return wrapper | This decorator is used to compile a Python function into a C callback
usable with foreign C libraries.
Usage::
@cfunc("float64(float64, float64)", nopython=True, cache=True)
def add(a, b):
return a + b | This decorator is used to compile a Python function into a C callback
usable with foreign C libraries. | [
"This",
"decorator",
"is",
"used",
"to",
"compile",
"a",
"Python",
"function",
"into",
"a",
"C",
"callback",
"usable",
"with",
"foreign",
"C",
"libraries",
"."
] | def cfunc(sig, locals={}, cache=False, pipeline_class=None, **options):
"""
This decorator is used to compile a Python function into a C callback
usable with foreign C libraries.
Usage::
@cfunc("float64(float64, float64)", nopython=True, cache=True)
def add(a, b):
return a + b
"""
sig = sigutils.normalize_signature(sig)
def wrapper(func):
from numba.core.ccallback import CFunc
additional_args = {}
if pipeline_class is not None:
additional_args['pipeline_class'] = pipeline_class
res = CFunc(func, sig, locals=locals, options=options, **additional_args)
if cache:
res.enable_caching()
res.compile()
return res
return wrapper | [
"def",
"cfunc",
"(",
"sig",
",",
"locals",
"=",
"{",
"}",
",",
"cache",
"=",
"False",
",",
"pipeline_class",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"sig",
"=",
"sigutils",
".",
"normalize_signature",
"(",
"sig",
")",
"def",
"wrapper",
"(",
... | https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/core/decorators.py#L261-L285 | |
smart-mobile-software/gitstack | d9fee8f414f202143eb6e620529e8e5539a2af56 | python/Lib/warnings.py | python | _show_warning | (message, category, filename, lineno, file=None, line=None) | Hook to write a warning to a file; replace if you like. | Hook to write a warning to a file; replace if you like. | [
"Hook",
"to",
"write",
"a",
"warning",
"to",
"a",
"file",
";",
"replace",
"if",
"you",
"like",
"."
] | def _show_warning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
if file is None:
file = sys.stderr
try:
file.write(formatwarning(message, category, filename, lineno, line))
except IOError:
pass # the file (probably stderr) is invalid - this warning gets lost. | [
"def",
"_show_warning",
"(",
"message",
",",
"category",
",",
"filename",
",",
"lineno",
",",
"file",
"=",
"None",
",",
"line",
"=",
"None",
")",
":",
"if",
"file",
"is",
"None",
":",
"file",
"=",
"sys",
".",
"stderr",
"try",
":",
"file",
".",
"wri... | https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/warnings.py#L24-L31 | ||
KhronosGroup/NNEF-Tools | c913758ca687dab8cb7b49e8f1556819a2d0ca25 | nnef_tools/model/graph.py | python | Graph.__str__ | (self) | return "graph {name}({inputs}) -> ({outputs})".format(
name=repr(self),
inputs=', '.join(repr(input) for input in self.inputs),
outputs=', '.join(repr(input) for input in self.outputs),
) | [] | def __str__(self):
return "graph {name}({inputs}) -> ({outputs})".format(
name=repr(self),
inputs=', '.join(repr(input) for input in self.inputs),
outputs=', '.join(repr(input) for input in self.outputs),
) | [
"def",
"__str__",
"(",
"self",
")",
":",
"return",
"\"graph {name}({inputs}) -> ({outputs})\"",
".",
"format",
"(",
"name",
"=",
"repr",
"(",
"self",
")",
",",
"inputs",
"=",
"', '",
".",
"join",
"(",
"repr",
"(",
"input",
")",
"for",
"input",
"in",
"sel... | https://github.com/KhronosGroup/NNEF-Tools/blob/c913758ca687dab8cb7b49e8f1556819a2d0ca25/nnef_tools/model/graph.py#L431-L436 | |||
NoGameNoLife00/mybolg | afe17ea5bfe405e33766e5682c43a4262232ee12 | libs/jinja2/parser.py | python | Parser.parse_tuple | (self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False) | return nodes.Tuple(args, 'load', lineno=lineno) | Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
if no commas where found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `no_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not. | Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
if no commas where found. | [
"Works",
"like",
"parse_expression",
"but",
"if",
"multiple",
"expressions",
"are",
"delimited",
"by",
"a",
"comma",
"a",
":",
"class",
":",
"~jinja2",
".",
"nodes",
".",
"Tuple",
"node",
"is",
"created",
".",
"This",
"method",
"could",
"also",
"return",
"... | def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
if no commas where found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `no_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno) | [
"def",
"parse_tuple",
"(",
"self",
",",
"simplified",
"=",
"False",
",",
"with_condexpr",
"=",
"True",
",",
"extra_end_rules",
"=",
"None",
",",
"explicit_parentheses",
"=",
"False",
")",
":",
"lineno",
"=",
"self",
".",
"stream",
".",
"current",
".",
"lin... | https://github.com/NoGameNoLife00/mybolg/blob/afe17ea5bfe405e33766e5682c43a4262232ee12/libs/jinja2/parser.py#L578-L631 | |
lad1337/XDM | 0c1b7009fe00f06f102a6f67c793478f515e7efe | site-packages/pylint/checkers/imports.py | python | ImportsChecker._external_dependencies_info | (self) | return self.__ext_dep_info | return cached external dependencies information or build and
cache them | return cached external dependencies information or build and
cache them | [
"return",
"cached",
"external",
"dependencies",
"information",
"or",
"build",
"and",
"cache",
"them"
] | def _external_dependencies_info(self):
"""return cached external dependencies information or build and
cache them
"""
if self.__ext_dep_info is None:
package = self.linter.base_name
self.__ext_dep_info = result = {}
for importee, importers in self.stats['dependencies'].iteritems():
if not importee.startswith(package):
result[importee] = importers
return self.__ext_dep_info | [
"def",
"_external_dependencies_info",
"(",
"self",
")",
":",
"if",
"self",
".",
"__ext_dep_info",
"is",
"None",
":",
"package",
"=",
"self",
".",
"linter",
".",
"base_name",
"self",
".",
"__ext_dep_info",
"=",
"result",
"=",
"{",
"}",
"for",
"importee",
",... | https://github.com/lad1337/XDM/blob/0c1b7009fe00f06f102a6f67c793478f515e7efe/site-packages/pylint/checkers/imports.py#L356-L366 | |
mozman/ezdxf | 59d0fc2ea63f5cf82293428f5931da7e9f9718e9 | src/ezdxf/lldxf/tags.py | python | binary_data_to_dxf_tags | (
data: bytes,
length_group_code: int = 160,
value_group_code: int = 310,
value_size=127,
) | return tags | Convert binary data to DXF tags. | Convert binary data to DXF tags. | [
"Convert",
"binary",
"data",
"to",
"DXF",
"tags",
"."
] | def binary_data_to_dxf_tags(
data: bytes,
length_group_code: int = 160,
value_group_code: int = 310,
value_size=127,
) -> Tags:
"""Convert binary data to DXF tags."""
tags = Tags()
length = len(data)
tags.append(dxftag(length_group_code, length))
index = 0
while index < length:
chunk = data[index : index + value_size]
tags.append(dxftag(value_group_code, chunk))
index += value_size
return tags | [
"def",
"binary_data_to_dxf_tags",
"(",
"data",
":",
"bytes",
",",
"length_group_code",
":",
"int",
"=",
"160",
",",
"value_group_code",
":",
"int",
"=",
"310",
",",
"value_size",
"=",
"127",
",",
")",
"->",
"Tags",
":",
"tags",
"=",
"Tags",
"(",
")",
"... | https://github.com/mozman/ezdxf/blob/59d0fc2ea63f5cf82293428f5931da7e9f9718e9/src/ezdxf/lldxf/tags.py#L416-L431 | |
google/clusterfuzz | f358af24f414daa17a3649b143e71ea71871ef59 | src/clusterfuzz/_internal/bot/tasks/corpus_pruning_task.py | python | choose_cross_pollination_strategy | (current_fuzzer_name) | return (Pollination.RANDOM, None) | Chooses cross pollination strategy. In seperate function to mock for
predictable test behaviror. | Chooses cross pollination strategy. In seperate function to mock for
predictable test behaviror. | [
"Chooses",
"cross",
"pollination",
"strategy",
".",
"In",
"seperate",
"function",
"to",
"mock",
"for",
"predictable",
"test",
"behaviror",
"."
] | def choose_cross_pollination_strategy(current_fuzzer_name):
"""Chooses cross pollination strategy. In seperate function to mock for
predictable test behaviror."""
method = random.choice([Pollination.RANDOM, Pollination.TAGGED])
if method == Pollination.TAGGED:
similar_targets = corpus_tagging.get_similarly_tagged_fuzzers(
current_fuzzer_name)
if similar_targets:
return (Pollination.TAGGED, random.choice(list(similar_targets.keys())))
return (Pollination.RANDOM, None) | [
"def",
"choose_cross_pollination_strategy",
"(",
"current_fuzzer_name",
")",
":",
"method",
"=",
"random",
".",
"choice",
"(",
"[",
"Pollination",
".",
"RANDOM",
",",
"Pollination",
".",
"TAGGED",
"]",
")",
"if",
"method",
"==",
"Pollination",
".",
"TAGGED",
"... | https://github.com/google/clusterfuzz/blob/f358af24f414daa17a3649b143e71ea71871ef59/src/clusterfuzz/_internal/bot/tasks/corpus_pruning_task.py#L904-L914 | |
cloudera/hue | 23f02102d4547c17c32bd5ea0eb24e9eadd657a4 | desktop/core/ext-py/Django-1.11.29/django/contrib/gis/gdal/prototypes/errcheck.py | python | check_envelope | (result, func, cargs, offset=-1) | return env | Checks a function that returns an OGR Envelope by reference. | Checks a function that returns an OGR Envelope by reference. | [
"Checks",
"a",
"function",
"that",
"returns",
"an",
"OGR",
"Envelope",
"by",
"reference",
"."
] | def check_envelope(result, func, cargs, offset=-1):
"Checks a function that returns an OGR Envelope by reference."
env = ptr_byref(cargs, offset)
return env | [
"def",
"check_envelope",
"(",
"result",
",",
"func",
",",
"cargs",
",",
"offset",
"=",
"-",
"1",
")",
":",
"env",
"=",
"ptr_byref",
"(",
"cargs",
",",
"offset",
")",
"return",
"env"
] | https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/Django-1.11.29/django/contrib/gis/gdal/prototypes/errcheck.py#L71-L74 | |
WoLpH/python-progressbar | 7f045a251435ed4d0ecb2b0ce78a9c15af8529cb | progressbar/bar.py | python | ProgressBar._needs_update | (self) | return False | Returns whether the ProgressBar should redraw the line. | Returns whether the ProgressBar should redraw the line. | [
"Returns",
"whether",
"the",
"ProgressBar",
"should",
"redraw",
"the",
"line",
"."
] | def _needs_update(self):
'Returns whether the ProgressBar should redraw the line.'
delta = timeit.default_timer() - self._last_update_timer
if delta < self.min_poll_interval:
# Prevent updating too often
return False
elif self.poll_interval and delta > self.poll_interval:
# Needs to redraw timers and animations
return True
# Update if value increment is not large enough to
# add more bars to progressbar (according to current
# terminal width)
try:
divisor = self.max_value / self.term_width # float division
if self.value // divisor != self.previous_value // divisor:
return True
except Exception:
# ignore any division errors
pass
# No need to redraw yet
return False | [
"def",
"_needs_update",
"(",
"self",
")",
":",
"delta",
"=",
"timeit",
".",
"default_timer",
"(",
")",
"-",
"self",
".",
"_last_update_timer",
"if",
"delta",
"<",
"self",
".",
"min_poll_interval",
":",
"# Prevent updating too often",
"return",
"False",
"elif",
... | https://github.com/WoLpH/python-progressbar/blob/7f045a251435ed4d0ecb2b0ce78a9c15af8529cb/progressbar/bar.py#L623-L645 | |
dimagi/commcare-hq | d67ff1d3b4c51fa050c19e60c3253a79d3452a39 | corehq/apps/api/es.py | python | XFormServerModifiedParams.consume_params | (self, raw_params) | [] | def consume_params(self, raw_params):
value = raw_params.pop(self.param, None)
if value:
return filters.OR(
filters.AND(
filters.NOT(filters.missing(self.param)), filters.range_filter(self.param, **value)
),
filters.AND(
filters.missing(self.param), filters.range_filter("received_on", **value)
)
) | [
"def",
"consume_params",
"(",
"self",
",",
"raw_params",
")",
":",
"value",
"=",
"raw_params",
".",
"pop",
"(",
"self",
".",
"param",
",",
"None",
")",
"if",
"value",
":",
"return",
"filters",
".",
"OR",
"(",
"filters",
".",
"AND",
"(",
"filters",
".... | https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/api/es.py#L478-L488 | ||||
tomplus/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | kubernetes_asyncio/client/models/v1_network_policy_egress_rule.py | python | V1NetworkPolicyEgressRule.__init__ | (self, ports=None, to=None, local_vars_configuration=None) | V1NetworkPolicyEgressRule - a model defined in OpenAPI | V1NetworkPolicyEgressRule - a model defined in OpenAPI | [
"V1NetworkPolicyEgressRule",
"-",
"a",
"model",
"defined",
"in",
"OpenAPI"
] | def __init__(self, ports=None, to=None, local_vars_configuration=None): # noqa: E501
"""V1NetworkPolicyEgressRule - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._ports = None
self._to = None
self.discriminator = None
if ports is not None:
self.ports = ports
if to is not None:
self.to = to | [
"def",
"__init__",
"(",
"self",
",",
"ports",
"=",
"None",
",",
"to",
"=",
"None",
",",
"local_vars_configuration",
"=",
"None",
")",
":",
"# noqa: E501",
"# noqa: E501",
"if",
"local_vars_configuration",
"is",
"None",
":",
"local_vars_configuration",
"=",
"Conf... | https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1_network_policy_egress_rule.py#L45-L58 | ||
giswqs/whitebox-python | b4df0bbb10a1dee3bd0f6b3482511f7c829b38fe | whitebox/whitebox_tools.py | python | WhiteboxTools.lidar_classify_subset | (self, base, subset, output, subset_class, nonsubset_class=None, callback=None) | return self.run_tool('lidar_classify_subset', args, callback) | Classifies the values in one LiDAR point cloud that correpond with points in a subset cloud.
Keyword arguments:
base -- Input base LiDAR file.
subset -- Input subset LiDAR file.
output -- Output LiDAR file.
subset_class -- Subset point class value (must be 0-18; see LAS specifications).
nonsubset_class -- Non-subset point class value (must be 0-18; see LAS specifications).
callback -- Custom function for handling tool text outputs. | Classifies the values in one LiDAR point cloud that correpond with points in a subset cloud. | [
"Classifies",
"the",
"values",
"in",
"one",
"LiDAR",
"point",
"cloud",
"that",
"correpond",
"with",
"points",
"in",
"a",
"subset",
"cloud",
"."
] | def lidar_classify_subset(self, base, subset, output, subset_class, nonsubset_class=None, callback=None):
"""Classifies the values in one LiDAR point cloud that correpond with points in a subset cloud.
Keyword arguments:
base -- Input base LiDAR file.
subset -- Input subset LiDAR file.
output -- Output LiDAR file.
subset_class -- Subset point class value (must be 0-18; see LAS specifications).
nonsubset_class -- Non-subset point class value (must be 0-18; see LAS specifications).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--base='{}'".format(base))
args.append("--subset='{}'".format(subset))
args.append("--output='{}'".format(output))
args.append("--subset_class='{}'".format(subset_class))
if nonsubset_class is not None: args.append("--nonsubset_class='{}'".format(nonsubset_class))
return self.run_tool('lidar_classify_subset', args, callback) | [
"def",
"lidar_classify_subset",
"(",
"self",
",",
"base",
",",
"subset",
",",
"output",
",",
"subset_class",
",",
"nonsubset_class",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"args",
"=",
"[",
"]",
"args",
".",
"append",
"(",
"\"--base='{}'\"",
... | https://github.com/giswqs/whitebox-python/blob/b4df0bbb10a1dee3bd0f6b3482511f7c829b38fe/whitebox/whitebox_tools.py#L6609-L6627 | |
pymedusa/Medusa | 1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38 | ext/pyparsing.py | python | ParserElement.parseString | (self, instring, parseAll=False) | Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
Returns the parsed data as a :class:`ParseResults` object, which may be
accessed as a list, or as a dict or object with attributes if the given parser
includes results names.
If you want the grammar to require that the entire input string be
successfully parsed, then set ``parseAll`` to True (equivalent to ending
the grammar with ``StringEnd()``).
Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the ``loc`` argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling ``parseWithTabs`` on your grammar before calling ``parseString``
(see :class:`parseWithTabs`)
- define your parse action using the full ``(s, loc, toks)`` signature, and
reference the input string using the parse action's ``s`` argument
- explictly expand the tabs in your input string before calling
``parseString``
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text | Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built. | [
"Execute",
"the",
"parse",
"expression",
"with",
"the",
"given",
"string",
".",
"This",
"is",
"the",
"main",
"interface",
"to",
"the",
"client",
"code",
"once",
"the",
"complete",
"expression",
"has",
"been",
"built",
"."
] | def parseString(self, instring, parseAll=False):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
Returns the parsed data as a :class:`ParseResults` object, which may be
accessed as a list, or as a dict or object with attributes if the given parser
includes results names.
If you want the grammar to require that the entire input string be
successfully parsed, then set ``parseAll`` to True (equivalent to ending
the grammar with ``StringEnd()``).
Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the ``loc`` argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling ``parseWithTabs`` on your grammar before calling ``parseString``
(see :class:`parseWithTabs`)
- define your parse action using the full ``(s, loc, toks)`` signature, and
reference the input string using the parse action's ``s`` argument
- explictly expand the tabs in your input string before calling
``parseString``
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
# ~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse(instring, 0)
if parseAll:
loc = self.preParse(instring, loc)
se = Empty() + StringEnd()
se._parse(instring, loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, '__traceback__', None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
else:
return tokens | [
"def",
"parseString",
"(",
"self",
",",
"instring",
",",
"parseAll",
"=",
"False",
")",
":",
"ParserElement",
".",
"resetCache",
"(",
")",
"if",
"not",
"self",
".",
"streamlined",
":",
"self",
".",
"streamline",
"(",
")",
"# ~ self.saveAsList = True",
"for",... | https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/pyparsing.py#L1901-L1957 | ||
pyscf/pyscf | 0adfb464333f5ceee07b664f291d4084801bae64 | pyscf/tdscf/common_slow.py | python | msize | (m) | return s | Checks whether the matrix is square and returns its size.
Args:
m (numpy.ndarray): the matrix to measure;
Returns:
An integer with the size. | Checks whether the matrix is square and returns its size.
Args:
m (numpy.ndarray): the matrix to measure; | [
"Checks",
"whether",
"the",
"matrix",
"is",
"square",
"and",
"returns",
"its",
"size",
".",
"Args",
":",
"m",
"(",
"numpy",
".",
"ndarray",
")",
":",
"the",
"matrix",
"to",
"measure",
";"
] | def msize(m):
"""
Checks whether the matrix is square and returns its size.
Args:
m (numpy.ndarray): the matrix to measure;
Returns:
An integer with the size.
"""
s = m.shape[0]
if m.shape != (s, s):
raise ValueError("Do not recognize the shape (must be a square matrix): {}".format(m.shape))
return s | [
"def",
"msize",
"(",
"m",
")",
":",
"s",
"=",
"m",
".",
"shape",
"[",
"0",
"]",
"if",
"m",
".",
"shape",
"!=",
"(",
"s",
",",
"s",
")",
":",
"raise",
"ValueError",
"(",
"\"Do not recognize the shape (must be a square matrix): {}\"",
".",
"format",
"(",
... | https://github.com/pyscf/pyscf/blob/0adfb464333f5ceee07b664f291d4084801bae64/pyscf/tdscf/common_slow.py#L24-L36 | |
cloudera/hue | 23f02102d4547c17c32bd5ea0eb24e9eadd657a4 | desktop/core/ext-py/docutils-0.14/docutils/nodes.py | python | Node.deepcopy | (self) | Return a deep copy of self (also copying children). | Return a deep copy of self (also copying children). | [
"Return",
"a",
"deep",
"copy",
"of",
"self",
"(",
"also",
"copying",
"children",
")",
"."
] | def deepcopy(self):
"""Return a deep copy of self (also copying children)."""
raise NotImplementedError | [
"def",
"deepcopy",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/docutils-0.14/docutils/nodes.py#L88-L90 | ||
rwth-i6/returnn | f2d718a197a280b0d5f0fd91a7fcb8658560dddb | returnn/datasets/lm.py | python | collapse_whitespace | (text) | return text | :param str text:
:rtype: str | :param str text:
:rtype: str | [
":",
"param",
"str",
"text",
":",
":",
"rtype",
":",
"str"
] | def collapse_whitespace(text):
"""
:param str text:
:rtype: str
"""
text = re.sub(_whitespace_re, ' ', text)
text = text.strip()
return text | [
"def",
"collapse_whitespace",
"(",
"text",
")",
":",
"text",
"=",
"re",
".",
"sub",
"(",
"_whitespace_re",
",",
"' '",
",",
"text",
")",
"text",
"=",
"text",
".",
"strip",
"(",
")",
"return",
"text"
] | https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/datasets/lm.py#L1788-L1795 | |
CedricGuillemet/Imogen | ee417b42747ed5b46cb11b02ef0c3630000085b3 | bin/Lib/difflib.py | python | Differ._dump | (self, tag, x, lo, hi) | Generate comparison results for a same-tagged range. | Generate comparison results for a same-tagged range. | [
"Generate",
"comparison",
"results",
"for",
"a",
"same",
"-",
"tagged",
"range",
"."
] | def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in range(lo, hi):
yield '%s %s' % (tag, x[i]) | [
"def",
"_dump",
"(",
"self",
",",
"tag",
",",
"x",
",",
"lo",
",",
"hi",
")",
":",
"for",
"i",
"in",
"range",
"(",
"lo",
",",
"hi",
")",
":",
"yield",
"'%s %s'",
"%",
"(",
"tag",
",",
"x",
"[",
"i",
"]",
")"
] | https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/difflib.py#L909-L912 | ||
Tencent/bk-sops | 2a6bd1573b7b42812cb8a5b00929e98ab916b18d | gcloud/template_base/domains/schema_converter.py | python | YamlSchemaConverter._reconvert_tree | (self, template: dict, cur_templates: dict) | return reconverted_tree | 对单流程树从YAML字段恢复为原始字段 | 对单流程树从YAML字段恢复为原始字段 | [
"对单流程树从YAML字段恢复为原始字段"
] | def _reconvert_tree(self, template: dict, cur_templates: dict):
"""对单流程树从YAML字段恢复为原始字段"""
reconverted_tree = {
"activities": {},
"constants": {},
"end_event": {},
"flows": {},
"gateways": {},
"line": [],
"location": [],
"outputs": [],
"start_event": {},
}
# 恢复节点值
self._reconvert_nodes_in_tree(template["nodes"], reconverted_tree, cur_templates)
nodes = {
**reconverted_tree["activities"],
**reconverted_tree["gateways"],
reconverted_tree["end_event"]["id"]: reconverted_tree["end_event"],
reconverted_tree["start_event"]["id"]: reconverted_tree["start_event"],
}
# 生成flows,计算incoming
self._reconvert_flows_in_tree(nodes, reconverted_tree)
# 恢复constants格式
if "constants" in template:
for constant_key, constant_attrs in template["constants"].items():
reconverted_constant, is_create = self._reconvert_constant(
constant={**constant_attrs, "key": constant_key}, cur_constants=reconverted_tree["constants"],
)
if is_create:
reconverted_tree["constants"][constant_key] = reconverted_constant
# constants添加index
index_num = 0
for _, constant in reconverted_tree["constants"].items():
constant.update({"index": index_num})
index_num += 1
replace_all_id(reconverted_tree)
draw_pipeline(reconverted_tree)
return reconverted_tree | [
"def",
"_reconvert_tree",
"(",
"self",
",",
"template",
":",
"dict",
",",
"cur_templates",
":",
"dict",
")",
":",
"reconverted_tree",
"=",
"{",
"\"activities\"",
":",
"{",
"}",
",",
"\"constants\"",
":",
"{",
"}",
",",
"\"end_event\"",
":",
"{",
"}",
","... | https://github.com/Tencent/bk-sops/blob/2a6bd1573b7b42812cb8a5b00929e98ab916b18d/gcloud/template_base/domains/schema_converter.py#L342-L382 | |
zzzeek/sqlalchemy | fc5c54fcd4d868c2a4c7ac19668d72f506fe821e | lib/sqlalchemy/sql/selectable.py | python | HasHints.with_hint | (self, selectable, text, dialect_name="*") | r"""Add an indexing or other executional context hint for the given
selectable to this :class:`_expression.Select` or other selectable
object.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the given :class:`_schema.Table` or :class:`_expression.Alias`
passed as the
``selectable`` argument. The dialect implementation
typically uses Python string substitution syntax
with the token ``%(name)s`` to render the name of
the table or alias. E.g. when using Oracle, the
following::
select(mytable).\
with_hint(mytable, "index(%(name)s ix_mytable)")
Would render SQL as::
select /*+ index(mytable ix_mytable) */ ... from mytable
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add hints for both Oracle
and Sybase simultaneously::
select(mytable).\
with_hint(mytable, "index(%(name)s ix_mytable)", 'oracle').\
with_hint(mytable, "WITH INDEX ix_mytable", 'mssql')
.. seealso::
:meth:`_expression.Select.with_statement_hint` | r"""Add an indexing or other executional context hint for the given
selectable to this :class:`_expression.Select` or other selectable
object. | [
"r",
"Add",
"an",
"indexing",
"or",
"other",
"executional",
"context",
"hint",
"for",
"the",
"given",
"selectable",
"to",
"this",
":",
"class",
":",
"_expression",
".",
"Select",
"or",
"other",
"selectable",
"object",
"."
] | def with_hint(self, selectable, text, dialect_name="*"):
r"""Add an indexing or other executional context hint for the given
selectable to this :class:`_expression.Select` or other selectable
object.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the given :class:`_schema.Table` or :class:`_expression.Alias`
passed as the
``selectable`` argument. The dialect implementation
typically uses Python string substitution syntax
with the token ``%(name)s`` to render the name of
the table or alias. E.g. when using Oracle, the
following::
select(mytable).\
with_hint(mytable, "index(%(name)s ix_mytable)")
Would render SQL as::
select /*+ index(mytable ix_mytable) */ ... from mytable
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add hints for both Oracle
and Sybase simultaneously::
select(mytable).\
with_hint(mytable, "index(%(name)s ix_mytable)", 'oracle').\
with_hint(mytable, "WITH INDEX ix_mytable", 'mssql')
.. seealso::
:meth:`_expression.Select.with_statement_hint`
"""
if selectable is None:
self._statement_hints += ((dialect_name, text),)
else:
self._hints = self._hints.union(
{
(
coercions.expect(roles.FromClauseRole, selectable),
dialect_name,
): text
}
) | [
"def",
"with_hint",
"(",
"self",
",",
"selectable",
",",
"text",
",",
"dialect_name",
"=",
"\"*\"",
")",
":",
"if",
"selectable",
"is",
"None",
":",
"self",
".",
"_statement_hints",
"+=",
"(",
"(",
"dialect_name",
",",
"text",
")",
",",
")",
"else",
":... | https://github.com/zzzeek/sqlalchemy/blob/fc5c54fcd4d868c2a4c7ac19668d72f506fe821e/lib/sqlalchemy/sql/selectable.py#L374-L419 | ||
bitprophet/ssh | e8bdad4c82a50158a749233dca58c29e47c60b76 | ssh/channel.py | python | Channel.send_ready | (self) | Returns true if data can be written to this channel without blocking.
This means the channel is either closed (so any write attempt would
return immediately) or there is at least one byte of space in the
outbound buffer. If there is at least one byte of space in the
outbound buffer, a L{send} call will succeed immediately and return
the number of bytes actually written.
@return: C{True} if a L{send} call on this channel would immediately
succeed or fail
@rtype: boolean | Returns true if data can be written to this channel without blocking.
This means the channel is either closed (so any write attempt would
return immediately) or there is at least one byte of space in the
outbound buffer. If there is at least one byte of space in the
outbound buffer, a L{send} call will succeed immediately and return
the number of bytes actually written. | [
"Returns",
"true",
"if",
"data",
"can",
"be",
"written",
"to",
"this",
"channel",
"without",
"blocking",
".",
"This",
"means",
"the",
"channel",
"is",
"either",
"closed",
"(",
"so",
"any",
"write",
"attempt",
"would",
"return",
"immediately",
")",
"or",
"t... | def send_ready(self):
"""
Returns true if data can be written to this channel without blocking.
This means the channel is either closed (so any write attempt would
return immediately) or there is at least one byte of space in the
outbound buffer. If there is at least one byte of space in the
outbound buffer, a L{send} call will succeed immediately and return
the number of bytes actually written.
@return: C{True} if a L{send} call on this channel would immediately
succeed or fail
@rtype: boolean
"""
self.lock.acquire()
try:
if self.closed or self.eof_sent:
return True
return self.out_window_size > 0
finally:
self.lock.release() | [
"def",
"send_ready",
"(",
"self",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"if",
"self",
".",
"closed",
"or",
"self",
".",
"eof_sent",
":",
"return",
"True",
"return",
"self",
".",
"out_window_size",
">",
"0",
"finally",
"... | https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/channel.py#L674-L693 | ||
linxid/Machine_Learning_Study_Path | 558e82d13237114bbb8152483977806fc0c222af | Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pkg_resources/_vendor/pyparsing.py | python | QuotedString.__str__ | ( self ) | return self.strRepr | [] | def __str__( self ):
try:
return super(QuotedString,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr | [
"def",
"__str__",
"(",
"self",
")",
":",
"try",
":",
"return",
"super",
"(",
"QuotedString",
",",
"self",
")",
".",
"__str__",
"(",
")",
"except",
"Exception",
":",
"pass",
"if",
"self",
".",
"strRepr",
"is",
"None",
":",
"self",
".",
"strRepr",
"=",... | https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pkg_resources/_vendor/pyparsing.py#L2922-L2931 | |||
PyHDI/veriloggen | 2382d200deabf59cfcfd741f5eba371010aaf2bb | veriloggen/verilog/from_verilog.py | python | VerilogReadVisitor.pop_module | (self) | [] | def pop_module(self):
self.m = self.module_stack.pop() | [
"def",
"pop_module",
"(",
"self",
")",
":",
"self",
".",
"m",
"=",
"self",
".",
"module_stack",
".",
"pop",
"(",
")"
] | https://github.com/PyHDI/veriloggen/blob/2382d200deabf59cfcfd741f5eba371010aaf2bb/veriloggen/verilog/from_verilog.py#L144-L145 | ||||
holzschu/Carnets | 44effb10ddfc6aa5c8b0687582a724ba82c6b547 | Library/lib/python3.7/site-packages/pip/_vendor/requests/api.py | python | head | (url, **kwargs) | return request('head', url, **kwargs) | r"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response | r"""Sends a HEAD request. | [
"r",
"Sends",
"a",
"HEAD",
"request",
"."
] | def head(url, **kwargs):
r"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs) | [
"def",
"head",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'allow_redirects'",
",",
"False",
")",
"return",
"request",
"(",
"'head'",
",",
"url",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/pip/_vendor/requests/api.py#L91-L101 | |
sagemath/sage | f9b2db94f675ff16963ccdefba4f1a3393b3fe0d | src/sage/interfaces/mathics.py | python | Mathics._eval | (self, code) | return ev.evaluate(expr) | Evaluates a command inside the Mathics interpreter and returns the output
as a Mathics result.
EXAMPLES::
sage: mathics._eval('1+1').last_eval # optional - mathics
<Integer: 2> | Evaluates a command inside the Mathics interpreter and returns the output
as a Mathics result. | [
"Evaluates",
"a",
"command",
"inside",
"the",
"Mathics",
"interpreter",
"and",
"returns",
"the",
"output",
"as",
"a",
"Mathics",
"result",
"."
] | def _eval(self, code):
"""
Evaluates a command inside the Mathics interpreter and returns the output
as a Mathics result.
EXAMPLES::
sage: mathics._eval('1+1').last_eval # optional - mathics
<Integer: 2>
"""
self._lazy_init()
S = self._session
expr = S.evaluate(code)
from mathics.core.evaluation import Evaluation
ev = Evaluation(S.definitions)
return ev.evaluate(expr) | [
"def",
"_eval",
"(",
"self",
",",
"code",
")",
":",
"self",
".",
"_lazy_init",
"(",
")",
"S",
"=",
"self",
".",
"_session",
"expr",
"=",
"S",
".",
"evaluate",
"(",
"code",
")",
"from",
"mathics",
".",
"core",
".",
"evaluation",
"import",
"Evaluation"... | https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/interfaces/mathics.py#L551-L566 | |
marcusva/py-sdl2 | d549fc58de7aa204a119dc8dedef81b3cc888fb9 | sdl2/sdlgfx.py | python | lineColor | (renderer, x1, y1, x2, y2, color) | return _funcs["lineColor"](renderer, x1, y1, x2, y2, color) | Draws a line to the renderer with a given color.
If the rendering color has any transparency, blending will be enabled.
Args:
renderer (:obj:`SDL_Renderer`): The renderer to draw on.
x1 (int): The X coordinate of the first point of the line.
y1 (int): The Y coordinate of the first point of the line.
x2 (int): The X coordinate of the second point of the line.
y2 (int): The Y coordinate of the second point of the line.
color (int): The color to draw with as a 32-bit ``0xRRGGBBAA`` integer
(e.g. ``0xFF0000FF`` for solid red).
Returns:
int: 0 on success, or -1 on failure. | Draws a line to the renderer with a given color. | [
"Draws",
"a",
"line",
"to",
"the",
"renderer",
"with",
"a",
"given",
"color",
"."
] | def lineColor(renderer, x1, y1, x2, y2, color):
"""Draws a line to the renderer with a given color.
If the rendering color has any transparency, blending will be enabled.
Args:
renderer (:obj:`SDL_Renderer`): The renderer to draw on.
x1 (int): The X coordinate of the first point of the line.
y1 (int): The Y coordinate of the first point of the line.
x2 (int): The X coordinate of the second point of the line.
y2 (int): The Y coordinate of the second point of the line.
color (int): The color to draw with as a 32-bit ``0xRRGGBBAA`` integer
(e.g. ``0xFF0000FF`` for solid red).
Returns:
int: 0 on success, or -1 on failure.
"""
return _funcs["lineColor"](renderer, x1, y1, x2, y2, color) | [
"def",
"lineColor",
"(",
"renderer",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"color",
")",
":",
"return",
"_funcs",
"[",
"\"lineColor\"",
"]",
"(",
"renderer",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"color",
")"
] | https://github.com/marcusva/py-sdl2/blob/d549fc58de7aa204a119dc8dedef81b3cc888fb9/sdl2/sdlgfx.py#L570-L588 | |
replit-archive/empythoned | 977ec10ced29a3541a4973dc2b59910805695752 | dist/lib/python2.7/httplib.py | python | HTTPConnection.close | (self) | Close the connection to the HTTP server. | Close the connection to the HTTP server. | [
"Close",
"the",
"connection",
"to",
"the",
"HTTP",
"server",
"."
] | def close(self):
"""Close the connection to the HTTP server."""
if self.sock:
self.sock.close() # close it manually... there may be other refs
self.sock = None
if self.__response:
self.__response.close()
self.__response = None
self.__state = _CS_IDLE | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"sock",
":",
"self",
".",
"sock",
".",
"close",
"(",
")",
"# close it manually... there may be other refs",
"self",
".",
"sock",
"=",
"None",
"if",
"self",
".",
"__response",
":",
"self",
".",
"__... | https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/dist/lib/python2.7/httplib.py#L759-L767 | ||
pymeasure/pymeasure | b4d888e9ead85ef7f7af0031f2dbb44c9ce1825e | pymeasure/instruments/instrument.py | python | FakeInstrument.control | (get_command, set_command, docs,
validator=lambda v, vs: v, values=(), map_values=False,
get_process=lambda v: v, set_process=lambda v: v,
check_set_errors=False, check_get_errors=False,
**kwargs) | return Instrument.control(get_command="",
set_command=format_specifier,
docs=docs,
validator=validator,
values=values,
map_values=map_values,
get_process=get_process,
set_process=set_process,
check_set_errors=check_set_errors,
check_get_errors=check_get_errors,
**kwargs) | Fake Instrument.control.
Strip commands and only store and return values indicated by
format strings to mimic many simple commands.
This is analogous how the tests in test_instrument are handled. | Fake Instrument.control. | [
"Fake",
"Instrument",
".",
"control",
"."
] | def control(get_command, set_command, docs,
validator=lambda v, vs: v, values=(), map_values=False,
get_process=lambda v: v, set_process=lambda v: v,
check_set_errors=False, check_get_errors=False,
**kwargs):
"""Fake Instrument.control.
Strip commands and only store and return values indicated by
format strings to mimic many simple commands.
This is analogous how the tests in test_instrument are handled.
"""
# Regex search to find first format specifier in the command
fmt_spec_pattern = r'(%[\w.#-+ *]*[diouxXeEfFgGcrsa%])'
match = re.findall(fmt_spec_pattern, set_command)
if match:
# format_specifier = match.group(0)
format_specifier = ','.join(match)
else:
format_specifier = ''
# To preserve as much functionality as possible, call the real
# control method with modified get_command and set_command.
return Instrument.control(get_command="",
set_command=format_specifier,
docs=docs,
validator=validator,
values=values,
map_values=map_values,
get_process=get_process,
set_process=set_process,
check_set_errors=check_set_errors,
check_get_errors=check_get_errors,
**kwargs) | [
"def",
"control",
"(",
"get_command",
",",
"set_command",
",",
"docs",
",",
"validator",
"=",
"lambda",
"v",
",",
"vs",
":",
"v",
",",
"values",
"=",
"(",
")",
",",
"map_values",
"=",
"False",
",",
"get_process",
"=",
"lambda",
"v",
":",
"v",
",",
... | https://github.com/pymeasure/pymeasure/blob/b4d888e9ead85ef7f7af0031f2dbb44c9ce1825e/pymeasure/instruments/instrument.py#L377-L409 | |
scikit-learn/scikit-learn | 1d1aadd0711b87d2a11c80aad15df6f8cf156712 | examples/bicluster/plot_bicluster_newsgroups.py | python | most_common | (d) | return sorted(d.items(), key=operator.itemgetter(1), reverse=True) | Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7. | Items of a defaultdict(int) with the highest values. | [
"Items",
"of",
"a",
"defaultdict",
"(",
"int",
")",
"with",
"the",
"highest",
"values",
"."
] | def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(d.items(), key=operator.itemgetter(1), reverse=True) | [
"def",
"most_common",
"(",
"d",
")",
":",
"return",
"sorted",
"(",
"d",
".",
"items",
"(",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")"
] | https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/examples/bicluster/plot_bicluster_newsgroups.py#L127-L132 | |
Pyomo/pyomo | dbd4faee151084f343b893cc2b0c04cf2b76fd92 | pyomo/core/base/param.py | python | _ParamData.value | (self, val) | Set the value for this variable. | Set the value for this variable. | [
"Set",
"the",
"value",
"for",
"this",
"variable",
"."
] | def value(self, val):
"""Set the value for this variable."""
self.set_value(val) | [
"def",
"value",
"(",
"self",
",",
"val",
")",
":",
"self",
".",
"set_value",
"(",
"val",
")"
] | https://github.com/Pyomo/pyomo/blob/dbd4faee151084f343b893cc2b0c04cf2b76fd92/pyomo/core/base/param.py#L211-L213 | ||
numenta/numenta-apps | 02903b0062c89c2c259b533eea2df6e8bb44eaf3 | taurus_metric_collectors/taurus_metric_collectors/gen_metrics_config.py | python | _parseArgs | () | return dict(inputCsvPath=inputCsvPath) | Parses command-line args
:returns: a dict:
{"inputCsvPath": <inputCsvPath>} | Parses command-line args | [
"Parses",
"command",
"-",
"line",
"args"
] | def _parseArgs():
""" Parses command-line args
:returns: a dict:
{"inputCsvPath": <inputCsvPath>}
"""
helpString = (
"%prog <INPUT_CSV_PATH>\n\n"
"Generate content for products/taurus_metric_collectors/conf/metrics.json "
"from the given csv input file and output the json object to stdout. Does "
"not overwrite metrics.json.\n\n"
"The first line of the input csv file is the header with column names, "
"expected to contain \"Symbol\", \"Resource\" and \"Twitter\" column "
"headings (possibly among additional other columns that shall be ignored "
"by the script). The \"Twitter\" column heading is expected to be the last "
"column heading, with the column value containing an optional Twitter "
"screen name preceded by the '@' char. Additional Twitter screen names, if "
"any, are specified one each in subsequent columns.")
parser = OptionParser(helpString)
(_options, posArgs) = parser.parse_args()
if len(posArgs) != 1:
parser.error(
"Expected one positional args, but got %s: %s" % (len(posArgs), posArgs,))
inputCsvPath, = posArgs
return dict(inputCsvPath=inputCsvPath) | [
"def",
"_parseArgs",
"(",
")",
":",
"helpString",
"=",
"(",
"\"%prog <INPUT_CSV_PATH>\\n\\n\"",
"\"Generate content for products/taurus_metric_collectors/conf/metrics.json \"",
"\"from the given csv input file and output the json object to stdout. Does \"",
"\"not overwrite metrics.json.\\n\\n... | https://github.com/numenta/numenta-apps/blob/02903b0062c89c2c259b533eea2df6e8bb44eaf3/taurus_metric_collectors/taurus_metric_collectors/gen_metrics_config.py#L187-L216 | |
hydroshare/hydroshare | 7ba563b55412f283047fb3ef6da367d41dec58c6 | hs_core/models.py | python | AbstractMetaDataElement.update | (cls, element_id, **kwargs) | return element | Pass through kwargs to update specific metadata object. | Pass through kwargs to update specific metadata object. | [
"Pass",
"through",
"kwargs",
"to",
"update",
"specific",
"metadata",
"object",
"."
] | def update(cls, element_id, **kwargs):
"""Pass through kwargs to update specific metadata object."""
element = cls.objects.get(id=element_id)
for key, value in list(kwargs.items()):
setattr(element, key, value)
element.save()
return element | [
"def",
"update",
"(",
"cls",
",",
"element_id",
",",
"*",
"*",
"kwargs",
")",
":",
"element",
"=",
"cls",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"element_id",
")",
"for",
"key",
",",
"value",
"in",
"list",
"(",
"kwargs",
".",
"items",
"(",
"... | https://github.com/hydroshare/hydroshare/blob/7ba563b55412f283047fb3ef6da367d41dec58c6/hs_core/models.py#L378-L384 | |
plotly/plotly.py | cfad7862594b35965c0e000813bd7805e8494a5b | packages/python/plotly/plotly/graph_objs/waterfall/_hoverlabel.py | python | Hoverlabel.bordercolor | (self) | return self["bordercolor"] | Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray | Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above | [
"Sets",
"the",
"border",
"color",
"of",
"the",
"hover",
"labels",
"for",
"this",
"trace",
".",
"The",
"bordercolor",
"property",
"is",
"a",
"color",
"and",
"may",
"be",
"specified",
"as",
":",
"-",
"A",
"hex",
"string",
"(",
"e",
".",
"g",
".",
"#ff0... | def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"] | [
"def",
"bordercolor",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"bordercolor\"",
"]"
] | https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/waterfall/_hoverlabel.py#L150-L201 | |
DataDog/integrations-core | 934674b29d94b70ccc008f76ea172d0cdae05e1e | datadog_checks_dev/datadog_checks/dev/tooling/dependencies.py | python | DependencyDefinition._normalized_marker | (self) | return new_marker | [] | def _normalized_marker(self):
if self.requirement.marker is None:
return self.requirement.marker
new_marker = str(self.requirement.marker).strip()
new_marker = new_marker.replace('\'', "\"")
return new_marker | [
"def",
"_normalized_marker",
"(",
"self",
")",
":",
"if",
"self",
".",
"requirement",
".",
"marker",
"is",
"None",
":",
"return",
"self",
".",
"requirement",
".",
"marker",
"new_marker",
"=",
"str",
"(",
"self",
".",
"requirement",
".",
"marker",
")",
".... | https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/datadog_checks_dev/datadog_checks/dev/tooling/dependencies.py#L25-L31 | |||
ownaginatious/fbchat-archive-parser | f1e66cea864f1c07b825fc036071f443693231d5 | fbchat_archive_parser/__init__.py | python | ChatMessage.__new__ | (cls, timestamp, sender, content, seq_num=0) | return super(ChatMessage, cls) \
.__new__(cls, timestamp, seq_num, sender, content) | timestamp -- the time the message was sent (datetime)
sender -- who sent the message (unicode py2/str py3)
content -- content of the message (unicode py2/str py3)
seq_num -- sequence (default 0) | timestamp -- the time the message was sent (datetime)
sender -- who sent the message (unicode py2/str py3)
content -- content of the message (unicode py2/str py3)
seq_num -- sequence (default 0) | [
"timestamp",
"--",
"the",
"time",
"the",
"message",
"was",
"sent",
"(",
"datetime",
")",
"sender",
"--",
"who",
"sent",
"the",
"message",
"(",
"unicode",
"py2",
"/",
"str",
"py3",
")",
"content",
"--",
"content",
"of",
"the",
"message",
"(",
"unicode",
... | def __new__(cls, timestamp, sender, content, seq_num=0):
"""
timestamp -- the time the message was sent (datetime)
sender -- who sent the message (unicode py2/str py3)
content -- content of the message (unicode py2/str py3)
seq_num -- sequence (default 0)
"""
return super(ChatMessage, cls) \
.__new__(cls, timestamp, seq_num, sender, content) | [
"def",
"__new__",
"(",
"cls",
",",
"timestamp",
",",
"sender",
",",
"content",
",",
"seq_num",
"=",
"0",
")",
":",
"return",
"super",
"(",
"ChatMessage",
",",
"cls",
")",
".",
"__new__",
"(",
"cls",
",",
"timestamp",
",",
"seq_num",
",",
"sender",
",... | https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/__init__.py#L74-L82 | |
keiffster/program-y | 8c99b56f8c32f01a7b9887b5daae9465619d0385 | src/programy/clients/render/json.py | python | JSONRenderer.handle_video | (self, client_context, video) | return video | [] | def handle_video(self, client_context, video):
if self._client:
self._client.process_response(client_context, video)
return video | [
"def",
"handle_video",
"(",
"self",
",",
"client_context",
",",
"video",
")",
":",
"if",
"self",
".",
"_client",
":",
"self",
".",
"_client",
".",
"process_response",
"(",
"client_context",
",",
"video",
")",
"return",
"video"
] | https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/clients/render/json.py#L56-L59 | |||
bleachbit/bleachbit | 88fc4452936d02b56a76f07ce2142306bb47262b | bleachbit/General.py | python | makedirs | (path) | Make directory recursively considering sudo permissions.
'Path' should not end in a delimiter. | Make directory recursively considering sudo permissions.
'Path' should not end in a delimiter. | [
"Make",
"directory",
"recursively",
"considering",
"sudo",
"permissions",
".",
"Path",
"should",
"not",
"end",
"in",
"a",
"delimiter",
"."
] | def makedirs(path):
"""Make directory recursively considering sudo permissions.
'Path' should not end in a delimiter."""
logger.debug('makedirs(%s)', path)
if os.path.lexists(path):
return
parentdir = os.path.split(path)[0]
if not os.path.lexists(parentdir):
makedirs(parentdir)
os.mkdir(path, 0o700)
if sudo_mode():
chownself(path) | [
"def",
"makedirs",
"(",
"path",
")",
":",
"logger",
".",
"debug",
"(",
"'makedirs(%s)'",
",",
"path",
")",
"if",
"os",
".",
"path",
".",
"lexists",
"(",
"path",
")",
":",
"return",
"parentdir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")"... | https://github.com/bleachbit/bleachbit/blob/88fc4452936d02b56a76f07ce2142306bb47262b/bleachbit/General.py#L106-L117 | ||
inducer/relate | 206689b907ab2a3c06f76c71a5717bfee1052d60 | course/flow.py | python | assemble_page_grades | (
flow_sessions: List[FlowSession]
) | return [get_grades_for_visit_group(group) for group in answer_visit_ids] | Given a list of flow sessions, return a list of lists of FlowPageVisitGrade
objects corresponding to the most recent page grades for each page of the
flow session. If a page is not graded, the corresponding entry is None.
Note that, even if the flow sessions belong to the same flow, the length
of the lists may vary since the flow page count may vary per session. | Given a list of flow sessions, return a list of lists of FlowPageVisitGrade
objects corresponding to the most recent page grades for each page of the
flow session. If a page is not graded, the corresponding entry is None. | [
"Given",
"a",
"list",
"of",
"flow",
"sessions",
"return",
"a",
"list",
"of",
"lists",
"of",
"FlowPageVisitGrade",
"objects",
"corresponding",
"to",
"the",
"most",
"recent",
"page",
"grades",
"for",
"each",
"page",
"of",
"the",
"flow",
"session",
".",
"If",
... | def assemble_page_grades(
flow_sessions: List[FlowSession]
) -> List[List[Optional[FlowPageVisitGrade]]]:
"""
Given a list of flow sessions, return a list of lists of FlowPageVisitGrade
objects corresponding to the most recent page grades for each page of the
flow session. If a page is not graded, the corresponding entry is None.
Note that, even if the flow sessions belong to the same flow, the length
of the lists may vary since the flow page count may vary per session.
"""
id_to_fsess_idx = {fsess.id: i for i, fsess in enumerate(flow_sessions)}
answer_visit_ids: List[List[Optional[int]]] = [
[None] * fsess.page_count for fsess in flow_sessions
]
# Get all answer visits corresponding to the sessions. The query result is
# typically very large.
all_answer_visits = (
get_multiple_flow_session_graded_answers_qset(flow_sessions)
.order_by("visit_time")
.values("id", "flow_session_id", "page_data__page_ordinal",
"is_submitted_answer"))
for answer_visit in all_answer_visits:
fsess_idx = id_to_fsess_idx[answer_visit["flow_session_id"]]
page_ordinal = answer_visit["page_data__page_ordinal"]
if page_ordinal is not None:
answer_visit_ids[fsess_idx][page_ordinal] = answer_visit["id"]
if not flow_sessions[fsess_idx].in_progress:
assert answer_visit["is_submitted_answer"] is True
flat_answer_visit_ids = []
for visit_id_list in answer_visit_ids:
for visit_id in visit_id_list:
if visit_id is not None:
flat_answer_visit_ids.append(visit_id)
# Get all grade visits associated with the answer visits.
grades = (FlowPageVisitGrade.objects
.filter(visit__in=flat_answer_visit_ids)
.order_by("visit__id")
.order_by("grade_time"))
grades_by_answer_visit = {}
for grade in grades:
grades_by_answer_visit[grade.visit_id] = grade
def get_grades_for_visit_group(
visit_group: List[Optional[int]]
) -> List[Optional[FlowPageVisit]]:
return [grades_by_answer_visit.get(visit_id)
for visit_id in visit_group]
return [get_grades_for_visit_group(group) for group in answer_visit_ids] | [
"def",
"assemble_page_grades",
"(",
"flow_sessions",
":",
"List",
"[",
"FlowSession",
"]",
")",
"->",
"List",
"[",
"List",
"[",
"Optional",
"[",
"FlowPageVisitGrade",
"]",
"]",
"]",
":",
"id_to_fsess_idx",
"=",
"{",
"fsess",
".",
"id",
":",
"i",
"for",
"... | https://github.com/inducer/relate/blob/206689b907ab2a3c06f76c71a5717bfee1052d60/course/flow.py#L502-L558 | |
skelsec/pypykatz | dd129ff36e00593d1340776b517f7e749ad8d314 | pypykatz/commons/common.py | python | GenericReader.__init__ | (self, data, processor_architecture = KatzSystemArchitecture.X64) | data is bytes | data is bytes | [
"data",
"is",
"bytes"
] | def __init__(self, data, processor_architecture = KatzSystemArchitecture.X64):
"""
data is bytes
"""
self.processor_architecture = processor_architecture
self.start_address = 0
self.end_address = len(data)
self.size = len(data)
self.data = data
self.current_position = 0 | [
"def",
"__init__",
"(",
"self",
",",
"data",
",",
"processor_architecture",
"=",
"KatzSystemArchitecture",
".",
"X64",
")",
":",
"self",
".",
"processor_architecture",
"=",
"processor_architecture",
"self",
".",
"start_address",
"=",
"0",
"self",
".",
"end_address... | https://github.com/skelsec/pypykatz/blob/dd129ff36e00593d1340776b517f7e749ad8d314/pypykatz/commons/common.py#L23-L32 | ||
leo-editor/leo-editor | 383d6776d135ef17d73d935a2f0ecb3ac0e99494 | leo/plugins/obsolete/wxGui.py | python | wxLeoFrame.contractPane | (self,event=None) | Contract the selected pane. | Contract the selected pane. | [
"Contract",
"the",
"selected",
"pane",
"."
] | def contractPane (self,event=None):
'''Contract the selected pane.'''
f = self ; c = f.c
w = c.get_requested_focus()
wname = c.widget_name(w)
# g.trace(wname)
if not w: return
if wname.startswith('body'):
f.contractBodyPane()
elif wname.startswith('log'):
f.contractLogPane()
elif wname.startswith('head') or wname.startswith('canvas'):
f.contractOutlinePane() | [
"def",
"contractPane",
"(",
"self",
",",
"event",
"=",
"None",
")",
":",
"f",
"=",
"self",
"c",
"=",
"f",
".",
"c",
"w",
"=",
"c",
".",
"get_requested_focus",
"(",
")",
"wname",
"=",
"c",
".",
"widget_name",
"(",
"w",
")",
"# g.trace(wname)",
"if",... | https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/plugins/obsolete/wxGui.py#L2568-L2584 | ||
muccc/iridium-toolkit | 6f1098f4cfb3bc0f6178246943311d16a5ee04e5 | reedsolo.py | python | rs_calc_syndromes | (msg, nsym, fcr=0, generator=2) | return [0] + [gf_poly_eval(msg, gf_pow(generator, i+fcr)) for i in xrange(nsym)] | Given the received codeword msg and the number of error correcting symbols (nsym), computes the syndromes polynomial.
Mathematically, it's essentially equivalent to a Fourrier Transform (Chien search being the inverse). | Given the received codeword msg and the number of error correcting symbols (nsym), computes the syndromes polynomial.
Mathematically, it's essentially equivalent to a Fourrier Transform (Chien search being the inverse). | [
"Given",
"the",
"received",
"codeword",
"msg",
"and",
"the",
"number",
"of",
"error",
"correcting",
"symbols",
"(",
"nsym",
")",
"computes",
"the",
"syndromes",
"polynomial",
".",
"Mathematically",
"it",
"s",
"essentially",
"equivalent",
"to",
"a",
"Fourrier",
... | def rs_calc_syndromes(msg, nsym, fcr=0, generator=2):
'''Given the received codeword msg and the number of error correcting symbols (nsym), computes the syndromes polynomial.
Mathematically, it's essentially equivalent to a Fourrier Transform (Chien search being the inverse).
'''
# Note the "[0] +" : we add a 0 coefficient for the lowest degree (the constant). This effectively shifts the syndrome, and will shift every computations depending on the syndromes (such as the errors locator polynomial, errors evaluator polynomial, etc. but not the errors positions).
# This is not necessary as anyway syndromes are defined such as there are only non-zero coefficients (the only 0 is the shift of the constant here) and subsequent computations will/must account for the shift by skipping the first iteration (eg, the often seen range(1, n-k+1)), but you can also avoid prepending the 0 coeff and adapt every subsequent computations to start from 0 instead of 1.
return [0] + [gf_poly_eval(msg, gf_pow(generator, i+fcr)) for i in xrange(nsym)] | [
"def",
"rs_calc_syndromes",
"(",
"msg",
",",
"nsym",
",",
"fcr",
"=",
"0",
",",
"generator",
"=",
"2",
")",
":",
"# Note the \"[0] +\" : we add a 0 coefficient for the lowest degree (the constant). This effectively shifts the syndrome, and will shift every computations depending on t... | https://github.com/muccc/iridium-toolkit/blob/6f1098f4cfb3bc0f6178246943311d16a5ee04e5/reedsolo.py#L452-L458 | |
SanPen/GridCal | d3f4566d2d72c11c7e910c9d162538ef0e60df31 | src/GridCal/Gui/GridEditorWidget/dc_line_graphics.py | python | DcLineGraphicItem.setEndPos | (self, endpos) | Set the starting position
@param endpos:
@return: | Set the starting position | [
"Set",
"the",
"starting",
"position"
] | def setEndPos(self, endpos):
"""
Set the starting position
@param endpos:
@return:
"""
self.pos2 = endpos
self.redraw() | [
"def",
"setEndPos",
"(",
"self",
",",
"endpos",
")",
":",
"self",
".",
"pos2",
"=",
"endpos",
"self",
".",
"redraw",
"(",
")"
] | https://github.com/SanPen/GridCal/blob/d3f4566d2d72c11c7e910c9d162538ef0e60df31/src/GridCal/Gui/GridEditorWidget/dc_line_graphics.py#L532-L539 | ||
holzschu/Carnets | 44effb10ddfc6aa5c8b0687582a724ba82c6b547 | Library/lib/python3.7/site-packages/sympy/polys/galoistools.py | python | gf_irred_p_rabin | (f, p, K) | return h == x | Rabin's polynomial irreducibility test over finite fields.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_irred_p_rabin
>>> gf_irred_p_rabin(ZZ.map([1, 4, 2, 2, 3, 2, 4, 1, 4, 0, 4]), 5, ZZ)
True
>>> gf_irred_p_rabin(ZZ.map([3, 2, 4]), 5, ZZ)
False | Rabin's polynomial irreducibility test over finite fields. | [
"Rabin",
"s",
"polynomial",
"irreducibility",
"test",
"over",
"finite",
"fields",
"."
] | def gf_irred_p_rabin(f, p, K):
"""
Rabin's polynomial irreducibility test over finite fields.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_irred_p_rabin
>>> gf_irred_p_rabin(ZZ.map([1, 4, 2, 2, 3, 2, 4, 1, 4, 0, 4]), 5, ZZ)
True
>>> gf_irred_p_rabin(ZZ.map([3, 2, 4]), 5, ZZ)
False
"""
n = gf_degree(f)
if n <= 1:
return True
_, f = gf_monic(f, p, K)
x = [K.one, K.zero]
indices = { n//d for d in factorint(n) }
b = gf_frobenius_monomial_base(f, p, K)
h = b[1]
for i in range(1, n):
if i in indices:
g = gf_sub(h, x, p, K)
if gf_gcd(f, g, p, K) != [K.one]:
return False
h = gf_frobenius_map(h, f, b, p, K)
return h == x | [
"def",
"gf_irred_p_rabin",
"(",
"f",
",",
"p",
",",
"K",
")",
":",
"n",
"=",
"gf_degree",
"(",
"f",
")",
"if",
"n",
"<=",
"1",
":",
"return",
"True",
"_",
",",
"f",
"=",
"gf_monic",
"(",
"f",
",",
"p",
",",
"K",
")",
"x",
"=",
"[",
"K",
"... | https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/polys/galoistools.py#L1446-L1485 | |
sametmax/Django--an-app-at-a-time | 99eddf12ead76e6dfbeb09ce0bae61e282e22f8a | ignore_this_directory/django/urls/base.py | python | clear_script_prefix | () | Unset the script prefix for the current thread. | Unset the script prefix for the current thread. | [
"Unset",
"the",
"script",
"prefix",
"for",
"the",
"current",
"thread",
"."
] | def clear_script_prefix():
"""
Unset the script prefix for the current thread.
"""
try:
del _prefixes.value
except AttributeError:
pass | [
"def",
"clear_script_prefix",
"(",
")",
":",
"try",
":",
"del",
"_prefixes",
".",
"value",
"except",
"AttributeError",
":",
"pass"
] | https://github.com/sametmax/Django--an-app-at-a-time/blob/99eddf12ead76e6dfbeb09ce0bae61e282e22f8a/ignore_this_directory/django/urls/base.py#L120-L127 | ||
sunnyxiaohu/R-C3D.pytorch | e8731af7b95f1dc934f6604f9c09e3c4ead74db5 | lib/model/tdcnn/resnet.py | python | resnet10 | (**kwargs) | return model | Constructs a ResNet-18 model. | Constructs a ResNet-18 model. | [
"Constructs",
"a",
"ResNet",
"-",
"18",
"model",
"."
] | def resnet10(**kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
return model | [
"def",
"resnet10",
"(",
"*",
"*",
"kwargs",
")",
":",
"model",
"=",
"ResNet",
"(",
"BasicBlock",
",",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"*",
"*",
"kwargs",
")",
"return",
"model"
] | https://github.com/sunnyxiaohu/R-C3D.pytorch/blob/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/tdcnn/resnet.py#L217-L221 | |
ProjectQ-Framework/ProjectQ | 0d32c1610ba4e9aefd7f19eb52dadb4fbe5f9005 | projectq/backends/_awsbraket/_awsbraket.py | python | AWSBraketBackend.get_probabilities | (self, qureg) | return probability_dict | Return the list of basis states with corresponding probabilities.
If input qureg is a subset of the register used for the experiment, then returns the projected probabilities
over the other states.
The measured bits are ordered according to the supplied quantum register, i.e., the left-most bit in the
state-string corresponds to the first qubit in the supplied quantum register.
Args:
qureg (list<Qubit>): Quantum register determining the order of the qubits.
Returns:
probability_dict (dict): Dictionary mapping n-bit strings to probabilities.
Raises:
RuntimeError: If no data is available (i.e., if the circuit has not been executed). Or if a qubit was
supplied which was not present in the circuit (might have gotten optimized away).
Warning:
Only call this function after the circuit has been executed!
This is maintained in the same form of IBM and AQT for compatibility but in AWSBraket, a previously
executed circuit will store the results in the S3 bucket and it can be retreived at any point in time
thereafter.
No circuit execution should be required at the time of retrieving the results and probabilities if the
circuit has already been executed.
In order to obtain the probabilities of a previous job you have to get the TaskArn and remember the qubits
and ordering used in the original job. | Return the list of basis states with corresponding probabilities. | [
"Return",
"the",
"list",
"of",
"basis",
"states",
"with",
"corresponding",
"probabilities",
"."
] | def get_probabilities(self, qureg):
"""
Return the list of basis states with corresponding probabilities.
If input qureg is a subset of the register used for the experiment, then returns the projected probabilities
over the other states.
The measured bits are ordered according to the supplied quantum register, i.e., the left-most bit in the
state-string corresponds to the first qubit in the supplied quantum register.
Args:
qureg (list<Qubit>): Quantum register determining the order of the qubits.
Returns:
probability_dict (dict): Dictionary mapping n-bit strings to probabilities.
Raises:
RuntimeError: If no data is available (i.e., if the circuit has not been executed). Or if a qubit was
supplied which was not present in the circuit (might have gotten optimized away).
Warning:
Only call this function after the circuit has been executed!
This is maintained in the same form of IBM and AQT for compatibility but in AWSBraket, a previously
executed circuit will store the results in the S3 bucket and it can be retreived at any point in time
thereafter.
No circuit execution should be required at the time of retrieving the results and probabilities if the
circuit has already been executed.
In order to obtain the probabilities of a previous job you have to get the TaskArn and remember the qubits
and ordering used in the original job.
"""
if len(self._probabilities) == 0:
raise RuntimeError("Please, run the circuit first!")
probability_dict = {}
for state in self._probabilities:
mapped_state = ['0'] * len(qureg)
for i, qubit in enumerate(qureg):
if self._logical_to_physical(qubit.id) >= len(state): # pragma: no cover
raise IndexError('Physical ID {} > length of internal probabilities array'.format(qubit.id))
mapped_state[i] = state[self._logical_to_physical(qubit.id)]
probability = self._probabilities[state]
mapped_state = "".join(mapped_state)
if mapped_state not in probability_dict:
probability_dict[mapped_state] = probability
else:
probability_dict[mapped_state] += probability
return probability_dict | [
"def",
"get_probabilities",
"(",
"self",
",",
"qureg",
")",
":",
"if",
"len",
"(",
"self",
".",
"_probabilities",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"Please, run the circuit first!\"",
")",
"probability_dict",
"=",
"{",
"}",
"for",
"state",
... | https://github.com/ProjectQ-Framework/ProjectQ/blob/0d32c1610ba4e9aefd7f19eb52dadb4fbe5f9005/projectq/backends/_awsbraket/_awsbraket.py#L340-L387 | |
holzschu/Carnets | 44effb10ddfc6aa5c8b0687582a724ba82c6b547 | Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/io/fits/scripts/fitsheader.py | python | print_headers_as_table | (args) | Prints FITS header(s) in a machine-readable table format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below. | Prints FITS header(s) in a machine-readable table format. | [
"Prints",
"FITS",
"header",
"(",
"s",
")",
"in",
"a",
"machine",
"-",
"readable",
"table",
"format",
"."
] | def print_headers_as_table(args):
"""Prints FITS header(s) in a machine-readable table format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename)
tbl = formatter.parse(args.extensions,
args.keywords,
args.compressed)
if tbl:
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
from astropy import table
resulting_table = table.vstack(tables)
# Print the string representation of the concatenated table
resulting_table.write(sys.stdout, format=args.table) | [
"def",
"print_headers_as_table",
"(",
"args",
")",
":",
"tables",
"=",
"[",
"]",
"# Create a Table object for each file",
"for",
"filename",
"in",
"args",
".",
"filename",
":",
"# Support wildcards",
"formatter",
"=",
"None",
"try",
":",
"formatter",
"=",
"TableHe... | https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/io/fits/scripts/fitsheader.py#L278-L312 | ||
haiwen/seahub | e92fcd44e3e46260597d8faa9347cb8222b8b10d | seahub/wopi/views.py | python | generate_file_lock_key_value | (request) | return lock_cache_key, x_wopi_lock | [] | def generate_file_lock_key_value(request):
token = request.GET.get('access_token', None)
info_dict = get_file_info_by_token(token)
repo_id = info_dict['repo_id']
file_path = info_dict['file_path']
repo = seafile_api.get_repo(repo_id)
if repo.is_virtual:
origin_repo_id = repo.origin_repo_id
origin_file_path = posixpath.join(repo.origin_path, file_path.strip('/'))
file_path_hash = hashlib.sha256(origin_file_path.encode('utf8')).hexdigest()
lock_cache_key = '_'.join(['HTTP_X_WOPI_LOCK', origin_repo_id, file_path_hash])
else:
file_path_hash = hashlib.sha256(file_path.encode('utf8')).hexdigest()
lock_cache_key = '_'.join(['HTTP_X_WOPI_LOCK', repo_id, file_path_hash])
x_wopi_lock = request.META.get('HTTP_X_WOPI_LOCK', None)
return lock_cache_key, x_wopi_lock | [
"def",
"generate_file_lock_key_value",
"(",
"request",
")",
":",
"token",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'access_token'",
",",
"None",
")",
"info_dict",
"=",
"get_file_info_by_token",
"(",
"token",
")",
"repo_id",
"=",
"info_dict",
"[",
"'repo_i... | https://github.com/haiwen/seahub/blob/e92fcd44e3e46260597d8faa9347cb8222b8b10d/seahub/wopi/views.py#L40-L60 | |||
scikit-hep/awkward-0.x | dd885bef15814f588b58944d2505296df4aaae0e | awkward0/array/masked.py | python | IndexedMaskedArray.argmax | (self) | [] | def argmax(self):
if self._util_hasjagged(self):
return self.copy(content=self._content.argmax())
else:
index = self._content[self._mask[self.isunmasked()]].argmax()
return self.numpy.searchsorted(self.numpy.cumsum(self.ismasked()), index, side="right") | [
"def",
"argmax",
"(",
"self",
")",
":",
"if",
"self",
".",
"_util_hasjagged",
"(",
"self",
")",
":",
"return",
"self",
".",
"copy",
"(",
"content",
"=",
"self",
".",
"_content",
".",
"argmax",
"(",
")",
")",
"else",
":",
"index",
"=",
"self",
".",
... | https://github.com/scikit-hep/awkward-0.x/blob/dd885bef15814f588b58944d2505296df4aaae0e/awkward0/array/masked.py#L903-L908 | ||||
mesalock-linux/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | lib-python/2.7/lib2to3/refactor.py | python | RefactoringTool.refactor | (self, items, write=False, doctests_only=False) | Refactor a list of files and directories. | Refactor a list of files and directories. | [
"Refactor",
"a",
"list",
"of",
"files",
"and",
"directories",
"."
] | def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only) | [
"def",
"refactor",
"(",
"self",
",",
"items",
",",
"write",
"=",
"False",
",",
"doctests_only",
"=",
"False",
")",
":",
"for",
"dir_or_file",
"in",
"items",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dir_or_file",
")",
":",
"self",
".",
"refac... | https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/lib-python/2.7/lib2to3/refactor.py#L294-L301 | ||
ma1co/Sony-PMCA-RE | d4da4882e4d59b35f59e4ac919a866e2daf4bbdd | pmca/spk/__init__.py | python | encryptData | (key, data) | return b''.join(aes.encrypt(util.pad(c, constants.paddingSize)) for c in util.chunk(data, constants.blockSize)) | Encrypts the apk data using the specified AES key | Encrypts the apk data using the specified AES key | [
"Encrypts",
"the",
"apk",
"data",
"using",
"the",
"specified",
"AES",
"key"
] | def encryptData(key, data):
"""Encrypts the apk data using the specified AES key"""
aes = AES.new(key, AES.MODE_ECB)
return b''.join(aes.encrypt(util.pad(c, constants.paddingSize)) for c in util.chunk(data, constants.blockSize)) | [
"def",
"encryptData",
"(",
"key",
",",
"data",
")",
":",
"aes",
"=",
"AES",
".",
"new",
"(",
"key",
",",
"AES",
".",
"MODE_ECB",
")",
"return",
"b''",
".",
"join",
"(",
"aes",
".",
"encrypt",
"(",
"util",
".",
"pad",
"(",
"c",
",",
"constants",
... | https://github.com/ma1co/Sony-PMCA-RE/blob/d4da4882e4d59b35f59e4ac919a866e2daf4bbdd/pmca/spk/__init__.py#L79-L82 | |
Nuitka/Nuitka | 39262276993757fa4e299f497654065600453fc9 | nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Taskmaster.py | python | Stats.__init__ | (self) | Instantiates a Taskmaster.Stats object, initializing all
appropriate counters to zero. | Instantiates a Taskmaster.Stats object, initializing all
appropriate counters to zero. | [
"Instantiates",
"a",
"Taskmaster",
".",
"Stats",
"object",
"initializing",
"all",
"appropriate",
"counters",
"to",
"zero",
"."
] | def __init__(self):
"""
Instantiates a Taskmaster.Stats object, initializing all
appropriate counters to zero.
"""
self.considered = 0
self.already_handled = 0
self.problem = 0
self.child_failed = 0
self.not_built = 0
self.side_effects = 0
self.build = 0 | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"considered",
"=",
"0",
"self",
".",
"already_handled",
"=",
"0",
"self",
".",
"problem",
"=",
"0",
"self",
".",
"child_failed",
"=",
"0",
"self",
".",
"not_built",
"=",
"0",
"self",
".",
"side_e... | https://github.com/Nuitka/Nuitka/blob/39262276993757fa4e299f497654065600453fc9/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Taskmaster.py#L92-L103 | ||
rq/rq | c5a1ef17345e17269085e7f72858ac9bd6faf1dd | rq/compat/dictconfig.py | python | DictConfigurator.configure_formatter | (self, config) | return result | Configure a formatter from a dictionary. | Configure a formatter from a dictionary. | [
"Configure",
"a",
"formatter",
"from",
"a",
"dictionary",
"."
] | def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result | [
"def",
"configure_formatter",
"(",
"self",
",",
"config",
")",
":",
"if",
"'()'",
"in",
"config",
":",
"factory",
"=",
"config",
"[",
"'()'",
"]",
"# for use in exception handler",
"try",
":",
"result",
"=",
"self",
".",
"configure_custom",
"(",
"config",
")... | https://github.com/rq/rq/blob/c5a1ef17345e17269085e7f72858ac9bd6faf1dd/rq/compat/dictconfig.py#L419-L439 | |
ShadowXZT/pytorch_RFCN | 0e532444263938aa4d000113dc6aac2e72b4b925 | faster_rcnn/roi_data_layer/minibatch2.py | python | _get_image_blob | (roidb, scale_inds) | return blob, im_scales | Builds an input blob from the images in the roidb at the specified
scales. | Builds an input blob from the images in the roidb at the specified
scales. | [
"Builds",
"an",
"input",
"blob",
"from",
"the",
"images",
"in",
"the",
"roidb",
"at",
"the",
"specified",
"scales",
"."
] | def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_scale = cfg.TRAIN.SCALES_BASE[scale_inds[i]]
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales | [
"def",
"_get_image_blob",
"(",
"roidb",
",",
"scale_inds",
")",
":",
"num_images",
"=",
"len",
"(",
"roidb",
")",
"processed_ims",
"=",
"[",
"]",
"im_scales",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"num_images",
")",
":",
"im",
"=",
"cv2",
"... | https://github.com/ShadowXZT/pytorch_RFCN/blob/0e532444263938aa4d000113dc6aac2e72b4b925/faster_rcnn/roi_data_layer/minibatch2.py#L173-L197 | |
python-pillow/Pillow | fd2b07c454b20e1e9af0cea64923b21250f8f8d6 | src/PIL/ImageDraw.py | python | floodfill | (image, xy, value, border=None, thresh=0) | (experimental) Fills a bounded region with a given color.
:param image: Target image.
:param xy: Seed position (a 2-item coordinate tuple). See
:ref:`coordinate-system`.
:param value: Fill color.
:param border: Optional border value. If given, the region consists of
pixels with a color different from the border color. If not given,
the region consists of pixels having the same color as the seed
pixel.
:param thresh: Optional threshold value which specifies a maximum
tolerable difference of a pixel value from the 'background' in
order for it to be replaced. Useful for filling regions of
non-homogeneous, but similar, colors. | (experimental) Fills a bounded region with a given color. | [
"(",
"experimental",
")",
"Fills",
"a",
"bounded",
"region",
"with",
"a",
"given",
"color",
"."
] | def floodfill(image, xy, value, border=None, thresh=0):
"""
(experimental) Fills a bounded region with a given color.
:param image: Target image.
:param xy: Seed position (a 2-item coordinate tuple). See
:ref:`coordinate-system`.
:param value: Fill color.
:param border: Optional border value. If given, the region consists of
pixels with a color different from the border color. If not given,
the region consists of pixels having the same color as the seed
pixel.
:param thresh: Optional threshold value which specifies a maximum
tolerable difference of a pixel value from the 'background' in
order for it to be replaced. Useful for filling regions of
non-homogeneous, but similar, colors.
"""
# based on an implementation by Eric S. Raymond
# amended by yo1995 @20180806
pixel = image.load()
x, y = xy
try:
background = pixel[x, y]
if _color_diff(value, background) <= thresh:
return # seed point already has fill color
pixel[x, y] = value
except (ValueError, IndexError):
return # seed point outside image
edge = {(x, y)}
# use a set to keep record of current and previous edge pixels
# to reduce memory consumption
full_edge = set()
while edge:
new_edge = set()
for (x, y) in edge: # 4 adjacent method
for (s, t) in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
# If already processed, or if a coordinate is negative, skip
if (s, t) in full_edge or s < 0 or t < 0:
continue
try:
p = pixel[s, t]
except (ValueError, IndexError):
pass
else:
full_edge.add((s, t))
if border is None:
fill = _color_diff(p, background) <= thresh
else:
fill = p != value and p != border
if fill:
pixel[s, t] = value
new_edge.add((s, t))
full_edge = edge # discard pixels processed
edge = new_edge | [
"def",
"floodfill",
"(",
"image",
",",
"xy",
",",
"value",
",",
"border",
"=",
"None",
",",
"thresh",
"=",
"0",
")",
":",
"# based on an implementation by Eric S. Raymond",
"# amended by yo1995 @20180806",
"pixel",
"=",
"image",
".",
"load",
"(",
")",
"x",
","... | https://github.com/python-pillow/Pillow/blob/fd2b07c454b20e1e9af0cea64923b21250f8f8d6/src/PIL/ImageDraw.py#L824-L877 | ||
out0fmemory/GoAgent-Always-Available | c4254984fea633ce3d1893fe5901debd9f22c2a9 | server/lib/google/appengine/tools/yaml_translator.py | python | AppYamlTranslator.ErrorHandlerPath | (self, error_handler) | return path | Returns the relative path name for the given error handler.
Args:
error_handler: an app_engine_web_xml.ErrorHandler.
Returns:
the relative path name for the handler.
Raises:
AppEngineConfigException: if the named file is not an existing static
file. | Returns the relative path name for the given error handler. | [
"Returns",
"the",
"relative",
"path",
"name",
"for",
"the",
"given",
"error",
"handler",
"."
] | def ErrorHandlerPath(self, error_handler):
"""Returns the relative path name for the given error handler.
Args:
error_handler: an app_engine_web_xml.ErrorHandler.
Returns:
the relative path name for the handler.
Raises:
AppEngineConfigException: if the named file is not an existing static
file.
"""
name = error_handler.name
if not name.startswith('/'):
name = '/' + name
path = '__static__' + name
if path not in self.static_files:
raise AppEngineConfigException(
'No static file found for error handler: %s, out of %s' %
(name, self.static_files))
return path | [
"def",
"ErrorHandlerPath",
"(",
"self",
",",
"error_handler",
")",
":",
"name",
"=",
"error_handler",
".",
"name",
"if",
"not",
"name",
".",
"startswith",
"(",
"'/'",
")",
":",
"name",
"=",
"'/'",
"+",
"name",
"path",
"=",
"'__static__'",
"+",
"name",
... | https://github.com/out0fmemory/GoAgent-Always-Available/blob/c4254984fea633ce3d1893fe5901debd9f22c2a9/server/lib/google/appengine/tools/yaml_translator.py#L333-L354 | |
tlsfuzzer/tlsfuzzer | fe2e4af145446d603a9da2e202e10ea80ccd298d | tlsfuzzer/messages.py | python | CertificateVerifyGenerator._sig_alg_for_ecdsa_key | (accept_sig_algs, version, key) | return (getattr(HashAlgorithm, hash_name), SignatureAlgorithm.ecdsa) | Select an acceptable signature algorithm for a given ecdsa key. | Select an acceptable signature algorithm for a given ecdsa key. | [
"Select",
"an",
"acceptable",
"signature",
"algorithm",
"for",
"a",
"given",
"ecdsa",
"key",
"."
] | def _sig_alg_for_ecdsa_key(accept_sig_algs, version, key):
"""Select an acceptable signature algorithm for a given ecdsa key."""
if version < (3, 3):
# in TLS 1.1 and earlier, there is no algorithm selection,
# pick one closest, as far as used algorithms are concerned, to
# the TLS 1.2 algorithm
return (HashAlgorithm.sha1, SignatureAlgorithm.ecdsa)
if version < (3, 4):
# in TLS 1.2 we can mix and match hashes and curves
return next((i for i in accept_sig_algs
if i in ECDSA_SIG_ALL), ECDSA_SIG_ALL[0])
# but in TLS 1.3 we need to select a hash that matches our key
hash_name = curve_name_to_hash_tls13(key.curve_name)
# while it may select one that wasn't advertised by server,
# this is better last resort than sending a sha1+rsa sigalg
return (getattr(HashAlgorithm, hash_name), SignatureAlgorithm.ecdsa) | [
"def",
"_sig_alg_for_ecdsa_key",
"(",
"accept_sig_algs",
",",
"version",
",",
"key",
")",
":",
"if",
"version",
"<",
"(",
"3",
",",
"3",
")",
":",
"# in TLS 1.1 and earlier, there is no algorithm selection,",
"# pick one closest, as far as used algorithms are concerned, to",
... | https://github.com/tlsfuzzer/tlsfuzzer/blob/fe2e4af145446d603a9da2e202e10ea80ccd298d/tlsfuzzer/messages.py#L1032-L1047 | |
nlpub/pymystem3 | 5fcc151c7d80c5babd1a6f20e710f3ace81db98d | setup.py | python | read | (filename) | Return the contents of a file.
:param filename: file path
:type filename: :class:`str`
:return: the file's content
:rtype: :class:`str` | Return the contents of a file. | [
"Return",
"the",
"contents",
"of",
"a",
"file",
"."
] | def read(filename):
"""Return the contents of a file.
:param filename: file path
:type filename: :class:`str`
:return: the file's content
:rtype: :class:`str`
"""
with codecs.open(os.path.join(os.path.dirname(__file__), filename), 'r', 'utf-8') as f:
return f.read() | [
"def",
"read",
"(",
"filename",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"filename",
")",
",",
"'r'",
",",
"'utf-8'",
")",
"as",
"f",
":",
"... | https://github.com/nlpub/pymystem3/blob/5fcc151c7d80c5babd1a6f20e710f3ace81db98d/setup.py#L145-L154 | ||
Galvant/InstrumentKit | 6d216bd7f8e9ec7918762fe5fb7a306d5bd0eb1f | instruments/thorlabs/thorlabsapt.py | python | ThorLabsAPT.serial_number | (self) | return self._serial_number | Gets the serial number for the APT controller
:type: `str` | Gets the serial number for the APT controller | [
"Gets",
"the",
"serial",
"number",
"for",
"the",
"APT",
"controller"
] | def serial_number(self):
"""
Gets the serial number for the APT controller
:type: `str`
"""
return self._serial_number | [
"def",
"serial_number",
"(",
"self",
")",
":",
"return",
"self",
".",
"_serial_number"
] | https://github.com/Galvant/InstrumentKit/blob/6d216bd7f8e9ec7918762fe5fb7a306d5bd0eb1f/instruments/thorlabs/thorlabsapt.py#L159-L165 | |
SteveDoyle2/pyNastran | eda651ac2d4883d95a34951f8a002ff94f642a1a | pyNastran/utils/dict_to_h5py.py | python | cast | (h5_file: Dataset, key: str, value, nlevels: int) | return value2 | casts a value | casts a value | [
"casts",
"a",
"value"
] | def cast(h5_file: Dataset, key: str, value, nlevels: int):
"""casts a value"""
# value
#print('%s****castingA' % (nlevels*' '))
#print(key, value)
try:
value2 = _cast(h5_file.get(key))
except AttributeError:
print(key)
raise
#print('%s****%s' % (nlevels*' ', value2))
#print('%s %r : %s %s' % (nlevels*' ', key, value2, type(value2)))
return value2 | [
"def",
"cast",
"(",
"h5_file",
":",
"Dataset",
",",
"key",
":",
"str",
",",
"value",
",",
"nlevels",
":",
"int",
")",
":",
"# value",
"#print('%s****castingA' % (nlevels*' '))",
"#print(key, value)",
"try",
":",
"value2",
"=",
"_cast",
"(",
"h5_file",
".",
... | https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/utils/dict_to_h5py.py#L445-L457 | |
twilio/twilio-python | 6e1e811ea57a1edfadd5161ace87397c563f6915 | twilio/rest/numbers/v2/regulatory_compliance/bundle/evaluation.py | python | EvaluationInstance.sid | (self) | return self._properties['sid'] | :returns: The unique string that identifies the Evaluation resource
:rtype: unicode | :returns: The unique string that identifies the Evaluation resource
:rtype: unicode | [
":",
"returns",
":",
"The",
"unique",
"string",
"that",
"identifies",
"the",
"Evaluation",
"resource",
":",
"rtype",
":",
"unicode"
] | def sid(self):
"""
:returns: The unique string that identifies the Evaluation resource
:rtype: unicode
"""
return self._properties['sid'] | [
"def",
"sid",
"(",
"self",
")",
":",
"return",
"self",
".",
"_properties",
"[",
"'sid'",
"]"
] | https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/numbers/v2/regulatory_compliance/bundle/evaluation.py#L289-L294 | |
jgagneastro/coffeegrindsize | 22661ebd21831dba4cf32bfc6ba59fe3d49f879c | App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/io/sql.py | python | SQLiteTable._create_table_setup | (self) | return create_stmts | Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements. | Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements. | [
"Return",
"a",
"list",
"of",
"SQL",
"statements",
"that",
"creates",
"a",
"table",
"reflecting",
"the",
"structure",
"of",
"a",
"DataFrame",
".",
"The",
"first",
"entry",
"will",
"be",
"a",
"CREATE",
"TABLE",
"statement",
"while",
"the",
"rest",
"will",
"b... | def _create_table_setup(self):
"""
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(
self._sql_type_name
)
pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join(escape(c) for c in keys)
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join(escape(c) for c in ix_cols)
create_stmts.append(
"CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts | [
"def",
"_create_table_setup",
"(",
"self",
")",
":",
"column_names_and_types",
"=",
"self",
".",
"_get_column_names_and_types",
"(",
"self",
".",
"_sql_type_name",
")",
"pat",
"=",
"re",
".",
"compile",
"(",
"r'\\s+'",
")",
"column_names",
"=",
"[",
"col_name",
... | https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/io/sql.py#L1321-L1363 | |
nltk/nltk | 3f74ac55681667d7ef78b664557487145f51eb02 | nltk/tokenize/texttiling.py | python | TextTilingTokenizer._identify_boundaries | (self, depth_scores) | return boundaries | Identifies boundaries at the peaks of similarity score
differences | Identifies boundaries at the peaks of similarity score
differences | [
"Identifies",
"boundaries",
"at",
"the",
"peaks",
"of",
"similarity",
"score",
"differences"
] | def _identify_boundaries(self, depth_scores):
"""Identifies boundaries at the peaks of similarity score
differences"""
boundaries = [0 for x in depth_scores]
avg = sum(depth_scores) / len(depth_scores)
stdev = numpy.std(depth_scores)
# SB: what is the purpose of this conditional?
if self.cutoff_policy == LC:
cutoff = avg - stdev / 2.0
else:
cutoff = avg - stdev / 2.0
depth_tuples = sorted(zip(depth_scores, range(len(depth_scores))))
depth_tuples.reverse()
hp = list(filter(lambda x: x[0] > cutoff, depth_tuples))
for dt in hp:
boundaries[dt[1]] = 1
for dt2 in hp: # undo if there is a boundary close already
if (
dt[1] != dt2[1]
and abs(dt2[1] - dt[1]) < 4
and boundaries[dt2[1]] == 1
):
boundaries[dt[1]] = 0
return boundaries | [
"def",
"_identify_boundaries",
"(",
"self",
",",
"depth_scores",
")",
":",
"boundaries",
"=",
"[",
"0",
"for",
"x",
"in",
"depth_scores",
"]",
"avg",
"=",
"sum",
"(",
"depth_scores",
")",
"/",
"len",
"(",
"depth_scores",
")",
"stdev",
"=",
"numpy",
".",
... | https://github.com/nltk/nltk/blob/3f74ac55681667d7ef78b664557487145f51eb02/nltk/tokenize/texttiling.py#L285-L313 | |
CPJKU/madmom | 3bc8334099feb310acfce884ebdb76a28e01670d | madmom/evaluation/tempo.py | python | TempoMeanEvaluation.acc2 | (self) | return np.nanmean([e.acc2 for e in self.eval_objects]) | Accuracy 2. | Accuracy 2. | [
"Accuracy",
"2",
"."
] | def acc2(self):
"""Accuracy 2."""
return np.nanmean([e.acc2 for e in self.eval_objects]) | [
"def",
"acc2",
"(",
"self",
")",
":",
"return",
"np",
".",
"nanmean",
"(",
"[",
"e",
".",
"acc2",
"for",
"e",
"in",
"self",
".",
"eval_objects",
"]",
")"
] | https://github.com/CPJKU/madmom/blob/3bc8334099feb310acfce884ebdb76a28e01670d/madmom/evaluation/tempo.py#L276-L278 | |
sqall01/alertR | e1d1a83e54f876cc4cd7bd87387e05cb75d4dc13 | sensorClientICalendar/lib/client/serverCommunication.py | python | ServerCommunication._handler_sensor_alert | (self,
incomingMessage: Dict[str, Any]) | return False | Internal function that handles received sensor alerts (for nodes of type manager or alert).
:param incomingMessage:
:return: success or failure | Internal function that handles received sensor alerts (for nodes of type manager or alert). | [
"Internal",
"function",
"that",
"handles",
"received",
"sensor",
"alerts",
"(",
"for",
"nodes",
"of",
"type",
"manager",
"or",
"alert",
")",
"."
] | def _handler_sensor_alert(self,
incomingMessage: Dict[str, Any]) -> bool:
"""
Internal function that handles received sensor alerts (for nodes of type manager or alert).
:param incomingMessage:
:return: success or failure
"""
logging.debug("[%s]: Received sensor alert '%s' with state %d."
% (self._log_tag, incomingMessage["payload"]["description"], incomingMessage["payload"]["state"]))
# extract sensor alert values
sensorAlert = ManagerObjSensorAlert()
sensorAlert.timeReceived = int(time.time())
try:
msg_time = incomingMessage["msgTime"]
sensorAlert.sensorId = incomingMessage["payload"]["sensorId"]
sensorAlert.state = incomingMessage["payload"]["state"]
sensorAlert.alertLevels = incomingMessage["payload"]["alertLevels"]
sensorAlert.description = incomingMessage["payload"]["description"]
# parse transfer data
sensorAlert.hasOptionalData = incomingMessage["payload"]["hasOptionalData"]
if sensorAlert.hasOptionalData:
sensorAlert.optionalData = incomingMessage["payload"]["optionalData"]
else:
sensorAlert.optionalData = dict()
sensorAlert.changeState = incomingMessage["payload"]["changeState"]
sensorAlert.hasLatestData = incomingMessage["payload"]["hasLatestData"]
sensorAlert.dataType = incomingMessage["payload"]["dataType"]
sensor_data_cls = SensorDataType.get_sensor_data_class(sensorAlert.dataType)
sensorAlert.data = sensor_data_cls.copy_from_dict(incomingMessage["payload"]["data"])
except Exception:
logging.exception("[%s]: Received sensor alert invalid." % self._log_tag)
return False
# handle received sensor alert
if self._event_handler.sensor_alert(msg_time, sensorAlert):
return True
return False | [
"def",
"_handler_sensor_alert",
"(",
"self",
",",
"incomingMessage",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"bool",
":",
"logging",
".",
"debug",
"(",
"\"[%s]: Received sensor alert '%s' with state %d.\"",
"%",
"(",
"self",
".",
"_log_tag",
",",
... | https://github.com/sqall01/alertR/blob/e1d1a83e54f876cc4cd7bd87387e05cb75d4dc13/sensorClientICalendar/lib/client/serverCommunication.py#L92-L136 | |
fonttools/fonttools | 892322aaff6a89bea5927379ec06bc0da3dfb7df | Lib/fontTools/ttLib/tables/E_B_D_T_.py | python | ebdt_bitmap_format_5.decompile | (self) | [] | def decompile(self):
self.imageData = self.data | [
"def",
"decompile",
"(",
"self",
")",
":",
"self",
".",
"imageData",
"=",
"self",
".",
"data"
] | https://github.com/fonttools/fonttools/blob/892322aaff6a89bea5927379ec06bc0da3dfb7df/Lib/fontTools/ttLib/tables/E_B_D_T_.py#L632-L633 | ||||
Abjad/abjad | d0646dfbe83db3dc5ab268f76a0950712b87b7fd | abjad/lyproxy.py | python | LilyPondContext.engravers | (self) | return engravers_ | r"""
Gets engravers belonging to LilyPond context.
.. container:: example
>>> context = abjad.LilyPondContext('MensuralStaff')
>>> for engraver in context.engravers:
... engraver
...
LilyPondEngraver(name='Accidental_engraver')
LilyPondEngraver(name='Axis_group_engraver')
LilyPondEngraver(name='Bar_engraver')
LilyPondEngraver(name='Clef_engraver')
LilyPondEngraver(name='Collision_engraver')
LilyPondEngraver(name='Cue_clef_engraver')
LilyPondEngraver(name='Custos_engraver')
LilyPondEngraver(name='Dot_column_engraver')
LilyPondEngraver(name='Figured_bass_engraver')
LilyPondEngraver(name='Figured_bass_position_engraver')
LilyPondEngraver(name='Fingering_column_engraver')
LilyPondEngraver(name='Font_size_engraver')
LilyPondEngraver(name='Grob_pq_engraver')
LilyPondEngraver(name='Instrument_name_engraver')
LilyPondEngraver(name='Key_engraver')
LilyPondEngraver(name='Ledger_line_engraver')
LilyPondEngraver(name='Ottava_spanner_engraver')
LilyPondEngraver(name='Output_property_engraver')
LilyPondEngraver(name='Piano_pedal_align_engraver')
LilyPondEngraver(name='Piano_pedal_engraver')
LilyPondEngraver(name='Pure_from_neighbor_engraver')
LilyPondEngraver(name='Rest_collision_engraver')
LilyPondEngraver(name='Script_row_engraver')
LilyPondEngraver(name='Separating_line_group_engraver')
LilyPondEngraver(name='Staff_collecting_engraver')
LilyPondEngraver(name='Staff_symbol_engraver')
LilyPondEngraver(name='Time_signature_engraver') | r"""
Gets engravers belonging to LilyPond context. | [
"r",
"Gets",
"engravers",
"belonging",
"to",
"LilyPond",
"context",
"."
] | def engravers(self) -> typing.Tuple["LilyPondEngraver", ...]:
r"""
Gets engravers belonging to LilyPond context.
.. container:: example
>>> context = abjad.LilyPondContext('MensuralStaff')
>>> for engraver in context.engravers:
... engraver
...
LilyPondEngraver(name='Accidental_engraver')
LilyPondEngraver(name='Axis_group_engraver')
LilyPondEngraver(name='Bar_engraver')
LilyPondEngraver(name='Clef_engraver')
LilyPondEngraver(name='Collision_engraver')
LilyPondEngraver(name='Cue_clef_engraver')
LilyPondEngraver(name='Custos_engraver')
LilyPondEngraver(name='Dot_column_engraver')
LilyPondEngraver(name='Figured_bass_engraver')
LilyPondEngraver(name='Figured_bass_position_engraver')
LilyPondEngraver(name='Fingering_column_engraver')
LilyPondEngraver(name='Font_size_engraver')
LilyPondEngraver(name='Grob_pq_engraver')
LilyPondEngraver(name='Instrument_name_engraver')
LilyPondEngraver(name='Key_engraver')
LilyPondEngraver(name='Ledger_line_engraver')
LilyPondEngraver(name='Ottava_spanner_engraver')
LilyPondEngraver(name='Output_property_engraver')
LilyPondEngraver(name='Piano_pedal_align_engraver')
LilyPondEngraver(name='Piano_pedal_engraver')
LilyPondEngraver(name='Pure_from_neighbor_engraver')
LilyPondEngraver(name='Rest_collision_engraver')
LilyPondEngraver(name='Script_row_engraver')
LilyPondEngraver(name='Separating_line_group_engraver')
LilyPondEngraver(name='Staff_collecting_engraver')
LilyPondEngraver(name='Staff_symbol_engraver')
LilyPondEngraver(name='Time_signature_engraver')
"""
engravers = set()
dictionary = contexts[self.name]
assert isinstance(dictionary, dict), repr(dictionary)
for engraver_name in dictionary["consists"]:
engraver = LilyPondEngraver(name=engraver_name)
engravers.add(engraver)
engravers_ = tuple(sorted(engravers, key=lambda x: x.name))
return engravers_ | [
"def",
"engravers",
"(",
"self",
")",
"->",
"typing",
".",
"Tuple",
"[",
"\"LilyPondEngraver\"",
",",
"...",
"]",
":",
"engravers",
"=",
"set",
"(",
")",
"dictionary",
"=",
"contexts",
"[",
"self",
".",
"name",
"]",
"assert",
"isinstance",
"(",
"dictiona... | https://github.com/Abjad/abjad/blob/d0646dfbe83db3dc5ab268f76a0950712b87b7fd/abjad/lyproxy.py#L397-L443 | |
JiYou/openstack | 8607dd488bde0905044b303eb6e52bdea6806923 | packages/source/nova/nova/network/manager.py | python | RPCAllocateFixedIP._rpc_allocate_fixed_ip | (self, context, instance_id, network_id,
**kwargs) | return self.allocate_fixed_ip(context, instance_id, network, **kwargs) | Sits in between _allocate_fixed_ips and allocate_fixed_ip to
perform network lookup on the far side of rpc. | Sits in between _allocate_fixed_ips and allocate_fixed_ip to
perform network lookup on the far side of rpc. | [
"Sits",
"in",
"between",
"_allocate_fixed_ips",
"and",
"allocate_fixed_ip",
"to",
"perform",
"network",
"lookup",
"on",
"the",
"far",
"side",
"of",
"rpc",
"."
] | def _rpc_allocate_fixed_ip(self, context, instance_id, network_id,
**kwargs):
"""Sits in between _allocate_fixed_ips and allocate_fixed_ip to
perform network lookup on the far side of rpc.
"""
network = self._get_network_by_id(context, network_id)
return self.allocate_fixed_ip(context, instance_id, network, **kwargs) | [
"def",
"_rpc_allocate_fixed_ip",
"(",
"self",
",",
"context",
",",
"instance_id",
",",
"network_id",
",",
"*",
"*",
"kwargs",
")",
":",
"network",
"=",
"self",
".",
"_get_network_by_id",
"(",
"context",
",",
"network_id",
")",
"return",
"self",
".",
"allocat... | https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/nova/nova/network/manager.py#L222-L228 | |
pyparallel/pyparallel | 11e8c6072d48c8f13641925d17b147bf36ee0ba3 | Lib/tkinter/__init__.py | python | Wm.wm_forget | (self, window) | The window will be unmappend from the screen and will no longer
be managed by wm. toplevel windows will be treated like frame
windows once they are no longer managed by wm, however, the menu
option configuration will be remembered and the menus will return
once the widget is managed again. | The window will be unmappend from the screen and will no longer
be managed by wm. toplevel windows will be treated like frame
windows once they are no longer managed by wm, however, the menu
option configuration will be remembered and the menus will return
once the widget is managed again. | [
"The",
"window",
"will",
"be",
"unmappend",
"from",
"the",
"screen",
"and",
"will",
"no",
"longer",
"be",
"managed",
"by",
"wm",
".",
"toplevel",
"windows",
"will",
"be",
"treated",
"like",
"frame",
"windows",
"once",
"they",
"are",
"no",
"longer",
"manage... | def wm_forget(self, window): # new in Tk 8.5
"""The window will be unmappend from the screen and will no longer
be managed by wm. toplevel windows will be treated like frame
windows once they are no longer managed by wm, however, the menu
option configuration will be remembered and the menus will return
once the widget is managed again."""
self.tk.call('wm', 'forget', window) | [
"def",
"wm_forget",
"(",
"self",
",",
"window",
")",
":",
"# new in Tk 8.5",
"self",
".",
"tk",
".",
"call",
"(",
"'wm'",
",",
"'forget'",
",",
"window",
")"
] | https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/tkinter/__init__.py#L1610-L1616 | ||
saltstack/salt-contrib | 062355938ad1cced273056e9c23dc344c6a2c858 | modules/keystone.py | python | user_list | () | return ret | Return a list of available users (keystone user-list)
CLI Example::
salt '*' keystone.user_list | Return a list of available users (keystone user-list) | [
"Return",
"a",
"list",
"of",
"available",
"users",
"(",
"keystone",
"user",
"-",
"list",
")"
] | def user_list():
'''
Return a list of available users (keystone user-list)
CLI Example::
salt '*' keystone.user_list
'''
kstone = auth()
ret = {}
for user in kstone.users.list():
ret[user.name] = {
'id': user.id,
'name': user.name,
'email': user.email,
'enabled': user.enabled,
'tenant_id': user.tenantId,
}
return ret | [
"def",
"user_list",
"(",
")",
":",
"kstone",
"=",
"auth",
"(",
")",
"ret",
"=",
"{",
"}",
"for",
"user",
"in",
"kstone",
".",
"users",
".",
"list",
"(",
")",
":",
"ret",
"[",
"user",
".",
"name",
"]",
"=",
"{",
"'id'",
":",
"user",
".",
"id",... | https://github.com/saltstack/salt-contrib/blob/062355938ad1cced273056e9c23dc344c6a2c858/modules/keystone.py#L419-L437 | |
sdispater/tomlkit | 7b450661e02d161cbf9a3bec3b3955cbcb64efef | tomlkit/items.py | python | Integer.value | (self) | return self | The wrapped integer value | The wrapped integer value | [
"The",
"wrapped",
"integer",
"value"
] | def value(self) -> int:
"""The wrapped integer value"""
return self | [
"def",
"value",
"(",
"self",
")",
"->",
"int",
":",
"return",
"self"
] | https://github.com/sdispater/tomlkit/blob/7b450661e02d161cbf9a3bec3b3955cbcb64efef/tomlkit/items.py#L514-L516 | |
pycalphad/pycalphad | 631c41c3d041d4e8a47c57d0f25d078344b9da52 | pycalphad/io/tdb.py | python | write_tdb | (dbf, fd, groupby='subsystem', if_incompatible='warn') | Write a TDB file from a pycalphad Database object.
The goal is to produce TDBs that conform to the most restrictive subset of database specifications. Some of these
can be adjusted for automatically, such as the Thermo-Calc line length limit of 78. Others require changing the
database in non-trivial ways, such as the maximum length of function names (8). The default is to warn the user when
attempting to write an incompatible database and the user must choose whether to warn and write the file anyway or
to fix the incompatibility.
Currently the supported compatibility fixes are:
- Line length <= 78 characters (Thermo-Calc)
- Function names <= 8 characters (Thermo-Calc)
The current unsupported fixes include:
- Keyword length <= 2000 characters (Thermo-Calc)
- Element names <= 2 characters (Thermo-Calc)
- Phase names <= 24 characters (Thermo-Calc)
Other TDB compatibility issues required by Thermo-Calc or other software should be reported to the issue tracker.
Parameters
----------
dbf : Database
A pycalphad Database.
fd : file-like
File descriptor.
groupby : ['subsystem', 'phase'], optional
Desired grouping of parameters in the file.
if_incompatible : string, optional ['raise', 'warn', 'fix']
Strategy if the database does not conform to the most restrictive database specification.
The 'warn' option (default) will write out the incompatible database with a warning.
The 'raise' option will raise a DatabaseExportError.
The 'ignore' option will write out the incompatible database silently.
The 'fix' option will rectify the incompatibilities e.g. through name mangling. | Write a TDB file from a pycalphad Database object. | [
"Write",
"a",
"TDB",
"file",
"from",
"a",
"pycalphad",
"Database",
"object",
"."
] | def write_tdb(dbf, fd, groupby='subsystem', if_incompatible='warn'):
"""
Write a TDB file from a pycalphad Database object.
The goal is to produce TDBs that conform to the most restrictive subset of database specifications. Some of these
can be adjusted for automatically, such as the Thermo-Calc line length limit of 78. Others require changing the
database in non-trivial ways, such as the maximum length of function names (8). The default is to warn the user when
attempting to write an incompatible database and the user must choose whether to warn and write the file anyway or
to fix the incompatibility.
Currently the supported compatibility fixes are:
- Line length <= 78 characters (Thermo-Calc)
- Function names <= 8 characters (Thermo-Calc)
The current unsupported fixes include:
- Keyword length <= 2000 characters (Thermo-Calc)
- Element names <= 2 characters (Thermo-Calc)
- Phase names <= 24 characters (Thermo-Calc)
Other TDB compatibility issues required by Thermo-Calc or other software should be reported to the issue tracker.
Parameters
----------
dbf : Database
A pycalphad Database.
fd : file-like
File descriptor.
groupby : ['subsystem', 'phase'], optional
Desired grouping of parameters in the file.
if_incompatible : string, optional ['raise', 'warn', 'fix']
Strategy if the database does not conform to the most restrictive database specification.
The 'warn' option (default) will write out the incompatible database with a warning.
The 'raise' option will raise a DatabaseExportError.
The 'ignore' option will write out the incompatible database silently.
The 'fix' option will rectify the incompatibilities e.g. through name mangling.
"""
# Before writing anything, check that the TDB is valid and take the appropriate action if not
if if_incompatible not in ['warn', 'raise', 'ignore', 'fix']:
raise ValueError('Incorrect options passed to \'if_invalid\'. Valid args are \'raise\', \'warn\', or \'fix\'.')
# Handle function names > 8 characters
long_function_names = {k for k in dbf.symbols.keys() if len(k) > 8}
if len(long_function_names) > 0:
if if_incompatible == 'raise':
raise DatabaseExportError('The following function names are beyond the 8 character TDB limit: {}. Use the keyword argument \'if_incompatible\' to control this behavior.'.format(long_function_names))
elif if_incompatible == 'fix':
# if we are going to make changes, make the changes to a copy and leave the original object untouched
dbf = deepcopy(dbf) # TODO: if we do multiple fixes, we should only copy once
symbol_name_map = {}
for name in long_function_names:
hashed_name = 'F' + str(hashlib.md5(name.encode('UTF-8')).hexdigest()).upper()[:7] # this is implictly upper(), but it is explicit here
symbol_name_map[name] = hashed_name
_apply_new_symbol_names(dbf, symbol_name_map)
elif if_incompatible == 'warn':
warnings.warn('Ignoring that the following function names are beyond the 8 character TDB limit: {}. Use the keyword argument \'if_incompatible\' to control this behavior.'.format(long_function_names))
# Begin constructing the written database
writetime = datetime.datetime.now()
maxlen = 78
output = ""
# Comment header block
# Import here to prevent circular imports
from pycalphad import __version__
try:
# getuser() will raise on Windows if it can't find a username: https://bugs.python.org/issue32731
username = getpass.getuser()
except:
# if we can't find a good username, just choose a default and move on
username = 'user'
output += ("$" * maxlen) + "\n"
output += "$ Date: {}\n".format(writetime.strftime("%Y-%m-%d %H:%M"))
output += "$ Components: {}\n".format(', '.join(sorted(dbf.elements)))
output += "$ Phases: {}\n".format(', '.join(sorted(dbf.phases.keys())))
output += "$ Generated by {} (pycalphad {})\n".format(username, __version__)
output += ("$" * maxlen) + "\n\n"
for element in sorted(dbf.elements):
ref = dbf.refstates.get(element, {})
refphase = ref.get('phase', 'BLANK')
mass = ref.get('mass', 0.0)
H298 = ref.get('H298', 0.0)
S298 = ref.get('S298', 0.0)
output += "ELEMENT {0} {1} {2} {3} {4} !\n".format(element.upper(), refphase, mass, H298, S298)
if len(dbf.elements) > 0:
output += "\n"
for species in sorted(dbf.species, key=lambda s: s.name):
if species.name not in dbf.elements:
# construct the charge part of the specie
if species.charge != 0:
if species.charge >0:
charge_sign = '+'
else:
charge_sign = ''
charge = '/{}{}'.format(charge_sign, species.charge)
else:
charge = ''
species_constituents = ''.join(['{}{}'.format(el, val) for el, val in sorted(species.constituents.items(), key=lambda t: t[0])])
output += "SPECIES {0} {1}{2} !\n".format(species.name.upper(), species_constituents, charge)
if len(dbf.species) > 0:
output += "\n"
# Write FUNCTION block
for name, expr in sorted(dbf.symbols.items()):
if not isinstance(expr, Piecewise):
# Non-piecewise exprs need to be wrapped to print
# Otherwise TC's TDB parser will complain
expr = Piecewise((expr, And(v.T >= 1, v.T < 10000)))
expr = TCPrinter().doprint(expr).upper()
if ';' not in expr:
expr += '; N'
output += "FUNCTION {0} {1} !\n".format(name.upper(), expr)
output += "\n"
# Boilerplate code
output += "TYPE_DEFINITION % SEQ * !\n"
output += "DEFINE_SYSTEM_DEFAULT ELEMENT 2 !\n"
default_elements = [i.upper() for i in sorted(dbf.elements) if i.upper() == 'VA' or i.upper() == '/-']
if len(default_elements) > 0:
output += 'DEFAULT_COMMAND DEFINE_SYSTEM_ELEMENT {} !\n'.format(' '.join(default_elements))
output += "\n"
typedef_chars = list("^&*()'ABCDEFGHIJKLMNOPQSRTUVWXYZ")[::-1]
# Write necessary TYPE_DEF based on model hints
typedefs = defaultdict(lambda: ["%"])
for name, phase_obj in sorted(dbf.phases.items()):
model_hints = phase_obj.model_hints.copy()
possible_options = set(phase_options.keys()).intersection(model_hints)
# Phase options are handled later
for option in possible_options:
del model_hints[option]
if ('ordered_phase' in model_hints.keys()) and (model_hints['ordered_phase'] == name):
new_char = typedef_chars.pop()
typedefs[name].append(new_char)
typedefs[model_hints['disordered_phase']].append(new_char)
output += 'TYPE_DEFINITION {} GES AMEND_PHASE_DESCRIPTION {} DISORDERED_PART {} !\n'\
.format(new_char, model_hints['ordered_phase'].upper(),
model_hints['disordered_phase'].upper())
del model_hints['ordered_phase']
del model_hints['disordered_phase']
if ('disordered_phase' in model_hints.keys()) and (model_hints['disordered_phase'] == name):
# We handle adding the correct typedef when we write the ordered phase
del model_hints['ordered_phase']
del model_hints['disordered_phase']
if 'ihj_magnetic_afm_factor' in model_hints.keys():
new_char = typedef_chars.pop()
typedefs[name].append(new_char)
output += 'TYPE_DEFINITION {} GES AMEND_PHASE_DESCRIPTION {} MAGNETIC {} {} !\n'\
.format(new_char, name.upper(), model_hints['ihj_magnetic_afm_factor'],
model_hints['ihj_magnetic_structure_factor'])
del model_hints['ihj_magnetic_afm_factor']
del model_hints['ihj_magnetic_structure_factor']
if len(model_hints) > 0:
# Some model hints were not properly consumed
raise ValueError('Not all model hints are supported: {}'.format(model_hints))
# Perform a second loop now that all typedefs / model hints are consistent
for name, phase_obj in sorted(dbf.phases.items()):
# model_hints may also contain "phase options", e.g., ionic liquid
model_hints = phase_obj.model_hints.copy()
name_with_options = str(name.upper())
possible_options = set(phase_options.keys()).intersection(model_hints.keys())
if len(possible_options) > 0:
name_with_options += ':'
for option in possible_options:
name_with_options += phase_options[option]
output += "PHASE {0} {1} {2} {3} !\n".format(name_with_options, ''.join(typedefs[name]),
len(phase_obj.sublattices),
' '.join([str(i) for i in phase_obj.sublattices]))
constituents = ':'.join([','.join([spec.name for spec in sorted(subl)]) for subl in phase_obj.constituents])
output += "CONSTITUENT {0} :{1}: !\n".format(name_with_options, constituents)
output += "\n"
# PARAMETERs by subsystem
param_sorted = defaultdict(lambda: list())
paramtuple = namedtuple('ParamTuple', ['phase_name', 'parameter_type', 'complexity', 'constituent_array',
'parameter_order', 'diffusing_species', 'parameter', 'reference'])
for param in dbf._parameters.all():
if groupby == 'subsystem':
components = set()
for subl in param['constituent_array']:
components |= set(subl)
if param['diffusing_species'] != Species(None):
components |= {param['diffusing_species']}
# Wildcard operator is not a component
components -= {'*'}
desired_active_pure_elements = [list(x.constituents.keys()) for x in components]
components = set([el.upper() for constituents in desired_active_pure_elements for el in constituents])
# Remove vacancy if it's not the only component (pure vacancy endmember)
if len(components) > 1:
components -= {'VA'}
components = tuple(sorted([c.upper() for c in components]))
grouping = components
elif groupby == 'phase':
grouping = param['phase_name'].upper()
else:
raise ValueError('Unknown groupby attribute \'{}\''.format(groupby))
# We use the complexity parameter to help with sorting the parameters logically
param_sorted[grouping].append(paramtuple(param['phase_name'], param['parameter_type'],
sum([len(i) for i in param['constituent_array']]),
param['constituent_array'], param['parameter_order'],
param['diffusing_species'], param['parameter'],
param['reference']))
def write_parameter(param_to_write):
    # Render a single PARAMETER line in Thermo-Calc TDB syntax.
    subl_strs = []
    for subl in param_to_write.constituent_array:
        subl_strs.append(','.join(sorted(sp.name.upper() for sp in subl)))
    constituents = ':'.join(subl_strs)
    # TODO: Handle references
    expr_tree = param_to_write.parameter
    if not isinstance(expr_tree, Piecewise):
        # A bare (non-piecewise) expression must be wrapped, or
        # TC's TDB parser will fail on the printed form.
        expr_tree = Piecewise((expr_tree, And(v.T >= 1, v.T < 10000)))
    rendered = TCPrinter().doprint(expr_tree).upper()
    if ';' not in rendered:
        rendered += '; N'
    diff_tag = ""
    if param_to_write.diffusing_species != Species(None):
        diff_tag = "&" + param_to_write.diffusing_species.name
    return "PARAMETER {}({}{},{};{}) {} !\n".format(param_to_write.parameter_type.upper(),
                                                    param_to_write.phase_name.upper(),
                                                    diff_tag,
                                                    constituents,
                                                    param_to_write.parameter_order,
                                                    rendered)
if groupby == 'subsystem':
for num_species in range(1, 5):
subsystems = list(itertools.combinations(sorted([i.name.upper() for i in dbf.species]), num_species))
for subsystem in subsystems:
parameters = sorted(param_sorted[subsystem])
if len(parameters) > 0:
output += "\n\n"
output += "$" * maxlen + "\n"
output += "$ {}".format('-'.join(sorted(subsystem)).center(maxlen, " ")[2:-1]) + "$\n"
output += "$" * maxlen + "\n"
output += "\n"
for parameter in parameters:
output += write_parameter(parameter)
# Don't generate combinatorics for multi-component subsystems or we'll run out of memory
if len(dbf.species) > 4:
subsystems = [k for k in param_sorted.keys() if len(k) > 4]
for subsystem in subsystems:
parameters = sorted(param_sorted[subsystem])
for parameter in parameters:
output += write_parameter(parameter)
elif groupby == 'phase':
for phase_name in sorted(dbf.phases.keys()):
parameters = sorted(param_sorted[phase_name])
if len(parameters) > 0:
output += "\n\n"
output += "$" * maxlen + "\n"
output += "$ {}".format(phase_name.upper().center(maxlen, " ")[2:-1]) + "$\n"
output += "$" * maxlen + "\n"
output += "\n"
for parameter in parameters:
output += write_parameter(parameter)
else:
raise ValueError('Unknown groupby attribute {}'.format(groupby))
# Reflow text to respect character limit per line
fd.write(reflow_text(output, linewidth=maxlen)) | [
"def",
"write_tdb",
"(",
"dbf",
",",
"fd",
",",
"groupby",
"=",
"'subsystem'",
",",
"if_incompatible",
"=",
"'warn'",
")",
":",
"# Before writing anything, check that the TDB is valid and take the appropriate action if not",
"if",
"if_incompatible",
"not",
"in",
"[",
"'wa... | https://github.com/pycalphad/pycalphad/blob/631c41c3d041d4e8a47c57d0f25d078344b9da52/pycalphad/io/tdb.py#L666-L918 | ||
holzschu/Carnets | 44effb10ddfc6aa5c8b0687582a724ba82c6b547 | Library/lib/python3.7/zipapp.py | python | create_archive | (source, target=None, interpreter=None, main=None,
filter=None, compressed=False) | Create an application archive from SOURCE.
The SOURCE can be the name of a directory, or a filename or a file-like
object referring to an existing archive.
The content of SOURCE is packed into an application archive in TARGET,
which can be a filename or a file-like object. If SOURCE is a directory,
TARGET can be omitted and will default to the name of SOURCE with .pyz
appended.
The created application archive will have a shebang line specifying
that it should run with INTERPRETER (there will be no shebang line if
INTERPRETER is None), and a __main__.py which runs MAIN (if MAIN is
not specified, an existing __main__.py will be used). It is an error
to specify MAIN for anything other than a directory source with no
__main__.py, and it is an error to omit MAIN if the directory has no
__main__.py. | Create an application archive from SOURCE. | [
"Create",
"an",
"application",
"archive",
"from",
"SOURCE",
"."
def create_archive(source, target=None, interpreter=None, main=None,
                   filter=None, compressed=False):
    """Pack SOURCE into an executable application archive.

    SOURCE may name a directory, or be a filename / file-like object
    referring to an archive that already exists.  The packed result is
    written to TARGET (a filename or file-like object); when SOURCE is a
    directory and TARGET is omitted, the directory name with a ``.pyz``
    suffix is used.

    A shebang line invoking INTERPRETER is prepended when INTERPRETER is
    given.  MAIN ("pkg.module:callable") generates a ``__main__.py``
    entry point; it is an error to pass MAIN when the source directory
    already contains ``__main__.py``, to pass MAIN for a non-directory
    source, or to omit MAIN when the directory has no ``__main__.py``.
    """
    # An existing archive — whether given as an open file object or as a
    # path to a regular file — is simply copied through.
    if hasattr(source, 'read') and hasattr(source, 'readline'):
        _copy_archive(source, target, interpreter)
        return
    source = pathlib.Path(source)
    if source.is_file():
        _copy_archive(source, target, interpreter)
        return

    # From here on we are building a fresh archive from a directory tree.
    if not source.exists():
        raise ZipAppError("Source does not exist")
    has_main = (source / '__main__.py').is_file()
    if main and has_main:
        raise ZipAppError(
            "Cannot specify entry point if the source has __main__.py")
    if not (main or has_main):
        raise ZipAppError("Archive has no entry point")

    main_py = None
    if main:
        # MAIN must match "dotted.module:dotted.callable".
        mod, sep, fn = main.partition(':')
        entry_ok = (sep == ':'
                    and all(part.isidentifier() for part in mod.split('.'))
                    and all(part.isidentifier() for part in fn.split('.')))
        if not entry_ok:
            raise ZipAppError("Invalid entry point: " + main)
        main_py = MAIN_TEMPLATE.format(module=mod, fn=fn)

    if target is None:
        target = source.with_suffix('.pyz')
    elif not hasattr(target, 'write'):
        target = pathlib.Path(target)

    with _maybe_open(target, 'wb') as stream:
        _write_file_prefix(stream, interpreter)
        compression = zipfile.ZIP_DEFLATED if compressed else zipfile.ZIP_STORED
        with zipfile.ZipFile(stream, 'w', compression=compression) as archive:
            for entry in source.rglob('*'):
                relpath = entry.relative_to(source)
                if filter is None or filter(relpath):
                    archive.write(entry, relpath.as_posix())
            if main_py:
                archive.writestr('__main__.py', main_py.encode('utf-8'))

    # Only a real filesystem target can be marked executable.
    if interpreter and not hasattr(target, 'write'):
        target.chmod(target.stat().st_mode | stat.S_IEXEC)
"def",
"create_archive",
"(",
"source",
",",
"target",
"=",
"None",
",",
"interpreter",
"=",
"None",
",",
"main",
"=",
"None",
",",
"filter",
"=",
"None",
",",
"compressed",
"=",
"False",
")",
":",
"# Are we copying an existing archive?",
"source_is_file",
"="... | https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/zipapp.py#L76-L147 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.