repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
Esri/ArcREST | src/arcrest/manageags/_system.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_system.py#L150-L168 | def registerDirs(self,json_dirs):
"""
Registers multiple new server directories.
Inputs:
json_dirs - Array of Server Directories in JSON format.
"""
url = self._url + "/directories/registerDirs"
params = {
"f" : "json",
"directories" : json_dirs
}
res = self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return res | [
"def",
"registerDirs",
"(",
"self",
",",
"json_dirs",
")",
":",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/directories/registerDirs\"",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
",",
"\"directories\"",
":",
"json_dirs",
"}",
"res",
"=",
"self",
".",
"_p... | Registers multiple new server directories.
Inputs:
json_dirs - Array of Server Directories in JSON format. | [
"Registers",
"multiple",
"new",
"server",
"directories",
".",
"Inputs",
":",
"json_dirs",
"-",
"Array",
"of",
"Server",
"Directories",
"in",
"JSON",
"format",
"."
] | python | train |
ANTsX/ANTsPy | ants/registration/reorient_image.py | https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/registration/reorient_image.py#L171-L200 | def get_center_of_mass(image):
"""
Compute an image center of mass in physical space which is defined
as the mean of the intensity weighted voxel coordinate system.
ANTsR function: `getCenterOfMass`
Arguments
---------
image : ANTsImage
image from which center of mass will be computed
Returns
-------
scalar
Example
-------
>>> fi = ants.image_read( ants.get_ants_data("r16"))
>>> com1 = ants.get_center_of_mass( fi )
>>> fi = ants.image_read( ants.get_ants_data("r64"))
>>> com2 = ants.get_center_of_mass( fi )
"""
if image.pixeltype != 'float':
image = image.clone('float')
libfn = utils.get_lib_fn('centerOfMass%s' % image._libsuffix)
com = libfn(image.pointer)
return tuple(com) | [
"def",
"get_center_of_mass",
"(",
"image",
")",
":",
"if",
"image",
".",
"pixeltype",
"!=",
"'float'",
":",
"image",
"=",
"image",
".",
"clone",
"(",
"'float'",
")",
"libfn",
"=",
"utils",
".",
"get_lib_fn",
"(",
"'centerOfMass%s'",
"%",
"image",
".",
"_... | Compute an image center of mass in physical space which is defined
as the mean of the intensity weighted voxel coordinate system.
ANTsR function: `getCenterOfMass`
Arguments
---------
image : ANTsImage
image from which center of mass will be computed
Returns
-------
scalar
Example
-------
>>> fi = ants.image_read( ants.get_ants_data("r16"))
>>> com1 = ants.get_center_of_mass( fi )
>>> fi = ants.image_read( ants.get_ants_data("r64"))
>>> com2 = ants.get_center_of_mass( fi ) | [
"Compute",
"an",
"image",
"center",
"of",
"mass",
"in",
"physical",
"space",
"which",
"is",
"defined",
"as",
"the",
"mean",
"of",
"the",
"intensity",
"weighted",
"voxel",
"coordinate",
"system",
"."
] | python | train |
marcomusy/vtkplotter | vtkplotter/analysis.py | https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/analysis.py#L445-L454 | def delaunay3D(dataset, alpha=0, tol=None, boundary=True):
"""Create 3D Delaunay triangulation of input points."""
deln = vtk.vtkDelaunay3D()
deln.SetInputData(dataset)
deln.SetAlpha(alpha)
if tol:
deln.SetTolerance(tol)
deln.SetBoundingTriangulation(boundary)
deln.Update()
return deln.GetOutput() | [
"def",
"delaunay3D",
"(",
"dataset",
",",
"alpha",
"=",
"0",
",",
"tol",
"=",
"None",
",",
"boundary",
"=",
"True",
")",
":",
"deln",
"=",
"vtk",
".",
"vtkDelaunay3D",
"(",
")",
"deln",
".",
"SetInputData",
"(",
"dataset",
")",
"deln",
".",
"SetAlpha... | Create 3D Delaunay triangulation of input points. | [
"Create",
"3D",
"Delaunay",
"triangulation",
"of",
"input",
"points",
"."
] | python | train |
sparknetworks/pgpm | pgpm/lib/utils/config.py | https://github.com/sparknetworks/pgpm/blob/1a060df46a886095181f692ea870a73a32510a2e/pgpm/lib/utils/config.py#L180-L197 | def to_string(self):
"""
stringifies version
:return: string of version
"""
if self.major == -1:
major_str = 'x'
else:
major_str = self.major
if self.minor == -1:
minor_str = 'x'
else:
minor_str = self.minor
if self.patch == -1:
patch_str = 'x'
else:
patch_str = self.patch
return '{0}_{1}_{2}'.format(major_str, minor_str, patch_str) | [
"def",
"to_string",
"(",
"self",
")",
":",
"if",
"self",
".",
"major",
"==",
"-",
"1",
":",
"major_str",
"=",
"'x'",
"else",
":",
"major_str",
"=",
"self",
".",
"major",
"if",
"self",
".",
"minor",
"==",
"-",
"1",
":",
"minor_str",
"=",
"'x'",
"e... | stringifies version
:return: string of version | [
"stringifies",
"version",
":",
"return",
":",
"string",
"of",
"version"
] | python | train |
lucapinello/Haystack | haystack/external.py | https://github.com/lucapinello/Haystack/blob/cc080d741f36cd77b07c0b59d08ea6a4cf0ef2f7/haystack/external.py#L994-L1003 | def trimmed(self,thresh=0.1):
"""
m.trimmed(,thresh=0.1) -- Return motif with low-information flanks removed. 'thresh' is in bits.
"""
for start in range(0,self.width-1):
if self.bits[start]>=thresh: break
for stop in range(self.width,1,-1):
if self.bits[stop-1]>=thresh: break
m = self[start,stop]
return m | [
"def",
"trimmed",
"(",
"self",
",",
"thresh",
"=",
"0.1",
")",
":",
"for",
"start",
"in",
"range",
"(",
"0",
",",
"self",
".",
"width",
"-",
"1",
")",
":",
"if",
"self",
".",
"bits",
"[",
"start",
"]",
">=",
"thresh",
":",
"break",
"for",
"stop... | m.trimmed(,thresh=0.1) -- Return motif with low-information flanks removed. 'thresh' is in bits. | [
"m",
".",
"trimmed",
"(",
"thresh",
"=",
"0",
".",
"1",
")",
"--",
"Return",
"motif",
"with",
"low",
"-",
"information",
"flanks",
"removed",
".",
"thresh",
"is",
"in",
"bits",
"."
] | python | train |
O365/python-o365 | O365/excel.py | https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/excel.py#L851-L857 | def get_format(self):
""" Returns a RangeFormat instance with the format of this range """
url = self.build_url(self._endpoints.get('get_format'))
response = self.session.get(url)
if not response:
return None
return self.range_format_constructor(parent=self, **{self._cloud_data_key: response.json()}) | [
"def",
"get_format",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"build_url",
"(",
"self",
".",
"_endpoints",
".",
"get",
"(",
"'get_format'",
")",
")",
"response",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
")",
"if",
"not",
"response"... | Returns a RangeFormat instance with the format of this range | [
"Returns",
"a",
"RangeFormat",
"instance",
"with",
"the",
"format",
"of",
"this",
"range"
] | python | train |
lacava/few | few/population.py | https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/population.py#L231-L298 | def init_pop(self):
"""initializes population of features as GP stacks."""
pop = Pop(self.population_size)
seed_with_raw_features = False
# make programs
if self.seed_with_ml:
# initial population is the components of the default ml model
if (self.ml_type == 'SVC' or self.ml_type == 'SVR'):
# this is needed because svm has a bug that throws valueerror
#on attribute check
seed_with_raw_features=True
elif (hasattr(self.pipeline.named_steps['ml'],'coef_') or
hasattr(self.pipeline.named_steps['ml'],'feature_importances_')):
# add model components with non-zero coefficients to initial
# population, in order of coefficient size
coef = (self.pipeline.named_steps['ml'].coef_ if
hasattr(self.pipeline.named_steps['ml'],'coef_') else
self.pipeline.named_steps['ml'].feature_importances_)
# compress multiple coefficients for each feature into single
# numbers (occurs with multiclass classification)
if len(coef.shape)>1:
coef = [np.mean(abs(c)) for c in coef.transpose()]
# remove zeros
coef = [c for c in coef if c!=0]
# sort feature locations based on importance/coefficient
locs = np.arange(len(coef))
locs = locs[np.argsort(np.abs(coef))[::-1]]
for i,p in enumerate(pop.individuals):
if i < len(locs):
p.stack = [node('x',loc=locs[i])]
else:
# make program if pop is bigger than n_features
self.make_program(p.stack,self.func_set,self.term_set,
self.random_state.randint(self.min_depth,
self.max_depth+1),
self.otype)
p.stack = list(reversed(p.stack))
else:
seed_with_raw_features = True
# seed with random features if no importance info available
if seed_with_raw_features:
for i,p in enumerate(pop.individuals):
if i < self.n_features:
p.stack = [node('x',
loc=self.random_state.randint(self.n_features))]
else:
# make program if pop is bigger than n_features
self.make_program(p.stack,self.func_set,self.term_set,
self.random_state.randint(self.min_depth,
self.max_depth+1),
self.otype)
p.stack = list(reversed(p.stack))
# print initial population
if self.verbosity > 2:
print("seeded initial population:",
self.stacks_2_eqns(pop.individuals))
else: # don't seed with ML
for I in pop.individuals:
depth = self.random_state.randint(self.min_depth,self.max_depth_init)
self.make_program(I.stack,self.func_set,self.term_set,depth,
self.otype)
#print(I.stack)
I.stack = list(reversed(I.stack))
return pop | [
"def",
"init_pop",
"(",
"self",
")",
":",
"pop",
"=",
"Pop",
"(",
"self",
".",
"population_size",
")",
"seed_with_raw_features",
"=",
"False",
"# make programs",
"if",
"self",
".",
"seed_with_ml",
":",
"# initial population is the components of the default ml model",
... | initializes population of features as GP stacks. | [
"initializes",
"population",
"of",
"features",
"as",
"GP",
"stacks",
"."
] | python | train |
agoragames/haigha | haigha/writer.py | https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/writer.py#L69-L74 | def write_bit(self, b, pack=Struct('B').pack):
'''
Write a single bit. Convenience method for single bit args.
'''
self._output_buffer.append(pack(True if b else False))
return self | [
"def",
"write_bit",
"(",
"self",
",",
"b",
",",
"pack",
"=",
"Struct",
"(",
"'B'",
")",
".",
"pack",
")",
":",
"self",
".",
"_output_buffer",
".",
"append",
"(",
"pack",
"(",
"True",
"if",
"b",
"else",
"False",
")",
")",
"return",
"self"
] | Write a single bit. Convenience method for single bit args. | [
"Write",
"a",
"single",
"bit",
".",
"Convenience",
"method",
"for",
"single",
"bit",
"args",
"."
] | python | train |
hazelcast/hazelcast-python-client | hazelcast/proxy/map.py | https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/map.py#L544-L574 | def put_if_absent(self, key, value, ttl=-1):
"""
Associates the specified key with the given value if it is not already associated. If ttl is provided, entry
will expire and get evicted after the ttl.
This is equivalent to:
>>> if not map.contains_key(key):
>>> return map.put(key,value)
>>> else:
>>> return map.get(key)
except that the action is performed atomically.
**Warning:
This method returns a clone of the previous value, not the original (identically equal) value previously put
into the map.**
**Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), key of the entry.
:param value: (object), value of the entry.
:param ttl: (int), maximum time in seconds for this entry to stay in the map, if not provided, the value
configured on server side configuration will be used (optional).
:return: (object), old value of the entry.
"""
check_not_none(key, "key can't be None")
check_not_none(value, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._put_if_absent_internal(key_data, value_data, ttl) | [
"def",
"put_if_absent",
"(",
"self",
",",
"key",
",",
"value",
",",
"ttl",
"=",
"-",
"1",
")",
":",
"check_not_none",
"(",
"key",
",",
"\"key can't be None\"",
")",
"check_not_none",
"(",
"value",
",",
"\"value can't be None\"",
")",
"key_data",
"=",
"self",... | Associates the specified key with the given value if it is not already associated. If ttl is provided, entry
will expire and get evicted after the ttl.
This is equivalent to:
>>> if not map.contains_key(key):
>>> return map.put(key,value)
>>> else:
>>> return map.get(key)
except that the action is performed atomically.
**Warning:
This method returns a clone of the previous value, not the original (identically equal) value previously put
into the map.**
**Warning 2: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), key of the entry.
:param value: (object), value of the entry.
:param ttl: (int), maximum time in seconds for this entry to stay in the map, if not provided, the value
configured on server side configuration will be used (optional).
:return: (object), old value of the entry. | [
"Associates",
"the",
"specified",
"key",
"with",
"the",
"given",
"value",
"if",
"it",
"is",
"not",
"already",
"associated",
".",
"If",
"ttl",
"is",
"provided",
"entry",
"will",
"expire",
"and",
"get",
"evicted",
"after",
"the",
"ttl",
"."
] | python | train |
bitesofcode/projexui | projexui/widgets/xtoolbutton.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtoolbutton.py#L303-L312 | def setEnabled(self, state):
"""
Updates the drop shadow effect for this widget on enable/disable
state change.
:param state | <bool>
"""
super(XToolButton, self).setEnabled(state)
self.updateUi() | [
"def",
"setEnabled",
"(",
"self",
",",
"state",
")",
":",
"super",
"(",
"XToolButton",
",",
"self",
")",
".",
"setEnabled",
"(",
"state",
")",
"self",
".",
"updateUi",
"(",
")"
] | Updates the drop shadow effect for this widget on enable/disable
state change.
:param state | <bool> | [
"Updates",
"the",
"drop",
"shadow",
"effect",
"for",
"this",
"widget",
"on",
"enable",
"/",
"disable",
"state",
"change",
".",
":",
"param",
"state",
"|",
"<bool",
">"
] | python | train |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L440-L455 | def enqueue_message(self, message, timeout):
"""Add the given message to this transport's queue.
This method also handles ACKing any WRTE messages.
Args:
message: The AdbMessage to enqueue.
timeout: The timeout to use for the operation. Specifically, WRTE
messages cause an OKAY to be sent; timeout is used for that send.
"""
# Ack WRTE messages immediately, handle our OPEN ack if it gets enqueued.
if message.command == 'WRTE':
self._send_command('OKAY', timeout=timeout)
elif message.command == 'OKAY':
self._set_or_check_remote_id(message.arg0)
self.message_queue.put(message) | [
"def",
"enqueue_message",
"(",
"self",
",",
"message",
",",
"timeout",
")",
":",
"# Ack WRTE messages immediately, handle our OPEN ack if it gets enqueued.",
"if",
"message",
".",
"command",
"==",
"'WRTE'",
":",
"self",
".",
"_send_command",
"(",
"'OKAY'",
",",
"timeo... | Add the given message to this transport's queue.
This method also handles ACKing any WRTE messages.
Args:
message: The AdbMessage to enqueue.
timeout: The timeout to use for the operation. Specifically, WRTE
messages cause an OKAY to be sent; timeout is used for that send. | [
"Add",
"the",
"given",
"message",
"to",
"this",
"transport",
"s",
"queue",
"."
] | python | train |
meejah/txtorcon | txtorcon/torconfig.py | https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/torconfig.py#L841-L867 | def _conf_changed(self, arg):
"""
internal callback. from control-spec:
4.1.18. Configuration changed
The syntax is:
StartReplyLine *(MidReplyLine) EndReplyLine
StartReplyLine = "650-CONF_CHANGED" CRLF
MidReplyLine = "650-" KEYWORD ["=" VALUE] CRLF
EndReplyLine = "650 OK"
Tor configuration options have changed (such as via a SETCONF or
RELOAD signal). KEYWORD and VALUE specify the configuration option
that was changed. Undefined configuration options contain only the
KEYWORD.
"""
conf = parse_keywords(arg, multiline_values=False)
for (k, v) in conf.items():
# v will be txtorcon.DEFAULT_VALUE already from
# parse_keywords if it was unspecified
real_name = self._find_real_name(k)
if real_name in self.parsers:
v = self.parsers[real_name].parse(v)
self.config[real_name] = v | [
"def",
"_conf_changed",
"(",
"self",
",",
"arg",
")",
":",
"conf",
"=",
"parse_keywords",
"(",
"arg",
",",
"multiline_values",
"=",
"False",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"conf",
".",
"items",
"(",
")",
":",
"# v will be txtorcon.DEFAULT_VAL... | internal callback. from control-spec:
4.1.18. Configuration changed
The syntax is:
StartReplyLine *(MidReplyLine) EndReplyLine
StartReplyLine = "650-CONF_CHANGED" CRLF
MidReplyLine = "650-" KEYWORD ["=" VALUE] CRLF
EndReplyLine = "650 OK"
Tor configuration options have changed (such as via a SETCONF or
RELOAD signal). KEYWORD and VALUE specify the configuration option
that was changed. Undefined configuration options contain only the
KEYWORD. | [
"internal",
"callback",
".",
"from",
"control",
"-",
"spec",
":"
] | python | train |
croscon/fleaker | fleaker/marshmallow/fields/foreign_key.py | https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/fields/foreign_key.py#L36-L49 | def _add_to_schema(self, field_name, schema):
"""Set the ``attribute`` attr to the field in question so this always
gets deserialzed into the field name without ``_id``.
Args:
field_name (str): The name of the field (the attribute name being
set in the schema).
schema (marshmallow.Schema): The actual parent schema this field
belongs to.
"""
super(ForeignKeyField, self)._add_to_schema(field_name, schema)
if self.get_field_value('convert_fks', default=True):
self.attribute = field_name.replace('_id', '') | [
"def",
"_add_to_schema",
"(",
"self",
",",
"field_name",
",",
"schema",
")",
":",
"super",
"(",
"ForeignKeyField",
",",
"self",
")",
".",
"_add_to_schema",
"(",
"field_name",
",",
"schema",
")",
"if",
"self",
".",
"get_field_value",
"(",
"'convert_fks'",
","... | Set the ``attribute`` attr to the field in question so this always
gets deserialzed into the field name without ``_id``.
Args:
field_name (str): The name of the field (the attribute name being
set in the schema).
schema (marshmallow.Schema): The actual parent schema this field
belongs to. | [
"Set",
"the",
"attribute",
"attr",
"to",
"the",
"field",
"in",
"question",
"so",
"this",
"always",
"gets",
"deserialzed",
"into",
"the",
"field",
"name",
"without",
"_id",
"."
] | python | train |
mitsei/dlkit | dlkit/json_/authorization/managers.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/managers.py#L702-L717 | def get_vault_hierarchy_design_session(self):
"""Gets the session designing vault hierarchies.
return: (osid.authorization.VaultHierarchyDesignSession) - a
``VaultHierarchyDesignSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_vault_hierarchy_design() is
false``
*compliance: optional -- This method must be implemented if
``supports_vault_hierarchy_design()`` is true.*
"""
if not self.supports_vault_hierarchy_design():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.VaultHierarchyDesignSession(runtime=self._runtime) | [
"def",
"get_vault_hierarchy_design_session",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"supports_vault_hierarchy_design",
"(",
")",
":",
"raise",
"errors",
".",
"Unimplemented",
"(",
")",
"# pylint: disable=no-member",
"return",
"sessions",
".",
"VaultHierarchy... | Gets the session designing vault hierarchies.
return: (osid.authorization.VaultHierarchyDesignSession) - a
``VaultHierarchyDesignSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_vault_hierarchy_design() is
false``
*compliance: optional -- This method must be implemented if
``supports_vault_hierarchy_design()`` is true.* | [
"Gets",
"the",
"session",
"designing",
"vault",
"hierarchies",
"."
] | python | train |
tensorflow/skflow | scripts/docs/docs.py | https://github.com/tensorflow/skflow/blob/f8da498a1abb7562f57dfc7010941578103061b6/scripts/docs/docs.py#L377-L395 | def _write_member_markdown_to_file(self, f, prefix, name, member):
"""Print `member` to `f`."""
if (inspect.isfunction(member) or inspect.ismethod(member) or
isinstance(member, property)):
print("- - -", file=f)
print("", file=f)
self._print_function(f, prefix, name, member)
print("", file=f)
elif inspect.isclass(member):
print("- - -", file=f)
print("", file=f)
print("%s `class %s` {#%s}" % (prefix, name,
_get_anchor(self._module_to_name, name)),
file=f)
print("", file=f)
self._write_class_markdown_to_file(f, name, member)
print("", file=f)
else:
raise RuntimeError("Member %s has unknown type %s" % (name, type(member))) | [
"def",
"_write_member_markdown_to_file",
"(",
"self",
",",
"f",
",",
"prefix",
",",
"name",
",",
"member",
")",
":",
"if",
"(",
"inspect",
".",
"isfunction",
"(",
"member",
")",
"or",
"inspect",
".",
"ismethod",
"(",
"member",
")",
"or",
"isinstance",
"(... | Print `member` to `f`. | [
"Print",
"member",
"to",
"f",
"."
] | python | train |
Devoxin/Lavalink.py | lavalink/PlayerManager.py | https://github.com/Devoxin/Lavalink.py/blob/63f55c3d726d24c4cfd3674d3cd6aab6f5be110d/lavalink/PlayerManager.py#L170-L174 | async def handle_event(self, event):
""" Makes the player play the next song from the queue if a song has finished or an issue occurred. """
if isinstance(event, (TrackStuckEvent, TrackExceptionEvent)) or \
isinstance(event, TrackEndEvent) and event.reason == 'FINISHED':
await self.play() | [
"async",
"def",
"handle_event",
"(",
"self",
",",
"event",
")",
":",
"if",
"isinstance",
"(",
"event",
",",
"(",
"TrackStuckEvent",
",",
"TrackExceptionEvent",
")",
")",
"or",
"isinstance",
"(",
"event",
",",
"TrackEndEvent",
")",
"and",
"event",
".",
"rea... | Makes the player play the next song from the queue if a song has finished or an issue occurred. | [
"Makes",
"the",
"player",
"play",
"the",
"next",
"song",
"from",
"the",
"queue",
"if",
"a",
"song",
"has",
"finished",
"or",
"an",
"issue",
"occurred",
"."
] | python | valid |
jwkvam/bowtie | bowtie/_component.py | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_component.py#L165-L179 | def make_event(event: Callable) -> Callable:
"""Create an event from a method signature."""
@property # type: ignore
@wraps(event)
def actualevent(self): # pylint: disable=missing-docstring
name = event.__name__[3:]
try:
# the getter post processing function
# is preserved with an underscore
getter = event(self).__name__
except AttributeError:
getter = None
return Event(name, self._uuid, getter) # pylint: disable=protected-access
return actualevent | [
"def",
"make_event",
"(",
"event",
":",
"Callable",
")",
"->",
"Callable",
":",
"@",
"property",
"# type: ignore",
"@",
"wraps",
"(",
"event",
")",
"def",
"actualevent",
"(",
"self",
")",
":",
"# pylint: disable=missing-docstring",
"name",
"=",
"event",
".",
... | Create an event from a method signature. | [
"Create",
"an",
"event",
"from",
"a",
"method",
"signature",
"."
] | python | train |
AlpacaDB/selectivesearch | selectivesearch/selectivesearch.py | https://github.com/AlpacaDB/selectivesearch/blob/52f7f83bb247b1ed941b099c6a610da1b0e30451/selectivesearch/selectivesearch.py#L74-L100 | def _calc_colour_hist(img):
"""
calculate colour histogram for each region
the size of output histogram will be BINS * COLOUR_CHANNELS(3)
number of bins is 25 as same as [uijlings_ijcv2013_draft.pdf]
extract HSV
"""
BINS = 25
hist = numpy.array([])
for colour_channel in (0, 1, 2):
# extracting one colour channel
c = img[:, colour_channel]
# calculate histogram for each colour and join to the result
hist = numpy.concatenate(
[hist] + [numpy.histogram(c, BINS, (0.0, 255.0))[0]])
# L1 normalize
hist = hist / len(img)
return hist | [
"def",
"_calc_colour_hist",
"(",
"img",
")",
":",
"BINS",
"=",
"25",
"hist",
"=",
"numpy",
".",
"array",
"(",
"[",
"]",
")",
"for",
"colour_channel",
"in",
"(",
"0",
",",
"1",
",",
"2",
")",
":",
"# extracting one colour channel",
"c",
"=",
"img",
"[... | calculate colour histogram for each region
the size of output histogram will be BINS * COLOUR_CHANNELS(3)
number of bins is 25 as same as [uijlings_ijcv2013_draft.pdf]
extract HSV | [
"calculate",
"colour",
"histogram",
"for",
"each",
"region"
] | python | train |
dhermes/bezier | src/bezier/_surface_helpers.py | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L2826-L2852 | def _evaluate_barycentric_multi(nodes, degree, param_vals, dimension):
r"""Compute multiple points on the surface.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): Control point nodes that define the surface.
degree (int): The degree of the surface define by ``nodes``.
param_vals (numpy.ndarray): Array of parameter values (as a
``N x 3`` array).
dimension (int): The dimension the surface lives in.
Returns:
numpy.ndarray: The evaluated points, where columns correspond to
rows of ``param_vals`` and the rows to the dimension of the
underlying surface.
"""
num_vals, _ = param_vals.shape
result = np.empty((dimension, num_vals), order="F")
for index, (lambda1, lambda2, lambda3) in enumerate(param_vals):
result[:, index] = evaluate_barycentric(
nodes, degree, lambda1, lambda2, lambda3
)[:, 0]
return result | [
"def",
"_evaluate_barycentric_multi",
"(",
"nodes",
",",
"degree",
",",
"param_vals",
",",
"dimension",
")",
":",
"num_vals",
",",
"_",
"=",
"param_vals",
".",
"shape",
"result",
"=",
"np",
".",
"empty",
"(",
"(",
"dimension",
",",
"num_vals",
")",
",",
... | r"""Compute multiple points on the surface.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): Control point nodes that define the surface.
degree (int): The degree of the surface define by ``nodes``.
param_vals (numpy.ndarray): Array of parameter values (as a
``N x 3`` array).
dimension (int): The dimension the surface lives in.
Returns:
numpy.ndarray: The evaluated points, where columns correspond to
rows of ``param_vals`` and the rows to the dimension of the
underlying surface. | [
"r",
"Compute",
"multiple",
"points",
"on",
"the",
"surface",
"."
] | python | train |
mikedh/trimesh | trimesh/transformations.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/transformations.py#L1420-L1431 | def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
"""
q = np.array(quaternion, dtype=np.float64, copy=True)
np.negative(q[1:], q[1:])
return q | [
"def",
"quaternion_conjugate",
"(",
"quaternion",
")",
":",
"q",
"=",
"np",
".",
"array",
"(",
"quaternion",
",",
"dtype",
"=",
"np",
".",
"float64",
",",
"copy",
"=",
"True",
")",
"np",
".",
"negative",
"(",
"q",
"[",
"1",
":",
"]",
",",
"q",
"[... | Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True | [
"Return",
"conjugate",
"of",
"quaternion",
"."
] | python | train |
BerkeleyAutomation/autolab_core | autolab_core/logger.py | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/logger.py#L41-L57 | def add_root_log_file(log_file):
"""
Add a log file to the root logger.
Parameters
----------
log_file :obj:`str`
The path to the log file.
"""
root_logger = logging.getLogger()
# add a file handle to the root logger
hdlr = logging.FileHandler(log_file)
formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
hdlr.setFormatter(formatter)
root_logger.addHandler(hdlr)
root_logger.info('Root logger now logging to {}'.format(log_file)) | [
"def",
"add_root_log_file",
"(",
"log_file",
")",
":",
"root_logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"# add a file handle to the root logger",
"hdlr",
"=",
"logging",
".",
"FileHandler",
"(",
"log_file",
")",
"formatter",
"=",
"logging",
".",
"Formatt... | Add a log file to the root logger.
Parameters
----------
log_file :obj:`str`
The path to the log file. | [
"Add",
"a",
"log",
"file",
"to",
"the",
"root",
"logger",
"."
] | python | train |
JukeboxPipeline/jukebox-core | src/jukeboxcore/gui/widgetdelegate.py | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgetdelegate.py#L393-L459 | def propagate_event_to_delegate(self, event, eventhandler):
"""Propagate the given Mouse event to the widgetdelegate
Enter edit mode, get the editor widget and issue an event on that widget.
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:param eventhandler: the eventhandler to use. E.g. ``"mousePressEvent"``
:type eventhandler: str
:returns: None
:rtype: None
:raises: None
"""
# if we are recursing because we sent a click event, and it got propagated to the parents
# and we recieve it again, terminate
if self.__recursing:
return
# find index at mouse position
i = self.index_at_event(event)
# if the index is not valid, we don't care
# handle it the default way
if not i.isValid():
return getattr(super(WidgetDelegateViewMixin, self), eventhandler)(event)
# get the widget delegate. if there is None, handle it the default way
delegate = self.itemDelegate(i)
if not isinstance(delegate, WidgetDelegate):
return getattr(super(WidgetDelegateViewMixin, self), eventhandler)(event)
# see if there is already a editor
widget = delegate.edit_widget(i)
if not widget:
# close all editors, then start editing
delegate.close_editors()
# Force editing. If in editing state, view will refuse editing.
if self.state() == self.EditingState:
self.setState(self.NoState)
self.edit(i)
# get the editor widget. if there is None, there is nothing to do so return
widget = delegate.edit_widget(i)
if not widget:
return getattr(super(WidgetDelegateViewMixin, self), eventhandler)(event)
# try to find the relative position to the widget
pid = self.get_pos_in_delegate(i, event.globalPos())
widgetatpos = widget.childAt(pid)
if widgetatpos:
widgettoclick = widgetatpos
g = widget.mapToGlobal(pid)
clickpos = widgettoclick.mapFromGlobal(g)
else:
widgettoclick = widget
clickpos = pid
# create a new event for the editor widget.
e = QtGui.QMouseEvent(event.type(),
clickpos,
event.button(),
event.buttons(),
event.modifiers())
# before we send, make sure, we cannot recurse
self.__recursing = True
try:
r = QtGui.QApplication.sendEvent(widgettoclick, e)
finally:
self.__recursing = False # out of the recursion. now we can accept click events again
return r | [
"def",
"propagate_event_to_delegate",
"(",
"self",
",",
"event",
",",
"eventhandler",
")",
":",
"# if we are recursing because we sent a click event, and it got propagated to the parents",
"# and we recieve it again, terminate",
"if",
"self",
".",
"__recursing",
":",
"return",
"#... | Propagate the given Mouse event to the widgetdelegate
Enter edit mode, get the editor widget and issue an event on that widget.
:param event: the mouse event
:type event: :class:`QtGui.QMouseEvent`
:param eventhandler: the eventhandler to use. E.g. ``"mousePressEvent"``
:type eventhandler: str
:returns: None
:rtype: None
:raises: None | [
"Propagate",
"the",
"given",
"Mouse",
"event",
"to",
"the",
"widgetdelegate"
] | python | train |
openwisp/netjsonconfig | netjsonconfig/backends/openwrt/converters/interfaces.py | https://github.com/openwisp/netjsonconfig/blob/c23ce9732720856e2f6dc54060db71a8182c7d4b/netjsonconfig/backends/openwrt/converters/interfaces.py#L125-L154 | def __intermediate_bridge(self, interface, i):
"""
converts NetJSON bridge to
UCI intermediate data structure
"""
# ensure type "bridge" is only given to one logical interface
if interface['type'] == 'bridge' and i < 2:
bridge_members = ' '.join(interface.pop('bridge_members'))
# put bridge members in ifname attribute
if bridge_members:
interface['ifname'] = bridge_members
# if no members, this is an empty bridge
else:
interface['bridge_empty'] = True
del interface['ifname']
# bridge has already been defined
# but we need to add more references to it
elif interface['type'] == 'bridge' and i >= 2:
# openwrt adds "br-" prefix to bridge interfaces
# we need to take this into account when referring
# to these physical names
if 'br-' not in interface['ifname']:
interface['ifname'] = 'br-{ifname}'.format(**interface)
# do not repeat bridge attributes (they have already been processed)
for attr in ['type', 'bridge_members', 'stp', 'gateway']:
if attr in interface:
del interface[attr]
elif interface['type'] != 'bridge':
del interface['type']
return interface | [
"def",
"__intermediate_bridge",
"(",
"self",
",",
"interface",
",",
"i",
")",
":",
"# ensure type \"bridge\" is only given to one logical interface",
"if",
"interface",
"[",
"'type'",
"]",
"==",
"'bridge'",
"and",
"i",
"<",
"2",
":",
"bridge_members",
"=",
"' '",
... | converts NetJSON bridge to
UCI intermediate data structure | [
"converts",
"NetJSON",
"bridge",
"to",
"UCI",
"intermediate",
"data",
"structure"
] | python | valid |
python-cas/python-cas | cas.py | https://github.com/python-cas/python-cas/blob/42fc76fbd2e50f167e752eba4bf5b0df74a83978/cas.py#L29-L41 | def verify_logout_request(cls, logout_request, ticket):
"""verifies the single logout request came from the CAS server
returns True if the logout_request is valid, False otherwise
"""
try:
session_index = cls.get_saml_slos(logout_request)
session_index = session_index[0].text
if session_index == ticket:
return True
else:
return False
except (AttributeError, IndexError):
return False | [
"def",
"verify_logout_request",
"(",
"cls",
",",
"logout_request",
",",
"ticket",
")",
":",
"try",
":",
"session_index",
"=",
"cls",
".",
"get_saml_slos",
"(",
"logout_request",
")",
"session_index",
"=",
"session_index",
"[",
"0",
"]",
".",
"text",
"if",
"s... | verifies the single logout request came from the CAS server
returns True if the logout_request is valid, False otherwise | [
"verifies",
"the",
"single",
"logout",
"request",
"came",
"from",
"the",
"CAS",
"server",
"returns",
"True",
"if",
"the",
"logout_request",
"is",
"valid",
"False",
"otherwise"
] | python | train |
Kortemme-Lab/klab | klab/bio/uniprot.py | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/uniprot.py#L542-L560 | def _parse_sequence_tag(self):
'''Parses the sequence and atomic mass.'''
#main_tags = self._dom.getElementsByTagName("uniprot")
#assert(len(main_tags) == 1)
#entry_tags = main_tags[0].getElementsByTagName("entry")
#assert(len(entry_tags) == 1)
#entry_tags[0]
entry_tag = self.entry_tag
# only get sequence tags that are direct children of the entry tag (sequence tags can also be children of entry.comment.conflict)
sequence_tags = [child for child in entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'sequence']
assert(len(sequence_tags) == 1)
sequence_tag = sequence_tags[0]
# atomic mass, sequence, CRC64 digest
self.atomic_mass = float(sequence_tag.getAttribute("mass"))
self.sequence = "".join(sequence_tag.firstChild.nodeValue.strip().split("\n"))
self.sequence_length = int(sequence_tag.getAttribute("length"))
self.CRC64Digest = sequence_tag.getAttribute("checksum") | [
"def",
"_parse_sequence_tag",
"(",
"self",
")",
":",
"#main_tags = self._dom.getElementsByTagName(\"uniprot\")",
"#assert(len(main_tags) == 1)",
"#entry_tags = main_tags[0].getElementsByTagName(\"entry\")",
"#assert(len(entry_tags) == 1)",
"#entry_tags[0]",
"entry_tag",
"=",
"self",
".",... | Parses the sequence and atomic mass. | [
"Parses",
"the",
"sequence",
"and",
"atomic",
"mass",
"."
] | python | train |
senaite/senaite.core | bika/lims/content/analysis.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysis.py#L43-L70 | def getSiblings(self, retracted=False):
"""
Returns the list of analyses of the Analysis Request to which this
analysis belongs to, but with the current analysis excluded.
:param retracted: If false, retracted/rejected siblings are dismissed
:type retracted: bool
:return: list of siblings for this analysis
:rtype: list of IAnalysis
"""
request = self.getRequest()
if not request:
return []
siblings = []
retracted_states = [STATE_RETRACTED, STATE_REJECTED]
for sibling in request.getAnalyses(full_objects=True):
if api.get_uid(sibling) == self.UID():
# Exclude me from the list
continue
if not retracted:
if api.get_workflow_status_of(sibling) in retracted_states:
# Exclude retracted analyses
continue
siblings.append(sibling)
return siblings | [
"def",
"getSiblings",
"(",
"self",
",",
"retracted",
"=",
"False",
")",
":",
"request",
"=",
"self",
".",
"getRequest",
"(",
")",
"if",
"not",
"request",
":",
"return",
"[",
"]",
"siblings",
"=",
"[",
"]",
"retracted_states",
"=",
"[",
"STATE_RETRACTED",... | Returns the list of analyses of the Analysis Request to which this
analysis belongs to, but with the current analysis excluded.
:param retracted: If false, retracted/rejected siblings are dismissed
:type retracted: bool
:return: list of siblings for this analysis
:rtype: list of IAnalysis | [
"Returns",
"the",
"list",
"of",
"analyses",
"of",
"the",
"Analysis",
"Request",
"to",
"which",
"this",
"analysis",
"belongs",
"to",
"but",
"with",
"the",
"current",
"analysis",
"excluded",
".",
":",
"param",
"retracted",
":",
"If",
"false",
"retracted",
"/",... | python | train |
ibis-project/ibis | ibis/pandas/udf.py | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/pandas/udf.py#L433-L527 | def _grouped(input_type, output_type, base_class, output_type_method):
"""Define a user-defined function that is applied per group.
Parameters
----------
input_type : List[ibis.expr.datatypes.DataType]
A list of the types found in :mod:`~ibis.expr.datatypes`. The
length of this list must match the number of arguments to the
function. Variadic arguments are not yet supported.
output_type : ibis.expr.datatypes.DataType
The return type of the function.
base_class : Type[T]
The base class of the generated Node
output_type_method : Callable
A callable that determines the method to call to get the expression
type of the UDF
See Also
--------
ibis.pandas.udf.reduction
ibis.pandas.udf.analytic
"""
def wrapper(func):
funcsig = valid_function_signature(input_type, func)
UDAFNode = type(
func.__name__,
(base_class,),
{
'signature': sig.TypeSignature.from_dtypes(input_type),
'output_type': output_type_method(output_type),
},
)
# An execution rule for a simple aggregate node
@execute_node.register(
UDAFNode, *udf_signature(input_type, pin=None, klass=pd.Series)
)
def execute_udaf_node(op, *args, **kwargs):
args, kwargs = arguments_from_signature(
funcsig, *args, **kwargs
)
return func(*args, **kwargs)
# An execution rule for a grouped aggregation node. This
# includes aggregates applied over a window.
nargs = len(input_type)
group_by_signatures = [
udf_signature(input_type, pin=pin, klass=SeriesGroupBy)
for pin in range(nargs)
]
@toolz.compose(
*(
execute_node.register(UDAFNode, *types)
for types in group_by_signatures
)
)
def execute_udaf_node_groupby(op, *args, **kwargs):
# construct a generator that yields the next group of data
# for every argument excluding the first (pandas performs
# the iteration for the first argument) for each argument
# that is a SeriesGroupBy.
#
# If the argument is not a SeriesGroupBy then keep
# repeating it until all groups are exhausted.
aggcontext = kwargs.pop('aggcontext', None)
assert aggcontext is not None, 'aggcontext is None'
iters = (
(data for _, data in arg)
if isinstance(arg, SeriesGroupBy)
else itertools.repeat(arg)
for arg in args[1:]
)
funcsig = signature(func)
def aggregator(first, *rest, **kwargs):
# map(next, *rest) gets the inputs for the next group
# TODO: might be inefficient to do this on every call
args, kwargs = arguments_from_signature(
funcsig, first, *map(next, rest), **kwargs
)
return func(*args, **kwargs)
result = aggcontext.agg(args[0], aggregator, *iters, **kwargs)
return result
@functools.wraps(func)
def wrapped(*args):
return UDAFNode(*args).to_expr()
return wrapped
return wrapper | [
"def",
"_grouped",
"(",
"input_type",
",",
"output_type",
",",
"base_class",
",",
"output_type_method",
")",
":",
"def",
"wrapper",
"(",
"func",
")",
":",
"funcsig",
"=",
"valid_function_signature",
"(",
"input_type",
",",
"func",
")",
"UDAFNode",
"=",
"type",... | Define a user-defined function that is applied per group.
Parameters
----------
input_type : List[ibis.expr.datatypes.DataType]
A list of the types found in :mod:`~ibis.expr.datatypes`. The
length of this list must match the number of arguments to the
function. Variadic arguments are not yet supported.
output_type : ibis.expr.datatypes.DataType
The return type of the function.
base_class : Type[T]
The base class of the generated Node
output_type_method : Callable
A callable that determines the method to call to get the expression
type of the UDF
See Also
--------
ibis.pandas.udf.reduction
ibis.pandas.udf.analytic | [
"Define",
"a",
"user",
"-",
"defined",
"function",
"that",
"is",
"applied",
"per",
"group",
"."
] | python | train |
cjdrake/pyeda | pyeda/boolalg/expr.py | https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/expr.py#L1386-L1395 | def satisfy_all(self, **params):
"""Iterate through all satisfying input points."""
verbosity = params.get('verbosity', 0)
default_phase = params.get('default_phase', 2)
propagation_limit = params.get('propagation_limit', -1)
decision_limit = params.get('decision_limit', -1)
seed = params.get('seed', 1)
yield from picosat.satisfy_all(self.nvars, self.clauses, verbosity,
default_phase, propagation_limit,
decision_limit, seed) | [
"def",
"satisfy_all",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"verbosity",
"=",
"params",
".",
"get",
"(",
"'verbosity'",
",",
"0",
")",
"default_phase",
"=",
"params",
".",
"get",
"(",
"'default_phase'",
",",
"2",
")",
"propagation_limit",
"=",
... | Iterate through all satisfying input points. | [
"Iterate",
"through",
"all",
"satisfying",
"input",
"points",
"."
] | python | train |
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/lxml/html/html5parser.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/html5parser.py#L95-L133 | def fragment_fromstring(html, create_parent=False,
guess_charset=False, parser=None):
"""Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If create_parent is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In
this case, leading or trailing text is allowed.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
accept_leading_text = bool(create_parent)
elements = fragments_fromstring(
html, guess_charset=guess_charset, parser=parser,
no_leading_text=not accept_leading_text)
if create_parent:
if not isinstance(create_parent, _strings):
create_parent = 'div'
new_root = Element(create_parent)
if elements:
if isinstance(elements[0], _strings):
new_root.text = elements[0]
del elements[0]
new_root.extend(elements)
return new_root
if not elements:
raise etree.ParserError('No elements found')
if len(elements) > 1:
raise etree.ParserError('Multiple elements found')
result = elements[0]
if result.tail and result.tail.strip():
raise etree.ParserError('Element followed by text: %r' % result.tail)
result.tail = None
return result | [
"def",
"fragment_fromstring",
"(",
"html",
",",
"create_parent",
"=",
"False",
",",
"guess_charset",
"=",
"False",
",",
"parser",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"html",
",",
"_strings",
")",
":",
"raise",
"TypeError",
"(",
"'string ... | Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If create_parent is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In
this case, leading or trailing text is allowed. | [
"Parses",
"a",
"single",
"HTML",
"element",
";",
"it",
"is",
"an",
"error",
"if",
"there",
"is",
"more",
"than",
"one",
"element",
"or",
"if",
"anything",
"but",
"whitespace",
"precedes",
"or",
"follows",
"the",
"element",
"."
] | python | test |
tcalmant/ipopo | pelix/shell/core.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/core.py#L268-L289 | def unbind_handler(self, svc_ref):
"""
Called if a command service is gone.
Unregisters its commands.
:param svc_ref: A reference to the unbound service
:return: True if the commands have been unregistered
"""
if svc_ref not in self._bound_references:
# Unknown reference
return False
# Unregister its commands
namespace, commands = self._reference_commands[svc_ref]
for command in commands:
self.unregister(namespace, command)
# Release the service
self._context.unget_service(svc_ref)
del self._bound_references[svc_ref]
del self._reference_commands[svc_ref]
return True | [
"def",
"unbind_handler",
"(",
"self",
",",
"svc_ref",
")",
":",
"if",
"svc_ref",
"not",
"in",
"self",
".",
"_bound_references",
":",
"# Unknown reference",
"return",
"False",
"# Unregister its commands",
"namespace",
",",
"commands",
"=",
"self",
".",
"_reference_... | Called if a command service is gone.
Unregisters its commands.
:param svc_ref: A reference to the unbound service
:return: True if the commands have been unregistered | [
"Called",
"if",
"a",
"command",
"service",
"is",
"gone",
".",
"Unregisters",
"its",
"commands",
"."
] | python | train |
h2oai/h2o-3 | h2o-py/h2o/estimators/estimator_base.py | https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/estimators/estimator_base.py#L353-L369 | def get_params(self, deep=True):
"""
Obtain parameters for this estimator.
Used primarily for sklearn Pipelines and sklearn grid search.
:param deep: If True, return parameters of all sub-objects that are estimators.
:returns: A dict of parameters
"""
out = dict()
for key, value in self.parms.items():
if deep and isinstance(value, H2OEstimator):
deep_items = list(value.get_params().items())
out.update((key + "__" + k, val) for k, val in deep_items)
out[key] = value
return out | [
"def",
"get_params",
"(",
"self",
",",
"deep",
"=",
"True",
")",
":",
"out",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"self",
".",
"parms",
".",
"items",
"(",
")",
":",
"if",
"deep",
"and",
"isinstance",
"(",
"value",
",",
"H2OEst... | Obtain parameters for this estimator.
Used primarily for sklearn Pipelines and sklearn grid search.
:param deep: If True, return parameters of all sub-objects that are estimators.
:returns: A dict of parameters | [
"Obtain",
"parameters",
"for",
"this",
"estimator",
"."
] | python | test |
eisber/sarplus | python/pysarplus/SARPlus.py | https://github.com/eisber/sarplus/blob/945a1182e00a8bf70414fc3600086316701777f9/python/pysarplus/SARPlus.py#L323-L362 | def recommend_k_items_slow(self, test, top_k=10, remove_seen=True):
"""Recommend top K items for all users which are in the test set.
Args:
test: test Spark dataframe
top_k: top n items to return
remove_seen: remove items test users have already seen in the past from the recommended set.
"""
# TODO: remove seen
if remove_seen:
raise ValueError("Not implemented")
self.get_user_affinity(test)\
.write.mode("overwrite")\
.saveAsTable(self.f("{prefix}user_affinity"))
# user_affinity * item_similarity
# filter top-k
query = self.f(
"""
SELECT {col_user}, {col_item}, score
FROM
(
SELECT df.{col_user},
S.i2 {col_item},
SUM(df.{col_rating} * S.value) AS score,
row_number() OVER(PARTITION BY {col_user} ORDER BY SUM(df.{col_rating} * S.value) DESC) rank
FROM
{prefix}user_affinity df,
{prefix}item_similarity S
WHERE df.{col_item} = S.i1
GROUP BY df.{col_user}, S.i2
)
WHERE rank <= {top_k}
""",
top_k=top_k,
)
return self.spark.sql(query) | [
"def",
"recommend_k_items_slow",
"(",
"self",
",",
"test",
",",
"top_k",
"=",
"10",
",",
"remove_seen",
"=",
"True",
")",
":",
"# TODO: remove seen",
"if",
"remove_seen",
":",
"raise",
"ValueError",
"(",
"\"Not implemented\"",
")",
"self",
".",
"get_user_affinit... | Recommend top K items for all users which are in the test set.
Args:
test: test Spark dataframe
top_k: top n items to return
remove_seen: remove items test users have already seen in the past from the recommended set. | [
"Recommend",
"top",
"K",
"items",
"for",
"all",
"users",
"which",
"are",
"in",
"the",
"test",
"set",
"."
] | python | test |
saltstack/salt | salt/modules/win_wua.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_wua.py#L331-L479 | def list(software=True,
drivers=False,
summary=False,
skip_installed=True,
categories=None,
severities=None,
download=False,
install=False):
'''
.. versionadded:: 2017.7.0
Returns a detailed list of available updates or a summary. If download or
install is True the same list will be downloaded and/or installed.
Args:
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
summary (bool):
- True: Return a summary of updates available for each category.
- False (default): Return a detailed list of available updates.
skip_installed (bool):
Skip installed updates in the results (default is False)
download (bool):
(Overrides reporting functionality) Download the list of updates
returned by this function. Run this function first with
``download=False`` to see what will be downloaded, then set
``download=True`` to download the updates.
install (bool):
(Overrides reporting functionality) Install the list of updates
returned by this function. Run this function first with
``install=False`` to see what will be installed, then set
``install=True`` to install the updates.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Update Rollups
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: Returns a dict containing either a summary or a list of updates:
.. code-block:: cfg
List of Updates:
{'<GUID>': {'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [ '<category 1>',
'<category 2>',
...]
}
}
Summary of Updates:
{'Total': <total number of updates returned>,
'Available': <updates that are not downloaded or installed>,
'Downloaded': <updates that are downloaded but not installed>,
'Installed': <updates installed (usually 0 unless installed=True)>,
'Categories': { <category 1>: <total for that category>,
<category 2>: <total for category 2>,
... }
}
CLI Examples:
.. code-block:: bash
# Normal Usage (list all software updates)
salt '*' win_wua.list
# List all updates with categories of Critical Updates and Drivers
salt '*' win_wua.list categories=['Critical Updates','Drivers']
# List all Critical Security Updates
salt '*' win_wua.list categories=['Security Updates'] severities=['Critical']
# List all updates with a severity of Critical
salt '*' win_wua.list severities=['Critical']
# A summary of all available updates
salt '*' win_wua.list summary=True
# A summary of all Feature Packs and Windows 8.1 Updates
salt '*' win_wua.list categories=['Feature Packs','Windows 8.1'] summary=True
'''
# Create a Windows Update Agent instance
wua = salt.utils.win_update.WindowsUpdateAgent()
# Search for Update
updates = wua.available(skip_installed=skip_installed, software=software,
drivers=drivers, categories=categories,
severities=severities)
ret = {}
# Download
if download or install:
ret['Download'] = wua.download(updates)
# Install
if install:
ret['Install'] = wua.install(updates)
if not ret:
return updates.summary() if summary else updates.list()
return ret | [
"def",
"list",
"(",
"software",
"=",
"True",
",",
"drivers",
"=",
"False",
",",
"summary",
"=",
"False",
",",
"skip_installed",
"=",
"True",
",",
"categories",
"=",
"None",
",",
"severities",
"=",
"None",
",",
"download",
"=",
"False",
",",
"install",
... | .. versionadded:: 2017.7.0
Returns a detailed list of available updates or a summary. If download or
install is True the same list will be downloaded and/or installed.
Args:
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
summary (bool):
- True: Return a summary of updates available for each category.
- False (default): Return a detailed list of available updates.
skip_installed (bool):
Skip installed updates in the results (default is False)
download (bool):
(Overrides reporting functionality) Download the list of updates
returned by this function. Run this function first with
``download=False`` to see what will be downloaded, then set
``download=True`` to download the updates.
install (bool):
(Overrides reporting functionality) Install the list of updates
returned by this function. Run this function first with
``install=False`` to see what will be installed, then set
``install=True`` to install the updates.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Update Rollups
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: Returns a dict containing either a summary or a list of updates:
.. code-block:: cfg
List of Updates:
{'<GUID>': {'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [ '<category 1>',
'<category 2>',
...]
}
}
Summary of Updates:
{'Total': <total number of updates returned>,
'Available': <updates that are not downloaded or installed>,
'Downloaded': <updates that are downloaded but not installed>,
'Installed': <updates installed (usually 0 unless installed=True)>,
'Categories': { <category 1>: <total for that category>,
<category 2>: <total for category 2>,
... }
}
CLI Examples:
.. code-block:: bash
# Normal Usage (list all software updates)
salt '*' win_wua.list
# List all updates with categories of Critical Updates and Drivers
salt '*' win_wua.list categories=['Critical Updates','Drivers']
# List all Critical Security Updates
salt '*' win_wua.list categories=['Security Updates'] severities=['Critical']
# List all updates with a severity of Critical
salt '*' win_wua.list severities=['Critical']
# A summary of all available updates
salt '*' win_wua.list summary=True
# A summary of all Feature Packs and Windows 8.1 Updates
salt '*' win_wua.list categories=['Feature Packs','Windows 8.1'] summary=True | [
"..",
"versionadded",
"::",
"2017",
".",
"7",
".",
"0"
] | python | train |
SoCo/SoCo | dev_tools/analyse_ws.py | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L127-L159 | def _parse_load(self, load):
""" Parse the load from a single packet """
# If the load is ??
if load in ['??']:
self._debug('IGNORING')
# If there is a start in load
elif any([start in load for start in STARTS]):
self._debug('START')
self.messages.append(WSPart(load, self.args))
# and there is also an end
if any([end in load for end in ENDS]):
self.messages[-1].finalize_content()
self._debug('AND END')
# If there is an end in load
elif any([end in load for end in ENDS]):
# If there is an open WSPart
if len(self.messages) > 0 and not\
self.messages[-1].write_closed:
self._debug('END ON OPEN FILE')
self.messages[-1].add_content(load)
self.messages[-1].finalize_content()
# Ignore ends before start
else:
self._debug('END BUT NO OPEN FILE')
else:
# If there is an open WSPart
if len(self.messages) > 0 and not\
self.messages[-1].write_closed:
self._debug('ADD TO OPEN FILE')
self.messages[-1].add_content(load)
# else ignore
else:
self._debug('NOTHING TO DO') | [
"def",
"_parse_load",
"(",
"self",
",",
"load",
")",
":",
"# If the load is ??",
"if",
"load",
"in",
"[",
"'??'",
"]",
":",
"self",
".",
"_debug",
"(",
"'IGNORING'",
")",
"# If there is a start in load",
"elif",
"any",
"(",
"[",
"start",
"in",
"load",
"for... | Parse the load from a single packet | [
"Parse",
"the",
"load",
"from",
"a",
"single",
"packet"
] | python | train |
bububa/pyTOP | pyTOP/trade.py | https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/trade.py#L253-L266 | def bought_get(self, session, fields=[], **kwargs):
'''taobao.trades.bought.get 搜索当前会话用户作为买家达成的交易记录
搜索当前会话用户作为买家达成的交易记录(目前只能查询三个月以内的订单)'''
request = TOPRequest('taobao.trades.bought.get')
if not fields:
trade = Trade()
fields = trade.fields
request['fields'] = fields
for k, v in kwargs.iteritems():
if k not in ('start_created','end_created','status','seller_nick','type','page_no','page_size','rate_status') or v==None: continue
request[k] = v
self.create(self.execute(request, session))
return self.trades | [
"def",
"bought_get",
"(",
"self",
",",
"session",
",",
"fields",
"=",
"[",
"]",
",",
"*",
"*",
"kwargs",
")",
":",
"request",
"=",
"TOPRequest",
"(",
"'taobao.trades.bought.get'",
")",
"if",
"not",
"fields",
":",
"trade",
"=",
"Trade",
"(",
")",
"field... | taobao.trades.bought.get 搜索当前会话用户作为买家达成的交易记录
搜索当前会话用户作为买家达成的交易记录(目前只能查询三个月以内的订单) | [
"taobao",
".",
"trades",
".",
"bought",
".",
"get",
"搜索当前会话用户作为买家达成的交易记录",
"搜索当前会话用户作为买家达成的交易记录",
"(",
"目前只能查询三个月以内的订单",
")"
] | python | train |
horazont/aioxmpp | aioxmpp/bookmarks/service.py | https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/bookmarks/service.py#L141-L265 | def _diff_emit_update(self, new_bookmarks):
"""
Diff the bookmark cache and the new bookmark state, emit signals as
needed and set the bookmark cache to the new data.
"""
self.logger.debug("diffing %s, %s", self._bookmark_cache,
new_bookmarks)
def subdivide(level, old, new):
"""
Subdivide the bookmarks according to the data item
``bookmark.secondary[level]`` and emit the appropriate
events.
"""
if len(old) == len(new) == 1:
old_entry = old.pop()
new_entry = new.pop()
if old_entry == new_entry:
pass
else:
self.on_bookmark_changed(old_entry, new_entry)
return ([], [])
elif len(old) == 0:
return ([], new)
elif len(new) == 0:
return (old, [])
else:
try:
groups = {}
for entry in old:
group = groups.setdefault(
entry.secondary[level],
([], [])
)
group[0].append(entry)
for entry in new:
group = groups.setdefault(
entry.secondary[level],
([], [])
)
group[1].append(entry)
except IndexError:
# the classification is exhausted, this means
# all entries in this bin are equal by the
# defininition of bookmark equivalence!
common = min(len(old), len(new))
assert old[:common] == new[:common]
return (old[common:], new[common:])
old_unhandled, new_unhandled = [], []
for old, new in groups.values():
unhandled = subdivide(level+1, old, new)
old_unhandled += unhandled[0]
new_unhandled += unhandled[1]
# match up unhandleds as changes as early as possible
i = -1
for i, (old_entry, new_entry) in enumerate(
zip(old_unhandled, new_unhandled)):
self.logger.debug("changed %s -> %s", old_entry, new_entry)
self.on_bookmark_changed(old_entry, new_entry)
i += 1
return old_unhandled[i:], new_unhandled[i:]
# group the bookmarks into groups whose elements may transform
# among one another by on_bookmark_changed events. This information
# is given by the type of the bookmark and the .primary property
changable_groups = {}
for item in self._bookmark_cache:
group = changable_groups.setdefault(
(type(item), item.primary),
([], [])
)
group[0].append(item)
for item in new_bookmarks:
group = changable_groups.setdefault(
(type(item), item.primary),
([], [])
)
group[1].append(item)
for old, new in changable_groups.values():
# the first branches are fast paths which should catch
# most cases – especially all cases where each bare jid of
# a conference bookmark or each url of an url bookmark is
# only used in one bookmark
if len(old) == len(new) == 1:
old_entry = old.pop()
new_entry = new.pop()
if old_entry == new_entry:
# the bookmark is unchanged, do not emit an event
pass
else:
self.logger.debug("changed %s -> %s", old_entry, new_entry)
self.on_bookmark_changed(old_entry, new_entry)
elif len(new) == 0:
for removed in old:
self.logger.debug("removed %s", removed)
self.on_bookmark_removed(removed)
elif len(old) == 0:
for added in new:
self.logger.debug("added %s", added)
self.on_bookmark_added(added)
else:
old, new = subdivide(0, old, new)
assert len(old) == 0 or len(new) == 0
for removed in old:
self.logger.debug("removed %s", removed)
self.on_bookmark_removed(removed)
for added in new:
self.logger.debug("added %s", added)
self.on_bookmark_added(added)
self._bookmark_cache = new_bookmarks | [
"def",
"_diff_emit_update",
"(",
"self",
",",
"new_bookmarks",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"diffing %s, %s\"",
",",
"self",
".",
"_bookmark_cache",
",",
"new_bookmarks",
")",
"def",
"subdivide",
"(",
"level",
",",
"old",
",",
"new",... | Diff the bookmark cache and the new bookmark state, emit signals as
needed and set the bookmark cache to the new data. | [
"Diff",
"the",
"bookmark",
"cache",
"and",
"the",
"new",
"bookmark",
"state",
"emit",
"signals",
"as",
"needed",
"and",
"set",
"the",
"bookmark",
"cache",
"to",
"the",
"new",
"data",
"."
] | python | train |
libtcod/python-tcod | tcod/console.py | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/console.py#L479-L527 | def print_rect(
self,
x: int,
y: int,
width: int,
height: int,
string: str,
bg_blend: int = tcod.constants.BKGND_DEFAULT,
alignment: Optional[int] = None,
) -> int:
"""Print a string constrained to a rectangle.
If h > 0 and the bottom of the rectangle is reached,
the string is truncated. If h = 0,
the string is only truncated if it reaches the bottom of the console.
Args:
x (int): The x coordinate from the left.
y (int): The y coordinate from the top.
width (int): Maximum width to render the text.
height (int): Maximum lines to render the text.
string (str): A Unicode string.
bg_blend (int): Background blending flag.
alignment (Optional[int]): Alignment flag.
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
Console methods which depend on console defaults have been
deprecated.
Use :any:`Console.print_box` instead, calling this function will
print a warning detailing which default values need to be made
explicit.
"""
self.__deprecate_defaults("print_box", bg_blend, alignment)
alignment = self.default_alignment if alignment is None else alignment
return int(
lib.TCOD_console_printf_rect_ex(
self.console_c,
x,
y,
width,
height,
bg_blend,
alignment,
_fmt(string),
)
) | [
"def",
"print_rect",
"(",
"self",
",",
"x",
":",
"int",
",",
"y",
":",
"int",
",",
"width",
":",
"int",
",",
"height",
":",
"int",
",",
"string",
":",
"str",
",",
"bg_blend",
":",
"int",
"=",
"tcod",
".",
"constants",
".",
"BKGND_DEFAULT",
",",
"... | Print a string constrained to a rectangle.
If h > 0 and the bottom of the rectangle is reached,
the string is truncated. If h = 0,
the string is only truncated if it reaches the bottom of the console.
Args:
x (int): The x coordinate from the left.
y (int): The y coordinate from the top.
width (int): Maximum width to render the text.
height (int): Maximum lines to render the text.
string (str): A Unicode string.
bg_blend (int): Background blending flag.
alignment (Optional[int]): Alignment flag.
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
Console methods which depend on console defaults have been
deprecated.
Use :any:`Console.print_box` instead, calling this function will
print a warning detailing which default values need to be made
explicit. | [
"Print",
"a",
"string",
"constrained",
"to",
"a",
"rectangle",
"."
] | python | train |
opengridcc/opengrid | opengrid/library/regression.py | https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/regression.py#L116-L155 | def _do_analysis_no_cross_validation(self):
"""
Find the best model (fit) and create self.list_of_fits and self.fit
"""
# first model is just the mean
response_term = [Term([LookupFactor(self.y)])]
model_terms = [Term([])] # empty term is the intercept
all_model_terms_dict = {x:Term([LookupFactor(x)]) for x in self.list_of_x}
# ...then add another term for each candidate
#model_terms += [Term([LookupFactor(c)]) for c in candidates]
model_desc = ModelDesc(response_term, model_terms)
self._list_of_fits.append(fm.ols(model_desc, data=self.df).fit())
# try to improve the model until no improvements can be found
while all_model_terms_dict:
# try each x and overwrite the best_fit if we find a better one
# the first best_fit is the one from the previous round
ref_fit = self._list_of_fits[-1]
best_fit = self._list_of_fits[-1]
best_bic = best_fit.bic
for x, term in all_model_terms_dict.items():
# make new_fit, compare with best found so far
model_desc = ModelDesc(response_term, ref_fit.model.formula.rhs_termlist + [term])
fit = fm.ols(model_desc, data=self.df).fit()
if fit.bic < best_bic:
best_bic = fit.bic
best_fit = fit
best_x = x
# Sometimes, the obtained fit may be better, but contains unsignificant parameters.
# Correct the fit by removing the unsignificant parameters and estimate again
best_fit = self._prune(best_fit, p_max=self.p_max)
# if best_fit does not contain more variables than ref fit, exit
if len(best_fit.model.formula.rhs_termlist) == len(ref_fit.model.formula.rhs_termlist):
break
else:
self._list_of_fits.append(best_fit)
all_model_terms_dict.pop(best_x)
self._fit = self._list_of_fits[-1] | [
"def",
"_do_analysis_no_cross_validation",
"(",
"self",
")",
":",
"# first model is just the mean",
"response_term",
"=",
"[",
"Term",
"(",
"[",
"LookupFactor",
"(",
"self",
".",
"y",
")",
"]",
")",
"]",
"model_terms",
"=",
"[",
"Term",
"(",
"[",
"]",
")",
... | Find the best model (fit) and create self.list_of_fits and self.fit | [
"Find",
"the",
"best",
"model",
"(",
"fit",
")",
"and",
"create",
"self",
".",
"list_of_fits",
"and",
"self",
".",
"fit"
] | python | train |
kubernetes-client/python | kubernetes/client/apis/core_v1_api.py | https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L13847-L13873 | def list_replication_controller_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind ReplicationController
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_replication_controller_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ReplicationControllerList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_replication_controller_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_replication_controller_for_all_namespaces_with_http_info(**kwargs)
return data | [
"def",
"list_replication_controller_for_all_namespaces",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"list_replicati... | list or watch objects of kind ReplicationController
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_replication_controller_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ReplicationControllerList
If the method is called asynchronously,
returns the request thread. | [
"list",
"or",
"watch",
"objects",
"of",
"kind",
"ReplicationController",
"This",
"method",
"makes",
"a",
"synchronous",
"HTTP",
"request",
"by",
"default",
".",
"To",
"make",
"an",
"asynchronous",
"HTTP",
"request",
"please",
"pass",
"async_req",
"=",
"True",
... | python | train |
enkore/i3pystatus | i3pystatus/weather/wunderground.py | https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/weather/wunderground.py#L195-L289 | def check_weather(self):
'''
Query the configured/queried station and return the weather data
'''
if self.station_id is None:
# Failed to get the nearest station ID when first launched, so
# retry it.
self.get_station_id()
self.data['update_error'] = ''
try:
query_url = STATION_QUERY_URL % (self.api_key,
'conditions',
self.station_id)
try:
response = self.api_request(query_url)['current_observation']
self.forecast_url = response.pop('ob_url', None)
except KeyError:
self.logger.error('No weather data found for %s', self.station_id)
self.data['update_error'] = self.update_error
return
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
forecast = self.api_request(query_url)['forecast']
forecast = forecast['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
self.logger.error(
'No forecast data found for %s', self.station_id)
# This is a non-fatal error, so don't return but do set the
# error flag.
self.data['update_error'] = self.update_error
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = forecast.get('low', {}).get(unit, '')
high_temp = forecast.get('high', {}).get(unit, '')
else:
low_temp = high_temp = ''
if self.units == 'metric':
temp_unit = 'c'
speed_unit = 'kph'
distance_unit = 'km'
pressure_unit = 'mb'
else:
temp_unit = 'f'
speed_unit = 'mph'
distance_unit = 'mi'
pressure_unit = 'in'
def _find(key, data=None, default=''):
if data is None:
data = response
return str(data.get(key, default))
try:
observation_epoch = _find('observation_epoch') or _find('local_epoch')
observation_time = datetime.fromtimestamp(int(observation_epoch))
except (TypeError, ValueError):
log.debug(
'Observation time \'%s\' is not a UNIX timestamp',
observation_epoch
)
observation_time = datetime.fromtimestamp(0)
self.data['city'] = _find('city', response['observation_location'])
self.data['condition'] = _find('weather')
self.data['observation_time'] = observation_time
self.data['current_temp'] = _find('temp_' + temp_unit).split('.')[0]
self.data['low_temp'] = low_temp
self.data['high_temp'] = high_temp
self.data['temp_unit'] = '°' + temp_unit.upper()
self.data['feelslike'] = _find('feelslike_' + temp_unit)
self.data['dewpoint'] = _find('dewpoint_' + temp_unit)
self.data['wind_speed'] = _find('wind_' + speed_unit)
self.data['wind_unit'] = speed_unit
self.data['wind_direction'] = _find('wind_dir')
self.data['wind_gust'] = _find('wind_gust_' + speed_unit)
self.data['pressure'] = _find('pressure_' + pressure_unit)
self.data['pressure_unit'] = pressure_unit
self.data['pressure_trend'] = _find('pressure_trend')
self.data['visibility'] = _find('visibility_' + distance_unit)
self.data['visibility_unit'] = distance_unit
self.data['humidity'] = _find('relative_humidity').rstrip('%')
self.data['uv_index'] = _find('UV')
except Exception:
# Don't let an uncaught exception kill the update thread
self.logger.error(
'Uncaught error occurred while checking weather. '
'Exception follows:', exc_info=True
)
self.data['update_error'] = self.update_error | [
"def",
"check_weather",
"(",
"self",
")",
":",
"if",
"self",
".",
"station_id",
"is",
"None",
":",
"# Failed to get the nearest station ID when first launched, so",
"# retry it.",
"self",
".",
"get_station_id",
"(",
")",
"self",
".",
"data",
"[",
"'update_error'",
"... | Query the configured/queried station and return the weather data | [
"Query",
"the",
"configured",
"/",
"queried",
"station",
"and",
"return",
"the",
"weather",
"data"
] | python | train |
mitsei/dlkit | dlkit/json_/relationship/managers.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/relationship/managers.py#L334-L356 | def get_relationship_admin_session_for_family(self, family_id):
"""Gets the ``OsidSession`` associated with the relationship administration service for the given family.
arg: family_id (osid.id.Id): the ``Id`` of the ``Family``
return: (osid.relationship.RelationshipAdminSession) - a
``RelationshipAdminSession``
raise: NotFound - no family found by the given ``Id``
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_relationship_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_relationship_admin()`` and
``supports_visible_federation()`` are ``true``*
"""
if not self.supports_relationship_admin():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.RelationshipAdminSession(family_id, runtime=self._runtime) | [
"def",
"get_relationship_admin_session_for_family",
"(",
"self",
",",
"family_id",
")",
":",
"if",
"not",
"self",
".",
"supports_relationship_admin",
"(",
")",
":",
"raise",
"errors",
".",
"Unimplemented",
"(",
")",
"##",
"# Also include check to see if the catalog Id i... | Gets the ``OsidSession`` associated with the relationship administration service for the given family.
arg: family_id (osid.id.Id): the ``Id`` of the ``Family``
return: (osid.relationship.RelationshipAdminSession) - a
``RelationshipAdminSession``
raise: NotFound - no family found by the given ``Id``
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_relationship_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_relationship_admin()`` and
``supports_visible_federation()`` are ``true``* | [
"Gets",
"the",
"OsidSession",
"associated",
"with",
"the",
"relationship",
"administration",
"service",
"for",
"the",
"given",
"family",
"."
] | python | train |
deanmalmgren/textract | textract/parsers/odt_parser.py | https://github.com/deanmalmgren/textract/blob/117ea191d93d80321e4bf01f23cc1ac54d69a075/textract/parsers/odt_parser.py#L19-L28 | def to_string(self):
""" Converts the document to a string. """
buff = u""
for child in self.content.iter():
if child.tag in [self.qn('text:p'), self.qn('text:h')]:
buff += self.text_to_string(child) + "\n"
# remove last newline char
if buff:
buff = buff[:-1]
return buff | [
"def",
"to_string",
"(",
"self",
")",
":",
"buff",
"=",
"u\"\"",
"for",
"child",
"in",
"self",
".",
"content",
".",
"iter",
"(",
")",
":",
"if",
"child",
".",
"tag",
"in",
"[",
"self",
".",
"qn",
"(",
"'text:p'",
")",
",",
"self",
".",
"qn",
"(... | Converts the document to a string. | [
"Converts",
"the",
"document",
"to",
"a",
"string",
"."
] | python | train |
robotools/extractor | Lib/extractor/formats/opentype.py | https://github.com/robotools/extractor/blob/da3c2c92bfd3da863dd5de29bd8bc94cbbf433df/Lib/extractor/formats/opentype.py#L408-L420 | def _makeScriptOrder(gpos):
"""
Run therough GPOS and make an alphabetically
ordered list of scripts. If DFLT is in the list,
move it to the front.
"""
scripts = []
for scriptRecord in gpos.ScriptList.ScriptRecord:
scripts.append(scriptRecord.ScriptTag)
if "DFLT" in scripts:
scripts.remove("DFLT")
scripts.insert(0, "DFLT")
return sorted(scripts) | [
"def",
"_makeScriptOrder",
"(",
"gpos",
")",
":",
"scripts",
"=",
"[",
"]",
"for",
"scriptRecord",
"in",
"gpos",
".",
"ScriptList",
".",
"ScriptRecord",
":",
"scripts",
".",
"append",
"(",
"scriptRecord",
".",
"ScriptTag",
")",
"if",
"\"DFLT\"",
"in",
"scr... | Run therough GPOS and make an alphabetically
ordered list of scripts. If DFLT is in the list,
move it to the front. | [
"Run",
"therough",
"GPOS",
"and",
"make",
"an",
"alphabetically",
"ordered",
"list",
"of",
"scripts",
".",
"If",
"DFLT",
"is",
"in",
"the",
"list",
"move",
"it",
"to",
"the",
"front",
"."
] | python | train |
wummel/linkchecker | third_party/miniboa-r42/miniboa/xterm.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/miniboa-r42/miniboa/xterm.py#L89-L110 | def word_wrap(text, columns=80, indent=4, padding=2):
"""
Given a block of text, breaks into a list of lines wrapped to
length.
"""
paragraphs = _PARA_BREAK.split(text)
lines = []
columns -= padding
for para in paragraphs:
if para.isspace():
continue
line = ' ' * indent
for word in para.split():
if (len(line) + 1 + len(word)) > columns:
lines.append(line)
line = ' ' * padding
line += word
else:
line += ' ' + word
if not line.isspace():
lines.append(line)
return lines | [
"def",
"word_wrap",
"(",
"text",
",",
"columns",
"=",
"80",
",",
"indent",
"=",
"4",
",",
"padding",
"=",
"2",
")",
":",
"paragraphs",
"=",
"_PARA_BREAK",
".",
"split",
"(",
"text",
")",
"lines",
"=",
"[",
"]",
"columns",
"-=",
"padding",
"for",
"p... | Given a block of text, breaks into a list of lines wrapped to
length. | [
"Given",
"a",
"block",
"of",
"text",
"breaks",
"into",
"a",
"list",
"of",
"lines",
"wrapped",
"to",
"length",
"."
] | python | train |
ska-sa/purr | Purr/Plugins/local_pychart/gs_frontend.py | https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/gs_frontend.py#L24-L34 | def _get_gs_path():
"""Guess where the Ghostscript executable is
and return its absolute path name."""
path = os.environ.get("PATH", os.defpath)
for dir in path.split(os.pathsep):
for name in ("gs", "gs.exe", "gswin32c.exe"):
g = os.path.join(dir, name)
if os.path.exists(g):
return g
raise Exception("Ghostscript not found. path=%s" % str(path)) | [
"def",
"_get_gs_path",
"(",
")",
":",
"path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"PATH\"",
",",
"os",
".",
"defpath",
")",
"for",
"dir",
"in",
"path",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"for",
"name",
"in",
"(",
"\"gs\"... | Guess where the Ghostscript executable is
and return its absolute path name. | [
"Guess",
"where",
"the",
"Ghostscript",
"executable",
"is",
"and",
"return",
"its",
"absolute",
"path",
"name",
"."
] | python | train |
saltstack/salt | salt/cloud/clouds/joyent.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/joyent.py#L893-L914 | def show_key(kwargs=None, call=None):
'''
List the keys available
'''
if call != 'function':
log.error(
'The list_keys function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
rcode, data = query(
command='my/keys/{0}'.format(kwargs['keyname']),
method='GET',
)
return {'keys': {data['name']: data['key']}} | [
"def",
"show_key",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'function'",
":",
"log",
".",
"error",
"(",
"'The list_keys function must be called with -f or --function.'",
")",
"return",
"False",
"if",
"not",
"kwargs",
... | List the keys available | [
"List",
"the",
"keys",
"available"
] | python | train |
markuskiller/textblob-de | textblob_de/ext/_pattern/text/search.py | https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L308-L317 | def append(self, term, type=None, value=None):
""" Appends the given term to the taxonomy and tags it as the given type.
Optionally, a disambiguation value can be supplied.
For example: taxonomy.append("many", "quantity", "50-200")
"""
term = self._normalize(term)
type = self._normalize(type)
self.setdefault(term, (odict(), odict()))[0].push((type, True))
self.setdefault(type, (odict(), odict()))[1].push((term, True))
self._values[term] = value | [
"def",
"append",
"(",
"self",
",",
"term",
",",
"type",
"=",
"None",
",",
"value",
"=",
"None",
")",
":",
"term",
"=",
"self",
".",
"_normalize",
"(",
"term",
")",
"type",
"=",
"self",
".",
"_normalize",
"(",
"type",
")",
"self",
".",
"setdefault",... | Appends the given term to the taxonomy and tags it as the given type.
Optionally, a disambiguation value can be supplied.
For example: taxonomy.append("many", "quantity", "50-200") | [
"Appends",
"the",
"given",
"term",
"to",
"the",
"taxonomy",
"and",
"tags",
"it",
"as",
"the",
"given",
"type",
".",
"Optionally",
"a",
"disambiguation",
"value",
"can",
"be",
"supplied",
".",
"For",
"example",
":",
"taxonomy",
".",
"append",
"(",
"many",
... | python | train |
materialsproject/pymatgen | pymatgen/io/vasp/inputs.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/inputs.py#L1918-L1939 | def run_vasp(self, run_dir: PathLike = ".",
vasp_cmd: list = None,
output_file: PathLike = "vasp.out",
err_file: PathLike = "vasp.err"):
"""
Write input files and run VASP.
:param run_dir: Where to write input files and do the run.
:param vasp_cmd: Args to be supplied to run VASP. Otherwise, the
PMG_VASP_EXE in .pmgrc.yaml is used.
:param output_file: File to write output.
:param err_file: File to write err.
"""
self.write_input(output_dir=run_dir)
vasp_cmd = vasp_cmd or SETTINGS.get("PMG_VASP_EXE")
vasp_cmd = [os.path.expanduser(os.path.expandvars(t)) for t in vasp_cmd]
if not vasp_cmd:
raise RuntimeError("You need to supply vasp_cmd or set the PMG_VASP_EXE in .pmgrc.yaml to run VASP.")
with cd(run_dir):
with open(output_file, 'w') as f_std, \
open(err_file, "w", buffering=1) as f_err:
subprocess.check_call(vasp_cmd, stdout=f_std, stderr=f_err) | [
"def",
"run_vasp",
"(",
"self",
",",
"run_dir",
":",
"PathLike",
"=",
"\".\"",
",",
"vasp_cmd",
":",
"list",
"=",
"None",
",",
"output_file",
":",
"PathLike",
"=",
"\"vasp.out\"",
",",
"err_file",
":",
"PathLike",
"=",
"\"vasp.err\"",
")",
":",
"self",
"... | Write input files and run VASP.
:param run_dir: Where to write input files and do the run.
:param vasp_cmd: Args to be supplied to run VASP. Otherwise, the
PMG_VASP_EXE in .pmgrc.yaml is used.
:param output_file: File to write output.
:param err_file: File to write err. | [
"Write",
"input",
"files",
"and",
"run",
"VASP",
"."
] | python | train |
KelSolaar/Umbra | umbra/components/factory/script_editor/script_editor.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/script_editor.py#L2848-L2859 | def __set_window_title(self):
"""
Sets the Component window title.
"""
if self.has_editor_tab():
windowTitle = "{0} - {1}".format(self.__default_window_title, self.get_current_editor().file)
else:
windowTitle = "{0}".format(self.__default_window_title)
LOGGER.debug("> Setting 'Script Editor' window title to '{0}'.".format(windowTitle))
self.setWindowTitle(windowTitle) | [
"def",
"__set_window_title",
"(",
"self",
")",
":",
"if",
"self",
".",
"has_editor_tab",
"(",
")",
":",
"windowTitle",
"=",
"\"{0} - {1}\"",
".",
"format",
"(",
"self",
".",
"__default_window_title",
",",
"self",
".",
"get_current_editor",
"(",
")",
".",
"fi... | Sets the Component window title. | [
"Sets",
"the",
"Component",
"window",
"title",
"."
] | python | train |
odlgroup/odl | odl/solvers/functional/default_functionals.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/functional/default_functionals.py#L2000-L2006 | def _asvector(self, arr):
"""Convert ``arr`` to a `domain` element.
This is the inverse of `_asarray`.
"""
result = moveaxis(arr, [-2, -1], [0, 1])
return self.domain.element(result) | [
"def",
"_asvector",
"(",
"self",
",",
"arr",
")",
":",
"result",
"=",
"moveaxis",
"(",
"arr",
",",
"[",
"-",
"2",
",",
"-",
"1",
"]",
",",
"[",
"0",
",",
"1",
"]",
")",
"return",
"self",
".",
"domain",
".",
"element",
"(",
"result",
")"
] | Convert ``arr`` to a `domain` element.
This is the inverse of `_asarray`. | [
"Convert",
"arr",
"to",
"a",
"domain",
"element",
"."
] | python | train |
fy0/slim | slim/base/sqlquery.py | https://github.com/fy0/slim/blob/9951a910750888dbe7dd3e98acae9c40efae0689/slim/base/sqlquery.py#L235-L296 | def parse_load_fk(cls, data: Dict[str, List[Dict[str, object]]]) -> Dict[str, List[Dict[str, object]]]:
"""
:param data:{
<column>: role,
<column2>: role,
<column>: {
'role': role,
'loadfk': { ... },
},
:return: {
<column>: {
'role': role,
},
...
<column3>: {
'role': role,
'loadfk': { ... },
},
}
"""
default_value_dict = {'role': None, 'as': None, 'table': None, 'loadfk': None}
def value_normalize_dict(value):
def check(k, v):
if k == 'role': return isinstance(v, str)
if k == 'as': return isinstance(v, str)
if k == 'table': return isinstance(v, str)
if k == 'loadfk': return isinstance(v, dict)
valid = {k: v for k, v in value.items() if check(k, v)}
if not valid: return default_value_dict.copy()
if 'loadfk' in valid and valid['loadfk']:
valid['loadfk'] = cls.parse_load_fk(valid['loadfk'])
for k, v in default_value_dict.items():
valid.setdefault(k, v)
return valid
def value_normalize(value, no_list=True):
if value is None:
return default_value_dict.copy()
elif not no_list and isinstance(value, List):
# <column>: [value1, value2, ...]
return list(map(value_normalize, value))
elif isinstance(value, str):
# <column>: role
val = default_value_dict.copy()
val['role'] = value
return val
elif isinstance(value, Dict):
# {'role': <str>, 'as': <str>, ...}
return value_normalize_dict(value)
else:
raise InvalidParams('Invalid syntax for "loadfk": %s' % value)
# 对全部项进行检查
new_data = {}
if not isinstance(data, dict):
raise InvalidParams('Invalid syntax for "loadfk": %s' % data)
for k, v in data.items():
nv = value_normalize(v, False)
new_data[k] = nv if isinstance(nv, List) else [nv]
return new_data | [
"def",
"parse_load_fk",
"(",
"cls",
",",
"data",
":",
"Dict",
"[",
"str",
",",
"List",
"[",
"Dict",
"[",
"str",
",",
"object",
"]",
"]",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"List",
"[",
"Dict",
"[",
"str",
",",
"object",
"]",
"]",
"]",
"... | :param data:{
<column>: role,
<column2>: role,
<column>: {
'role': role,
'loadfk': { ... },
},
:return: {
<column>: {
'role': role,
},
...
<column3>: {
'role': role,
'loadfk': { ... },
},
} | [
":",
"param",
"data",
":",
"{",
"<column",
">",
":",
"role",
"<column2",
">",
":",
"role",
"<column",
">",
":",
"{",
"role",
":",
"role",
"loadfk",
":",
"{",
"...",
"}",
"}",
":",
"return",
":",
"{",
"<column",
">",
":",
"{",
"role",
":",
"role... | python | valid |
bcbio/bcbio-nextgen | bcbio/distributed/split.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/split.py#L71-L87 | def _get_extra_args(extra_args, arg_keys):
"""Retrieve extra arguments to pass along to combine function.
Special cases like reference files and configuration information
are passed as single items, the rest as lists mapping to each data
item combined.
"""
# XXX back compatible hack -- should have a way to specify these.
single_keys = set(["sam_ref", "config"])
out = []
for i, arg_key in enumerate(arg_keys):
vals = [xs[i] for xs in extra_args]
if arg_key in single_keys:
out.append(vals[-1])
else:
out.append(vals)
return out | [
"def",
"_get_extra_args",
"(",
"extra_args",
",",
"arg_keys",
")",
":",
"# XXX back compatible hack -- should have a way to specify these.",
"single_keys",
"=",
"set",
"(",
"[",
"\"sam_ref\"",
",",
"\"config\"",
"]",
")",
"out",
"=",
"[",
"]",
"for",
"i",
",",
"ar... | Retrieve extra arguments to pass along to combine function.
Special cases like reference files and configuration information
are passed as single items, the rest as lists mapping to each data
item combined. | [
"Retrieve",
"extra",
"arguments",
"to",
"pass",
"along",
"to",
"combine",
"function",
"."
] | python | train |
fastai/fastai | docs_src/nbval/nbdime_reporter.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/docs_src/nbval/nbdime_reporter.py#L76-L107 | def make_report(self, outcome):
"""Make report in form of two notebooks.
Use nbdime diff-web to present the difference between reference
cells and test cells.
"""
failures = self.getreports('failed')
if not failures:
return
for rep in failures:
# Check if this is a notebook node
msg = self._getfailureheadline(rep)
lines = rep.longrepr.splitlines()
if len(lines) > 1:
self.section(msg, lines[1])
self._outrep_summary(rep)
tmpdir = tempfile.mkdtemp()
try:
ref_file = os.path.join(tmpdir, 'reference.ipynb')
test_file = os.path.join(tmpdir, 'test_result.ipynb')
with io.open(ref_file, "w", encoding="utf8") as f:
nbformat.write(self.nb_ref, f)
with io.open(test_file, "w", encoding="utf8") as f:
nbformat.write(self.nb_test, f)
run_server(
port=0, # Run on random port
cwd=tmpdir,
closable=True,
on_port=lambda port: browse(
port, ref_file, test_file, None))
finally:
shutil.rmtree(tmpdir) | [
"def",
"make_report",
"(",
"self",
",",
"outcome",
")",
":",
"failures",
"=",
"self",
".",
"getreports",
"(",
"'failed'",
")",
"if",
"not",
"failures",
":",
"return",
"for",
"rep",
"in",
"failures",
":",
"# Check if this is a notebook node",
"msg",
"=",
"sel... | Make report in form of two notebooks.
Use nbdime diff-web to present the difference between reference
cells and test cells. | [
"Make",
"report",
"in",
"form",
"of",
"two",
"notebooks",
"."
] | python | train |
minhhoit/yacms | yacms/pages/page_processors.py | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/pages/page_processors.py#L59-L78 | def autodiscover():
"""
Taken from ``django.contrib.admin.autodiscover`` and used to run
any calls to the ``processor_for`` decorator.
"""
global LOADED
if LOADED:
return
LOADED = True
for app in get_app_name_list():
try:
module = import_module(app)
except ImportError:
pass
else:
try:
import_module("%s.page_processors" % app)
except:
if module_has_submodule(module, "page_processors"):
raise | [
"def",
"autodiscover",
"(",
")",
":",
"global",
"LOADED",
"if",
"LOADED",
":",
"return",
"LOADED",
"=",
"True",
"for",
"app",
"in",
"get_app_name_list",
"(",
")",
":",
"try",
":",
"module",
"=",
"import_module",
"(",
"app",
")",
"except",
"ImportError",
... | Taken from ``django.contrib.admin.autodiscover`` and used to run
any calls to the ``processor_for`` decorator. | [
"Taken",
"from",
"django",
".",
"contrib",
".",
"admin",
".",
"autodiscover",
"and",
"used",
"to",
"run",
"any",
"calls",
"to",
"the",
"processor_for",
"decorator",
"."
] | python | train |
yamcs/yamcs-python | yamcs-client/yamcs/archive/client.py | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L37-L50 | def list_packet_names(self):
"""
Returns the existing packet names.
:rtype: ~collections.Iterable[str]
"""
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
path = '/archive/{}/packet-names'.format(self._instance)
response = self._client.get_proto(path=path)
message = archive_pb2.GetPacketNamesResponse()
message.ParseFromString(response.content)
names = getattr(message, 'name')
return iter(names) | [
"def",
"list_packet_names",
"(",
"self",
")",
":",
"# Server does not do pagination on listings of this resource.",
"# Return an iterator anyway for similarity with other API methods",
"path",
"=",
"'/archive/{}/packet-names'",
".",
"format",
"(",
"self",
".",
"_instance",
")",
"... | Returns the existing packet names.
:rtype: ~collections.Iterable[str] | [
"Returns",
"the",
"existing",
"packet",
"names",
"."
] | python | train |
spyder-ide/spyder | spyder/plugins/variableexplorer/widgets/texteditor.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/texteditor.py#L98-L108 | def text_changed(self):
"""Text has changed"""
# Save text as bytes, if it was initially bytes
if self.is_binary:
self.text = to_binary_string(self.edit.toPlainText(), 'utf8')
else:
self.text = to_text_string(self.edit.toPlainText())
if self.btn_save_and_close:
self.btn_save_and_close.setEnabled(True)
self.btn_save_and_close.setAutoDefault(True)
self.btn_save_and_close.setDefault(True) | [
"def",
"text_changed",
"(",
"self",
")",
":",
"# Save text as bytes, if it was initially bytes\r",
"if",
"self",
".",
"is_binary",
":",
"self",
".",
"text",
"=",
"to_binary_string",
"(",
"self",
".",
"edit",
".",
"toPlainText",
"(",
")",
",",
"'utf8'",
")",
"e... | Text has changed | [
"Text",
"has",
"changed"
] | python | train |
solvebio/solvebio-python | solvebio/utils/tabulate.py | https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/utils/tabulate.py#L401-L467 | def _normalize_tabular_data(tabular_data, headers, sort=True):
"""
Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* 2D NumPy arrays
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = list(tabular_data.keys())
# columns have to be transposed
rows = list(izip_longest(*list(tabular_data.values())))
elif hasattr(tabular_data, "index"):
# values is a property, has .index then
# it's likely a pandas.DataFrame (pandas 0.11.0)
keys = list(tabular_data.keys())
# values matrix doesn't need to be transposed
vals = tabular_data.values
names = tabular_data.index
rows = [[v] + list(row) for v, row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict "
"or a DataFrame")
if headers == "keys":
headers = list(map(_text_type, keys)) # headers should be strings
else: # it's, as usual, an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if headers == "keys" and len(rows) > 0: # keys are column indices
headers = list(map(_text_type, list(range(len(rows[0])))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(headers)
rows = list(map(list, rows))
if sort and len(rows) > 1:
rows = sorted(rows, key=lambda x: x[0])
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""] * (ncols - nhs) + headers
return rows, headers | [
"def",
"_normalize_tabular_data",
"(",
"tabular_data",
",",
"headers",
",",
"sort",
"=",
"True",
")",
":",
"if",
"hasattr",
"(",
"tabular_data",
",",
"\"keys\"",
")",
"and",
"hasattr",
"(",
"tabular_data",
",",
"\"values\"",
")",
":",
"# dict-like and pandas.Dat... | Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* 2D NumPy arrays
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys". | [
"Transform",
"a",
"supported",
"data",
"type",
"to",
"a",
"list",
"of",
"lists",
"and",
"a",
"list",
"of",
"headers",
"."
] | python | test |
wummel/linkchecker | linkcheck/HtmlParser/htmllib.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/HtmlParser/htmllib.py#L159-L168 | def pi (self, data):
"""
Print HTML pi.
@param data: the tag data
@type data: string
@return: None
"""
data = data.encode(self.encoding, "ignore")
self.fd.write("<?%s?>" % data) | [
"def",
"pi",
"(",
"self",
",",
"data",
")",
":",
"data",
"=",
"data",
".",
"encode",
"(",
"self",
".",
"encoding",
",",
"\"ignore\"",
")",
"self",
".",
"fd",
".",
"write",
"(",
"\"<?%s?>\"",
"%",
"data",
")"
] | Print HTML pi.
@param data: the tag data
@type data: string
@return: None | [
"Print",
"HTML",
"pi",
"."
] | python | train |
tjcsl/cslbot | cslbot/commands/cancel.py | https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/cancel.py#L22-L36 | def cmd(send, msg, args):
"""Cancels a deferred action with the given id.
Syntax: {command} <id>
"""
try:
args['handler'].workers.cancel(int(msg))
except ValueError:
send("Index must be a digit.")
return
except KeyError:
send("No such event.")
return
send("Event canceled.") | [
"def",
"cmd",
"(",
"send",
",",
"msg",
",",
"args",
")",
":",
"try",
":",
"args",
"[",
"'handler'",
"]",
".",
"workers",
".",
"cancel",
"(",
"int",
"(",
"msg",
")",
")",
"except",
"ValueError",
":",
"send",
"(",
"\"Index must be a digit.\"",
")",
"re... | Cancels a deferred action with the given id.
Syntax: {command} <id> | [
"Cancels",
"a",
"deferred",
"action",
"with",
"the",
"given",
"id",
"."
] | python | train |
apache/spark | python/pyspark/sql/group.py | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/group.py#L224-L276 | def apply(self, udf):
"""
Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
as a `DataFrame`.
The user-defined function should take a `pandas.DataFrame` and return another
`pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
to the user-function and the returned `pandas.DataFrame` are combined as a
:class:`DataFrame`.
The returned `pandas.DataFrame` can be of arbitrary length and its schema must match the
returnType of the pandas udf.
.. note:: This function requires a full shuffle. all the data of a group will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. note:: Experimental
:param udf: a grouped map user-defined function returned by
:func:`pyspark.sql.functions.pandas_udf`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
.. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
"""
# Columns are special because hasattr always return True
if isinstance(udf, Column) or not hasattr(udf, 'func') \
or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
"GROUPED_MAP.")
df = self._df
udf_column = udf(*[df[col] for col in df.columns])
jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx) | [
"def",
"apply",
"(",
"self",
",",
"udf",
")",
":",
"# Columns are special because hasattr always return True",
"if",
"isinstance",
"(",
"udf",
",",
"Column",
")",
"or",
"not",
"hasattr",
"(",
"udf",
",",
"'func'",
")",
"or",
"udf",
".",
"evalType",
"!=",
"Py... | Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
as a `DataFrame`.
The user-defined function should take a `pandas.DataFrame` and return another
`pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
to the user-function and the returned `pandas.DataFrame` are combined as a
:class:`DataFrame`.
The returned `pandas.DataFrame` can be of arbitrary length and its schema must match the
returnType of the pandas udf.
.. note:: This function requires a full shuffle. all the data of a group will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. note:: Experimental
:param udf: a grouped map user-defined function returned by
:func:`pyspark.sql.functions.pandas_udf`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
.. seealso:: :meth:`pyspark.sql.functions.pandas_udf` | [
"Maps",
"each",
"group",
"of",
"the",
"current",
":",
"class",
":",
"DataFrame",
"using",
"a",
"pandas",
"udf",
"and",
"returns",
"the",
"result",
"as",
"a",
"DataFrame",
"."
] | python | train |
CZ-NIC/yangson | yangson/schemanode.py | https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schemanode.py#L826-L831 | def from_raw(self, rval: RawScalar, jptr: JSONPointer = "") -> ScalarValue:
"""Override the superclass method."""
res = self.type.from_raw(rval)
if res is None:
raise RawTypeError(jptr, self.type.yang_type() + " value")
return res | [
"def",
"from_raw",
"(",
"self",
",",
"rval",
":",
"RawScalar",
",",
"jptr",
":",
"JSONPointer",
"=",
"\"\"",
")",
"->",
"ScalarValue",
":",
"res",
"=",
"self",
".",
"type",
".",
"from_raw",
"(",
"rval",
")",
"if",
"res",
"is",
"None",
":",
"raise",
... | Override the superclass method. | [
"Override",
"the",
"superclass",
"method",
"."
] | python | train |
hhatto/autopep8 | autopep8.py | https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L1079-L1115 | def fix_e712(self, result):
"""Fix (trivial case of) comparison with boolean."""
(line_index, offset, target) = get_index_offset_contents(result,
self.source)
# Handle very easy "not" special cases.
if re.match(r'^\s*if [\w."\'\[\]]+ == False:$', target):
self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) == False:',
r'if not \1:', target, count=1)
elif re.match(r'^\s*if [\w."\'\[\]]+ != True:$', target):
self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) != True:',
r'if not \1:', target, count=1)
else:
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
# Handle simple cases only.
new_right = None
if center.strip() == '==':
if re.match(r'\bTrue\b', right):
new_right = re.sub(r'\bTrue\b *', '', right, count=1)
elif center.strip() == '!=':
if re.match(r'\bFalse\b', right):
new_right = re.sub(r'\bFalse\b *', '', right, count=1)
if new_right is None:
return []
if new_right[0].isalnum():
new_right = ' ' + new_right
self.source[line_index] = left + new_right | [
"def",
"fix_e712",
"(",
"self",
",",
"result",
")",
":",
"(",
"line_index",
",",
"offset",
",",
"target",
")",
"=",
"get_index_offset_contents",
"(",
"result",
",",
"self",
".",
"source",
")",
"# Handle very easy \"not\" special cases.",
"if",
"re",
".",
"matc... | Fix (trivial case of) comparison with boolean. | [
"Fix",
"(",
"trivial",
"case",
"of",
")",
"comparison",
"with",
"boolean",
"."
] | python | train |
juju/charm-helpers | charmhelpers/core/services/helpers.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/services/helpers.py#L70-L77 | def is_ready(self):
"""
Returns True if all of the `required_keys` are available from any units.
"""
ready = len(self.get(self.name, [])) > 0
if not ready:
hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
return ready | [
"def",
"is_ready",
"(",
"self",
")",
":",
"ready",
"=",
"len",
"(",
"self",
".",
"get",
"(",
"self",
".",
"name",
",",
"[",
"]",
")",
")",
">",
"0",
"if",
"not",
"ready",
":",
"hookenv",
".",
"log",
"(",
"'Incomplete relation: {}'",
".",
"format",
... | Returns True if all of the `required_keys` are available from any units. | [
"Returns",
"True",
"if",
"all",
"of",
"the",
"required_keys",
"are",
"available",
"from",
"any",
"units",
"."
] | python | train |
PyPSA/PyPSA | pypsa/components.py | https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L305-L327 | def _build_dataframes(self):
"""Function called when network is created to build component pandas.DataFrames."""
for component in self.all_components:
attrs = self.components[component]["attrs"]
static_dtypes = attrs.loc[attrs.static, "dtype"].drop(["name"])
df = pd.DataFrame({k: pd.Series(dtype=d) for k, d in static_dtypes.iteritems()},
columns=static_dtypes.index)
df.index.name = "name"
setattr(self,self.components[component]["list_name"],df)
pnl = Dict({k : pd.DataFrame(index=self.snapshots,
columns=[],
#it's currently hard to imagine non-float series, but this could be generalised
dtype=np.dtype(float))
for k in attrs.index[attrs.varying]})
setattr(self,self.components[component]["list_name"]+"_t",pnl) | [
"def",
"_build_dataframes",
"(",
"self",
")",
":",
"for",
"component",
"in",
"self",
".",
"all_components",
":",
"attrs",
"=",
"self",
".",
"components",
"[",
"component",
"]",
"[",
"\"attrs\"",
"]",
"static_dtypes",
"=",
"attrs",
".",
"loc",
"[",
"attrs",... | Function called when network is created to build component pandas.DataFrames. | [
"Function",
"called",
"when",
"network",
"is",
"created",
"to",
"build",
"component",
"pandas",
".",
"DataFrames",
"."
] | python | train |
molmod/molmod | molmod/randomize.py | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/randomize.py#L205-L213 | def iter_halfs_bond(graph):
"""Select a random bond (pair of atoms) that divides the molecule in two"""
for atom1, atom2 in graph.edges:
try:
affected_atoms1, affected_atoms2 = graph.get_halfs(atom1, atom2)
yield affected_atoms1, affected_atoms2, (atom1, atom2)
except GraphError:
# just try again
continue | [
"def",
"iter_halfs_bond",
"(",
"graph",
")",
":",
"for",
"atom1",
",",
"atom2",
"in",
"graph",
".",
"edges",
":",
"try",
":",
"affected_atoms1",
",",
"affected_atoms2",
"=",
"graph",
".",
"get_halfs",
"(",
"atom1",
",",
"atom2",
")",
"yield",
"affected_ato... | Select a random bond (pair of atoms) that divides the molecule in two | [
"Select",
"a",
"random",
"bond",
"(",
"pair",
"of",
"atoms",
")",
"that",
"divides",
"the",
"molecule",
"in",
"two"
] | python | train |
susam/ice | ice.py | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L381-L396 | def add(self, method, pattern, callback):
"""Add a route.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
pattern (str): Pattern that request paths must match.
callback (str): Route handler that is invoked when a request
path matches the *pattern*.
"""
pat_type, pat = self._normalize_pattern(pattern)
if pat_type == 'literal':
self._literal[method][pat] = callback
elif pat_type == 'wildcard':
self._wildcard[method].append(WildcardRoute(pat, callback))
else:
self._regex[method].append(RegexRoute(pat, callback)) | [
"def",
"add",
"(",
"self",
",",
"method",
",",
"pattern",
",",
"callback",
")",
":",
"pat_type",
",",
"pat",
"=",
"self",
".",
"_normalize_pattern",
"(",
"pattern",
")",
"if",
"pat_type",
"==",
"'literal'",
":",
"self",
".",
"_literal",
"[",
"method",
... | Add a route.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
pattern (str): Pattern that request paths must match.
callback (str): Route handler that is invoked when a request
path matches the *pattern*. | [
"Add",
"a",
"route",
"."
] | python | test |
LogicalDash/LiSE | allegedb/allegedb/query.py | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/query.py#L364-L375 | def nodes_dump(self):
"""Dump the entire contents of the nodes table."""
self._flush_nodes()
for (graph, node, branch, turn,tick, extant) in self.sql('nodes_dump'):
yield (
self.unpack(graph),
self.unpack(node),
branch,
turn,
tick,
bool(extant)
) | [
"def",
"nodes_dump",
"(",
"self",
")",
":",
"self",
".",
"_flush_nodes",
"(",
")",
"for",
"(",
"graph",
",",
"node",
",",
"branch",
",",
"turn",
",",
"tick",
",",
"extant",
")",
"in",
"self",
".",
"sql",
"(",
"'nodes_dump'",
")",
":",
"yield",
"(",... | Dump the entire contents of the nodes table. | [
"Dump",
"the",
"entire",
"contents",
"of",
"the",
"nodes",
"table",
"."
] | python | train |
rstoneback/pysat | pysat/instruments/pysat_sgp4.py | https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/pysat_sgp4.py#L464-L516 | def add_aacgm_coordinates(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses AACGMV2 package to add AACGM coordinates to instrument object.
The Altitude Adjusted Corrected Geomagnetic Coordinates library is used
to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
Example
-------
# function added velow modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees N)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees E)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include quasi-dipole coordinates, 'aacgm_lat'
for magnetic latitude, 'aacgm_long' for longitude, and 'aacgm_mlt' for magnetic local time.
"""
import aacgmv2
aalat = []; aalon = []; mlt = []
for lat, lon, alt, time in zip(inst[glat_label], inst[glong_label], inst[alt_label],
inst.data.index):
# aacgmv2 latitude and longitude from geodetic coords
tlat, tlon, tmlt = aacgmv2.get_aacgm_coord(lat, lon, alt, time)
aalat.append(tlat)
aalon.append(tlon)
mlt.append(tmlt)
inst['aacgm_lat'] = aalat
inst['aacgm_long'] = aalon
inst['aacgm_mlt'] = mlt
inst.meta['aacgm_lat'] = {'units':'degrees','long_name':'AACGM latitude'}
inst.meta['aacgm_long'] = {'units':'degrees','long_name':'AACGM longitude'}
inst.meta['aacgm_mlt'] = {'units':'hrs','long_name':'AACGM Magnetic local time'}
return | [
"def",
"add_aacgm_coordinates",
"(",
"inst",
",",
"glat_label",
"=",
"'glat'",
",",
"glong_label",
"=",
"'glong'",
",",
"alt_label",
"=",
"'alt'",
")",
":",
"import",
"aacgmv2",
"aalat",
"=",
"[",
"]",
"aalon",
"=",
"[",
"]",
"mlt",
"=",
"[",
"]",
"for... | Uses AACGMV2 package to add AACGM coordinates to instrument object.
The Altitude Adjusted Corrected Geomagnetic Coordinates library is used
to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
Example
-------
# function added velow modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees N)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees E)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include quasi-dipole coordinates, 'aacgm_lat'
for magnetic latitude, 'aacgm_long' for longitude, and 'aacgm_mlt' for magnetic local time. | [
"Uses",
"AACGMV2",
"package",
"to",
"add",
"AACGM",
"coordinates",
"to",
"instrument",
"object",
".",
"The",
"Altitude",
"Adjusted",
"Corrected",
"Geomagnetic",
"Coordinates",
"library",
"is",
"used",
"to",
"calculate",
"the",
"latitude",
"longitude",
"and",
"loca... | python | train |
rbw/pysnow | pysnow/request.py | https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/request.py#L51-L63 | def get(self, *args, **kwargs):
"""Fetches one or more records
:return:
- :class:`pysnow.Response` object
"""
self._parameters.query = kwargs.pop('query', {}) if len(args) == 0 else args[0]
self._parameters.limit = kwargs.pop('limit', 10000)
self._parameters.offset = kwargs.pop('offset', 0)
self._parameters.fields = kwargs.pop('fields', kwargs.pop('fields', []))
return self._get_response('GET', stream=kwargs.pop('stream', False)) | [
"def",
"get",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_parameters",
".",
"query",
"=",
"kwargs",
".",
"pop",
"(",
"'query'",
",",
"{",
"}",
")",
"if",
"len",
"(",
"args",
")",
"==",
"0",
"else",
"args",
... | Fetches one or more records
:return:
- :class:`pysnow.Response` object | [
"Fetches",
"one",
"or",
"more",
"records"
] | python | train |
PyGithub/PyGithub | github/Organization.py | https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Organization.py#L501-L540 | def edit(self, billing_email=github.GithubObject.NotSet, blog=github.GithubObject.NotSet, company=github.GithubObject.NotSet, description=github.GithubObject.NotSet, email=github.GithubObject.NotSet, location=github.GithubObject.NotSet, name=github.GithubObject.NotSet):
"""
:calls: `PATCH /orgs/:org <http://developer.github.com/v3/orgs>`_
:param billing_email: string
:param blog: string
:param company: string
:param description: string
:param email: string
:param location: string
:param name: string
:rtype: None
"""
assert billing_email is github.GithubObject.NotSet or isinstance(billing_email, (str, unicode)), billing_email
assert blog is github.GithubObject.NotSet or isinstance(blog, (str, unicode)), blog
assert company is github.GithubObject.NotSet or isinstance(company, (str, unicode)), company
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert email is github.GithubObject.NotSet or isinstance(email, (str, unicode)), email
assert location is github.GithubObject.NotSet or isinstance(location, (str, unicode)), location
assert name is github.GithubObject.NotSet or isinstance(name, (str, unicode)), name
post_parameters = dict()
if billing_email is not github.GithubObject.NotSet:
post_parameters["billing_email"] = billing_email
if blog is not github.GithubObject.NotSet:
post_parameters["blog"] = blog
if company is not github.GithubObject.NotSet:
post_parameters["company"] = company
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if email is not github.GithubObject.NotSet:
post_parameters["email"] = email
if location is not github.GithubObject.NotSet:
post_parameters["location"] = location
if name is not github.GithubObject.NotSet:
post_parameters["name"] = name
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data) | [
"def",
"edit",
"(",
"self",
",",
"billing_email",
"=",
"github",
".",
"GithubObject",
".",
"NotSet",
",",
"blog",
"=",
"github",
".",
"GithubObject",
".",
"NotSet",
",",
"company",
"=",
"github",
".",
"GithubObject",
".",
"NotSet",
",",
"description",
"=",... | :calls: `PATCH /orgs/:org <http://developer.github.com/v3/orgs>`_
:param billing_email: string
:param blog: string
:param company: string
:param description: string
:param email: string
:param location: string
:param name: string
:rtype: None | [
":",
"calls",
":",
"PATCH",
"/",
"orgs",
"/",
":",
"org",
"<http",
":",
"//",
"developer",
".",
"github",
".",
"com",
"/",
"v3",
"/",
"orgs",
">",
"_",
":",
"param",
"billing_email",
":",
"string",
":",
"param",
"blog",
":",
"string",
":",
"param",... | python | train |
Erotemic/utool | utool/util_cache.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L1172-L1208 | def get_lru_cache(max_size=5):
"""
Args:
max_size (int):
References:
https://github.com/amitdev/lru-dict
CommandLine:
python -m utool.util_cache --test-get_lru_cache
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> from utool.util_cache import * # NOQA
>>> import utool as ut # NOQA
>>> max_size = 5
>>> # execute function
>>> cache_obj = get_lru_cache(max_size)
>>> cache_obj[1] = 1
>>> cache_obj[2] = 2
>>> cache_obj[3] = 3
>>> cache_obj[4] = 4
>>> cache_obj[5] = 5
>>> cache_obj[6] = 6
>>> # verify results
>>> result = ut.repr2(dict(cache_obj), nl=False)
>>> print(result)
{2: 2, 3: 3, 4: 4, 5: 5, 6: 6}
"""
USE_C_LRU = False
if USE_C_LRU:
import lru
cache_obj = lru.LRU(max_size)
else:
cache_obj = LRUDict(max_size)
return cache_obj | [
"def",
"get_lru_cache",
"(",
"max_size",
"=",
"5",
")",
":",
"USE_C_LRU",
"=",
"False",
"if",
"USE_C_LRU",
":",
"import",
"lru",
"cache_obj",
"=",
"lru",
".",
"LRU",
"(",
"max_size",
")",
"else",
":",
"cache_obj",
"=",
"LRUDict",
"(",
"max_size",
")",
... | Args:
max_size (int):
References:
https://github.com/amitdev/lru-dict
CommandLine:
python -m utool.util_cache --test-get_lru_cache
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> from utool.util_cache import * # NOQA
>>> import utool as ut # NOQA
>>> max_size = 5
>>> # execute function
>>> cache_obj = get_lru_cache(max_size)
>>> cache_obj[1] = 1
>>> cache_obj[2] = 2
>>> cache_obj[3] = 3
>>> cache_obj[4] = 4
>>> cache_obj[5] = 5
>>> cache_obj[6] = 6
>>> # verify results
>>> result = ut.repr2(dict(cache_obj), nl=False)
>>> print(result)
{2: 2, 3: 3, 4: 4, 5: 5, 6: 6} | [
"Args",
":",
"max_size",
"(",
"int",
")",
":"
] | python | train |
mitsei/dlkit | dlkit/json_/commenting/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/sessions.py#L2094-L2133 | def update_book(self, book_form):
"""Updates an existing book.
arg: book_form (osid.commenting.BookForm): the form
containing the elements to be updated
raise: IllegalState - ``book_form`` already used in an update
transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``book_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``book_form`` did not originte from
``get_book_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.update_bin_template
if self._catalog_session is not None:
return self._catalog_session.update_catalog(catalog_form=book_form)
collection = JSONClientValidated('commenting',
collection='Book',
runtime=self._runtime)
if not isinstance(book_form, ABCBookForm):
raise errors.InvalidArgument('argument type is not an BookForm')
if not book_form.is_for_update():
raise errors.InvalidArgument('the BookForm is for update only, not create')
try:
if self._forms[book_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('book_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('book_form did not originate from this session')
if not book_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(book_form._my_map) # save is deprecated - change to replace_one
self._forms[book_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned
return objects.Book(osid_object_map=book_form._my_map, runtime=self._runtime, proxy=self._proxy) | [
"def",
"update_book",
"(",
"self",
",",
"book_form",
")",
":",
"# Implemented from template for",
"# osid.resource.BinAdminSession.update_bin_template",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"upd... | Updates an existing book.
arg: book_form (osid.commenting.BookForm): the form
containing the elements to be updated
raise: IllegalState - ``book_form`` already used in an update
transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``book_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``book_form`` did not originte from
``get_book_form_for_update()``
*compliance: mandatory -- This method must be implemented.* | [
"Updates",
"an",
"existing",
"book",
"."
] | python | train |
ambitioninc/python-logentries-api | logentries_api/resources.py | https://github.com/ambitioninc/python-logentries-api/blob/77ff1a7a2995d7ea2725b74e34c0f880f4ee23bc/logentries_api/resources.py#L151-L192 | def update(self, label):
"""
Update a Label
:param label: The data to update. Must include keys:
* id (str)
* appearance (dict)
* description (str)
* name (str)
* title (str)
:type label: dict
Example:
.. code-block:: python
Labels().update(
label={
'id': 'd9d4596e-49e4-4135-b3b3-847f9e7c1f43',
'appearance': {'color': '278abe'},
'name': 'My Sandbox',
'description': 'My Sandbox',
'title': 'My Sandbox',
}
)
:return:
:rtype: dict
"""
data = {
'id': label['id'],
'name': label['name'],
'appearance': label['appearance'],
'description': label['description'],
'title': label['title'],
}
return self._post(
request=ApiActions.UPDATE.value,
uri=ApiUri.TAGS.value,
params=data
) | [
"def",
"update",
"(",
"self",
",",
"label",
")",
":",
"data",
"=",
"{",
"'id'",
":",
"label",
"[",
"'id'",
"]",
",",
"'name'",
":",
"label",
"[",
"'name'",
"]",
",",
"'appearance'",
":",
"label",
"[",
"'appearance'",
"]",
",",
"'description'",
":",
... | Update a Label
:param label: The data to update. Must include keys:
* id (str)
* appearance (dict)
* description (str)
* name (str)
* title (str)
:type label: dict
Example:
.. code-block:: python
Labels().update(
label={
'id': 'd9d4596e-49e4-4135-b3b3-847f9e7c1f43',
'appearance': {'color': '278abe'},
'name': 'My Sandbox',
'description': 'My Sandbox',
'title': 'My Sandbox',
}
)
:return:
:rtype: dict | [
"Update",
"a",
"Label"
] | python | test |
dade-ai/snipy | snipy/basic.py | https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/basic.py#L286-L300 | def interrupt_guard(msg='', reraise=True):
"""
context for guard keyboardinterrupt
ex)
with interrupt_guard('need long time'):
critical_work_to_prevent()
:param str msg: message to print when interrupted
:param reraise: re-raise or not when exit
:return: context
"""
def echo():
print(msg)
return on_interrupt(echo, reraise=reraise) | [
"def",
"interrupt_guard",
"(",
"msg",
"=",
"''",
",",
"reraise",
"=",
"True",
")",
":",
"def",
"echo",
"(",
")",
":",
"print",
"(",
"msg",
")",
"return",
"on_interrupt",
"(",
"echo",
",",
"reraise",
"=",
"reraise",
")"
] | context for guard keyboardinterrupt
ex)
with interrupt_guard('need long time'):
critical_work_to_prevent()
:param str msg: message to print when interrupted
:param reraise: re-raise or not when exit
:return: context | [
"context",
"for",
"guard",
"keyboardinterrupt",
"ex",
")",
"with",
"interrupt_guard",
"(",
"need",
"long",
"time",
")",
":",
"critical_work_to_prevent",
"()"
] | python | valid |
Parsl/parsl | parsl/executors/low_latency/lowlatency_worker.py | https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/executors/low_latency/lowlatency_worker.py#L49-L77 | def start_file_logger(filename, rank, name='parsl', level=logging.DEBUG, format_string=None):
"""Add a stream log handler.
Args:
- filename (string): Name of the file to write logs to
- name (string): Logger name
- level (logging.LEVEL): Set the logging level.
- format_string (string): Set the format string
Returns:
- None
"""
try:
os.makedirs(os.path.dirname(filename), 511, True)
except Exception as e:
print("Caught exception with trying to make log dirs: {}".format(e))
if format_string is None:
format_string = "%(asctime)s %(name)s:%(lineno)d Rank:{0} [%(levelname)s] %(message)s".format(
rank)
global logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename)
handler.setLevel(level)
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler) | [
"def",
"start_file_logger",
"(",
"filename",
",",
"rank",
",",
"name",
"=",
"'parsl'",
",",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"format_string",
"=",
"None",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
... | Add a stream log handler.
Args:
- filename (string): Name of the file to write logs to
- name (string): Logger name
- level (logging.LEVEL): Set the logging level.
- format_string (string): Set the format string
Returns:
- None | [
"Add",
"a",
"stream",
"log",
"handler",
"."
] | python | valid |
all-umass/graphs | graphs/mixins/analysis.py | https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/analysis.py#L61-L88 | def directed_laplacian(self, D=None, eta=0.99, tol=1e-12, max_iter=500):
'''Computes the directed combinatorial graph laplacian.
http://www-all.cs.umass.edu/pubs/2007/johns_m_ICML07.pdf
D: (optional) N-array of degrees
eta: probability of not teleporting (see the paper)
tol, max_iter: convergence params for Perron vector calculation
'''
W = self.matrix('dense')
n = W.shape[0]
if D is None:
D = W.sum(axis=1)
# compute probability transition matrix
with np.errstate(invalid='ignore', divide='ignore'):
P = W.astype(float) / D[:,None]
P[D==0] = 0
# start at the uniform distribution Perron vector (phi)
old_phi = np.ones(n) / n
# iterate to the fixed point (teleporting random walk)
for _ in range(max_iter):
phi = eta * old_phi.dot(P) + (1-eta)/n
if np.abs(phi - old_phi).max() < tol:
break
old_phi = phi
else:
warnings.warn("phi failed to converge after %d iterations" % max_iter)
# L = Phi - (Phi P + P' Phi)/2
return np.diag(phi) - ((phi * P.T).T + P.T * phi)/2 | [
"def",
"directed_laplacian",
"(",
"self",
",",
"D",
"=",
"None",
",",
"eta",
"=",
"0.99",
",",
"tol",
"=",
"1e-12",
",",
"max_iter",
"=",
"500",
")",
":",
"W",
"=",
"self",
".",
"matrix",
"(",
"'dense'",
")",
"n",
"=",
"W",
".",
"shape",
"[",
"... | Computes the directed combinatorial graph laplacian.
http://www-all.cs.umass.edu/pubs/2007/johns_m_ICML07.pdf
D: (optional) N-array of degrees
eta: probability of not teleporting (see the paper)
tol, max_iter: convergence params for Perron vector calculation | [
"Computes",
"the",
"directed",
"combinatorial",
"graph",
"laplacian",
".",
"http",
":",
"//",
"www",
"-",
"all",
".",
"cs",
".",
"umass",
".",
"edu",
"/",
"pubs",
"/",
"2007",
"/",
"johns_m_ICML07",
".",
"pdf"
] | python | train |
MacHu-GWU/angora-project | angora/baseclass/classtree.py | https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/baseclass/classtree.py#L221-L248 | def repr_class_data(self, class_data):
"""Create code like this::
class Person(classtree.Base):
def __init__(self, name=None, person_id=None):
self.name = name
self.person_id = person_id
class PersonCollection(classtree.Base):
def __init__(self, collection_id=None, create_date=None, name=None):
self.collection_id = collection_id
self.create_date = create_date
self.name = name
person = Person(name='Jack', person_id=1)
self.name____Jack = person
self.person_id____1 = person
person = Person(name='Paul', person_id=2)
self.name____Paul = person
self.person_id____2 = person
"""
if "subclass" in class_data:
for subclass_data in class_data["subclass"]:
self.repr_class_data(subclass_data)
self.repr_def_class(class_data)
self.repr_setattr(class_data) | [
"def",
"repr_class_data",
"(",
"self",
",",
"class_data",
")",
":",
"if",
"\"subclass\"",
"in",
"class_data",
":",
"for",
"subclass_data",
"in",
"class_data",
"[",
"\"subclass\"",
"]",
":",
"self",
".",
"repr_class_data",
"(",
"subclass_data",
")",
"self",
"."... | Create code like this::
class Person(classtree.Base):
def __init__(self, name=None, person_id=None):
self.name = name
self.person_id = person_id
class PersonCollection(classtree.Base):
def __init__(self, collection_id=None, create_date=None, name=None):
self.collection_id = collection_id
self.create_date = create_date
self.name = name
person = Person(name='Jack', person_id=1)
self.name____Jack = person
self.person_id____1 = person
person = Person(name='Paul', person_id=2)
self.name____Paul = person
self.person_id____2 = person | [
"Create",
"code",
"like",
"this",
"::",
"class",
"Person",
"(",
"classtree",
".",
"Base",
")",
":",
"def",
"__init__",
"(",
"self",
"name",
"=",
"None",
"person_id",
"=",
"None",
")",
":",
"self",
".",
"name",
"=",
"name",
"self",
".",
"person_id",
"... | python | train |
saltstack/salt | salt/modules/dockercompose.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dockercompose.py#L200-L223 | def __read_docker_compose_file(file_path):
'''
Read the compose file if it exists in the directory
:param file_path:
:return:
'''
if not os.path.isfile(file_path):
return __standardize_result(False,
'Path {} is not present'.format(file_path),
None, None)
try:
with salt.utils.files.fopen(file_path, 'r') as fl:
file_name = os.path.basename(file_path)
result = {file_name: ''}
for line in fl:
result[file_name] += salt.utils.stringutils.to_unicode(line)
except EnvironmentError:
return __standardize_result(False,
'Could not read {0}'.format(file_path),
None, None)
return __standardize_result(True,
'Reading content of {0}'.format(file_path),
result, None) | [
"def",
"__read_docker_compose_file",
"(",
"file_path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"return",
"__standardize_result",
"(",
"False",
",",
"'Path {} is not present'",
".",
"format",
"(",
"file_path",
")",
"... | Read the compose file if it exists in the directory
:param file_path:
:return: | [
"Read",
"the",
"compose",
"file",
"if",
"it",
"exists",
"in",
"the",
"directory"
] | python | train |
lsbardel/python-stdnet | stdnet/backends/__init__.py | https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/backends/__init__.py#L470-L488 | def parse_backend(backend):
"""Converts the "backend" into the database connection parameters.
It returns a (scheme, host, params) tuple."""
r = urlparse.urlsplit(backend)
scheme, host = r.scheme, r.netloc
path, query = r.path, r.query
if path and not query:
query, path = path, ''
if query:
if query.find('?'):
path = query
else:
query = query[1:]
if query:
params = dict(urlparse.parse_qsl(query))
else:
params = {}
return scheme, host, params | [
"def",
"parse_backend",
"(",
"backend",
")",
":",
"r",
"=",
"urlparse",
".",
"urlsplit",
"(",
"backend",
")",
"scheme",
",",
"host",
"=",
"r",
".",
"scheme",
",",
"r",
".",
"netloc",
"path",
",",
"query",
"=",
"r",
".",
"path",
",",
"r",
".",
"qu... | Converts the "backend" into the database connection parameters.
It returns a (scheme, host, params) tuple. | [
"Converts",
"the",
"backend",
"into",
"the",
"database",
"connection",
"parameters",
".",
"It",
"returns",
"a",
"(",
"scheme",
"host",
"params",
")",
"tuple",
"."
] | python | train |
Azure/azure-cosmos-python | azure/cosmos/cosmos_client.py | https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/cosmos_client.py#L1303-L1327 | def CreateUserDefinedFunction(self, collection_link, udf, options=None):
"""Creates a user defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The created UDF.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf)
return self.Create(udf,
path,
'udfs',
collection_id,
None,
options) | [
"def",
"CreateUserDefinedFunction",
"(",
"self",
",",
"collection_link",
",",
"udf",
",",
"options",
"=",
"None",
")",
":",
"if",
"options",
"is",
"None",
":",
"options",
"=",
"{",
"}",
"collection_id",
",",
"path",
",",
"udf",
"=",
"self",
".",
"_GetCon... | Creates a user defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The created UDF.
:rtype:
dict | [
"Creates",
"a",
"user",
"defined",
"function",
"in",
"a",
"collection",
"."
] | python | train |
libtcod/python-tcod | tcod/libtcodpy.py | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L2590-L2634 | def heightmap_add_fbm(
hm: np.ndarray,
noise: tcod.noise.Noise,
mulx: float,
muly: float,
addx: float,
addy: float,
octaves: float,
delta: float,
scale: float,
) -> None:
"""Add FBM noise to the heightmap.
The noise coordinate for each map cell is
`((x + addx) * mulx / width, (y + addy) * muly / height)`.
The value added to the heightmap is `delta + noise * scale`.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
noise (Noise): A Noise instance.
mulx (float): Scaling of each x coordinate.
muly (float): Scaling of each y coordinate.
addx (float): Translation of each x coordinate.
addy (float): Translation of each y coordinate.
octaves (float): Number of octaves in the FBM sum.
delta (float): The value added to all heightmap cells.
scale (float): The noise value is scaled with this parameter.
.. deprecated:: 8.1
An equivalent array of noise samples can be taken using a method such
as :any:`Noise.sample_ogrid`.
"""
noise = noise.noise_c if noise is not None else ffi.NULL
lib.TCOD_heightmap_add_fbm(
_heightmap_cdata(hm),
noise,
mulx,
muly,
addx,
addy,
octaves,
delta,
scale,
) | [
"def",
"heightmap_add_fbm",
"(",
"hm",
":",
"np",
".",
"ndarray",
",",
"noise",
":",
"tcod",
".",
"noise",
".",
"Noise",
",",
"mulx",
":",
"float",
",",
"muly",
":",
"float",
",",
"addx",
":",
"float",
",",
"addy",
":",
"float",
",",
"octaves",
":"... | Add FBM noise to the heightmap.
The noise coordinate for each map cell is
`((x + addx) * mulx / width, (y + addy) * muly / height)`.
The value added to the heightmap is `delta + noise * scale`.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
noise (Noise): A Noise instance.
mulx (float): Scaling of each x coordinate.
muly (float): Scaling of each y coordinate.
addx (float): Translation of each x coordinate.
addy (float): Translation of each y coordinate.
octaves (float): Number of octaves in the FBM sum.
delta (float): The value added to all heightmap cells.
scale (float): The noise value is scaled with this parameter.
.. deprecated:: 8.1
An equivalent array of noise samples can be taken using a method such
as :any:`Noise.sample_ogrid`. | [
"Add",
"FBM",
"noise",
"to",
"the",
"heightmap",
"."
] | python | train |
blue-yonder/tsfresh | tsfresh/feature_extraction/settings.py | https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/settings.py#L24-L82 | def from_columns(columns, columns_to_ignore=None):
"""
Creates a mapping from kind names to fc_parameters objects
(which are itself mappings from feature calculators to settings)
to extract only the features contained in the columns.
To do so, for every feature name in columns this method
1. split the column name into col, feature, params part
2. decide which feature we are dealing with (aggregate with/without params or apply)
3. add it to the new name_to_function dict
4. set up the params
:param columns: containing the feature names
:type columns: list of str
:param columns_to_ignore: columns which do not contain tsfresh feature names
:type columns_to_ignore: list of str
:return: The kind_to_fc_parameters object ready to be used in the extract_features function.
:rtype: dict
"""
kind_to_fc_parameters = {}
if columns_to_ignore is None:
columns_to_ignore = []
for col in columns:
if col in columns_to_ignore:
continue
if not isinstance(col, basestring):
raise TypeError("Column name {} should be a string or unicode".format(col))
# Split according to our separator into <col_name>, <feature_name>, <feature_params>
parts = col.split('__')
n_parts = len(parts)
if n_parts == 1:
raise ValueError("Splitting of columnname {} resulted in only one part.".format(col))
kind = parts[0]
feature_name = parts[1]
if kind not in kind_to_fc_parameters:
kind_to_fc_parameters[kind] = {}
if not hasattr(feature_calculators, feature_name):
raise ValueError("Unknown feature name {}".format(feature_name))
config = get_config_from_string(parts)
if config:
if feature_name in kind_to_fc_parameters[kind]:
kind_to_fc_parameters[kind][feature_name].append(config)
else:
kind_to_fc_parameters[kind][feature_name] = [config]
else:
kind_to_fc_parameters[kind][feature_name] = None
return kind_to_fc_parameters | [
"def",
"from_columns",
"(",
"columns",
",",
"columns_to_ignore",
"=",
"None",
")",
":",
"kind_to_fc_parameters",
"=",
"{",
"}",
"if",
"columns_to_ignore",
"is",
"None",
":",
"columns_to_ignore",
"=",
"[",
"]",
"for",
"col",
"in",
"columns",
":",
"if",
"col",... | Creates a mapping from kind names to fc_parameters objects
(which are itself mappings from feature calculators to settings)
to extract only the features contained in the columns.
To do so, for every feature name in columns this method
1. split the column name into col, feature, params part
2. decide which feature we are dealing with (aggregate with/without params or apply)
3. add it to the new name_to_function dict
4. set up the params
:param columns: containing the feature names
:type columns: list of str
:param columns_to_ignore: columns which do not contain tsfresh feature names
:type columns_to_ignore: list of str
:return: The kind_to_fc_parameters object ready to be used in the extract_features function.
:rtype: dict | [
"Creates",
"a",
"mapping",
"from",
"kind",
"names",
"to",
"fc_parameters",
"objects",
"(",
"which",
"are",
"itself",
"mappings",
"from",
"feature",
"calculators",
"to",
"settings",
")",
"to",
"extract",
"only",
"the",
"features",
"contained",
"in",
"the",
"col... | python | train |
square/pylink | setup.py | https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/setup.py#L70-L89 | def run(self):
"""Runs the command.
Args:
self (CleanCommand): the ``CleanCommand`` instance
Returns:
``None``
"""
for build_dir in self.build_dirs:
if os.path.isdir(build_dir):
sys.stdout.write('Removing %s%s' % (build_dir, os.linesep))
shutil.rmtree(build_dir)
for (root, dirs, files) in os.walk(self.cwd):
for name in files:
fullpath = os.path.join(root, name)
if any(fullpath.endswith(ext) for ext in self.build_artifacts):
sys.stdout.write('Removing %s%s' % (fullpath, os.linesep))
os.remove(fullpath) | [
"def",
"run",
"(",
"self",
")",
":",
"for",
"build_dir",
"in",
"self",
".",
"build_dirs",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"build_dir",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'Removing %s%s'",
"%",
"(",
"build_dir",
",",
... | Runs the command.
Args:
self (CleanCommand): the ``CleanCommand`` instance
Returns:
``None`` | [
"Runs",
"the",
"command",
"."
] | python | train |
KennethWilke/PingdomLib | pingdomlib/pingdom.py | https://github.com/KennethWilke/PingdomLib/blob/3ed1e481f9c9d16b032558d62fb05c2166e162ed/pingdomlib/pingdom.py#L191-L219 | def getChecks(self, **parameters):
"""Pulls all checks from pingdom
Optional Parameters:
* limit -- Limits the number of returned probes to the
specified quantity.
Type: Integer (max 25000)
Default: 25000
* offset -- Offset for listing (requires limit.)
Type: Integer
Default: 0
* tags -- Filter listing by tag/s
Type: String
Default: None
"""
# Warn user about unhandled parameters
for key in parameters:
if key not in ['limit', 'offset', 'tags']:
sys.stderr.write('%s not a valid argument for getChecks()\n'
% key)
response = self.request('GET', 'checks', parameters)
return [PingdomCheck(self, x) for x in response.json()['checks']] | [
"def",
"getChecks",
"(",
"self",
",",
"*",
"*",
"parameters",
")",
":",
"# Warn user about unhandled parameters",
"for",
"key",
"in",
"parameters",
":",
"if",
"key",
"not",
"in",
"[",
"'limit'",
",",
"'offset'",
",",
"'tags'",
"]",
":",
"sys",
".",
"stderr... | Pulls all checks from pingdom
Optional Parameters:
* limit -- Limits the number of returned probes to the
specified quantity.
Type: Integer (max 25000)
Default: 25000
* offset -- Offset for listing (requires limit.)
Type: Integer
Default: 0
* tags -- Filter listing by tag/s
Type: String
Default: None | [
"Pulls",
"all",
"checks",
"from",
"pingdom"
] | python | train |
iotile/coretools | iotilesensorgraph/iotile/sg/sensor_log.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/sensor_log.py#L244-L255 | def destroy_walker(self, walker):
"""Destroy a previously created stream walker.
Args:
walker (StreamWalker): The walker to remove from internal updating
lists.
"""
if walker.buffered:
self._queue_walkers.remove(walker)
else:
self._virtual_walkers.remove(walker) | [
"def",
"destroy_walker",
"(",
"self",
",",
"walker",
")",
":",
"if",
"walker",
".",
"buffered",
":",
"self",
".",
"_queue_walkers",
".",
"remove",
"(",
"walker",
")",
"else",
":",
"self",
".",
"_virtual_walkers",
".",
"remove",
"(",
"walker",
")"
] | Destroy a previously created stream walker.
Args:
walker (StreamWalker): The walker to remove from internal updating
lists. | [
"Destroy",
"a",
"previously",
"created",
"stream",
"walker",
"."
] | python | train |
shoebot/shoebot | lib/database/__init__.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/database/__init__.py#L120-L137 | def create_index(self, table, field, unique=False, ascending=True):
"""Creates a table index.
Creates an index on the given table,
on the given field with unique values enforced or not,
in ascending or descending order.
"""
if unique: u = "unique "
else: u = ""
if ascending: a = "asc"
else: a = "desc"
sql = "create "+u+"index index_"+table+"_"+field+" "
sql += "on "+table+"("+field+" "+a+")"
self._cur.execute(sql)
self._con.commit() | [
"def",
"create_index",
"(",
"self",
",",
"table",
",",
"field",
",",
"unique",
"=",
"False",
",",
"ascending",
"=",
"True",
")",
":",
"if",
"unique",
":",
"u",
"=",
"\"unique \"",
"else",
":",
"u",
"=",
"\"\"",
"if",
"ascending",
":",
"a",
"=",
"\"... | Creates a table index.
Creates an index on the given table,
on the given field with unique values enforced or not,
in ascending or descending order. | [
"Creates",
"a",
"table",
"index",
".",
"Creates",
"an",
"index",
"on",
"the",
"given",
"table",
"on",
"the",
"given",
"field",
"with",
"unique",
"values",
"enforced",
"or",
"not",
"in",
"ascending",
"or",
"descending",
"order",
"."
] | python | valid |
jaraco/jaraco.postgres | jaraco/postgres/__init__.py | https://github.com/jaraco/jaraco.postgres/blob/57375043314a3ce821ac3b0372ba2465135daa95/jaraco/postgres/__init__.py#L506-L555 | def start(self):
"""Launch this postgres server. If it's already running, do nothing.
If the backing storage directory isn't configured, raise
NotInitializedError.
This method is optional. If you're running in an environment
where the DBMS is provided as part of the basic infrastructure,
you probably want to skip this step!
"""
log.info('Starting PostgreSQL at %s:%s', self.host, self.port)
if not self.base_pathname:
tmpl = ('Invalid base_pathname: %r. Did you forget to call '
'.initdb()?')
raise NotInitializedError(tmpl % self.base_pathname)
conf_file = os.path.join(self.base_pathname, 'postgresql.conf')
if not os.path.exists(conf_file):
tmpl = 'No config file at: %r. Did you forget to call .initdb()?'
raise NotInitializedError(tmpl % self.base_pathname)
if not self.is_running():
version = self.get_version()
if version and version >= (9, 3):
socketop = 'unix_socket_directories'
else:
socketop = 'unix_socket_directory'
postgres_options = [
# When running not as root, postgres might try to put files
# where they're not writable (see
# https://paste.yougov.net/YKdgi). So set the socket_dir.
'-c', '{}={}'.format(socketop, self.base_pathname),
'-h', self.host,
'-i', # enable TCP/IP connections
'-p', self.port,
]
subprocess.check_call([
PostgresFinder.find_root() / 'pg_ctl',
'start',
'-D', self.base_pathname,
'-l', os.path.join(self.base_pathname, 'postgresql.log'),
'-o', subprocess.list2cmdline(postgres_options),
])
# Postgres may launch, then abort if it's unhappy with some parameter.
# This post-launch test helps us decide.
if not self.is_running():
tmpl = ('%s aborted immediately after launch, check '
'postgresql.log in storage dir')
raise RuntimeError(tmpl % self) | [
"def",
"start",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"'Starting PostgreSQL at %s:%s'",
",",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
"if",
"not",
"self",
".",
"base_pathname",
":",
"tmpl",
"=",
"(",
"'Invalid base_pathname: %r. Did you... | Launch this postgres server. If it's already running, do nothing.
If the backing storage directory isn't configured, raise
NotInitializedError.
This method is optional. If you're running in an environment
where the DBMS is provided as part of the basic infrastructure,
you probably want to skip this step! | [
"Launch",
"this",
"postgres",
"server",
".",
"If",
"it",
"s",
"already",
"running",
"do",
"nothing",
"."
] | python | train |
mdickinson/bigfloat | bigfloat/core.py | https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L2000-L2010 | def lngamma(x, context=None):
"""
Return the value of the logarithm of the Gamma function of x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_lngamma,
(BigFloat._implicit_convert(x),),
context,
) | [
"def",
"lngamma",
"(",
"x",
",",
"context",
"=",
"None",
")",
":",
"return",
"_apply_function_in_current_context",
"(",
"BigFloat",
",",
"mpfr",
".",
"mpfr_lngamma",
",",
"(",
"BigFloat",
".",
"_implicit_convert",
"(",
"x",
")",
",",
")",
",",
"context",
"... | Return the value of the logarithm of the Gamma function of x. | [
"Return",
"the",
"value",
"of",
"the",
"logarithm",
"of",
"the",
"Gamma",
"function",
"of",
"x",
"."
] | python | train |
apple/turicreate | src/unity/python/turicreate/meta/bytecodetools/bytecode_consumer.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/bytecodetools/bytecode_consumer.py#L25-L39 | def consume(self):
'''
Consume byte-code
'''
generic_consume = getattr(self, 'generic_consume', None)
for instr in disassembler(self.code):
method_name = 'consume_%s' % (instr.opname)
method = getattr(self, method_name, generic_consume)
if not method:
raise AttributeError("class %r has no method %r" % (type(self).__name__, method_name))
self.instruction_pre(instr)
method(instr)
self.instruction_post(instr) | [
"def",
"consume",
"(",
"self",
")",
":",
"generic_consume",
"=",
"getattr",
"(",
"self",
",",
"'generic_consume'",
",",
"None",
")",
"for",
"instr",
"in",
"disassembler",
"(",
"self",
".",
"code",
")",
":",
"method_name",
"=",
"'consume_%s'",
"%",
"(",
"... | Consume byte-code | [
"Consume",
"byte",
"-",
"code"
] | python | train |
chinapnr/fishbase | fishbase/fish_file.py | https://github.com/chinapnr/fishbase/blob/23c5147a6bc0d8ed36409e55352ffb2c5b0edc82/fishbase/fish_file.py#L94-L140 | def check_sub_path_create(sub_path):
"""
检查当前路径下的某个子路径是否存在, 不存在则创建;
:param:
* sub_path: (string) 下一级的某路径名称
:return:
* 返回类型 (tuple),有两个值
* True: 路径存在,False: 不需要创建
* False: 路径不存在,True: 创建成功
举例如下::
print('--- check_sub_path_create demo ---')
# 定义子路径名称
sub_path = 'demo_sub_dir'
# 检查当前路径下的一个子路径是否存在,不存在则创建
print('check sub path:', sub_path)
result = check_sub_path_create(sub_path)
print(result)
print('---')
输出结果::
--- check_sub_path_create demo ---
check sub path: demo_sub_dir
(True, False)
---
"""
# 获得当前路径
temp_path = pathlib.Path()
cur_path = temp_path.resolve()
# 生成 带有 sub_path_name 的路径
path = cur_path / pathlib.Path(sub_path)
# 判断是否存在带有 sub_path 路径
if path.exists():
# 返回 True: 路径存在, False: 不需要创建
return True, False
else:
path.mkdir(parents=True)
# 返回 False: 路径不存在 True: 路径已经创建
return False, True | [
"def",
"check_sub_path_create",
"(",
"sub_path",
")",
":",
"# 获得当前路径",
"temp_path",
"=",
"pathlib",
".",
"Path",
"(",
")",
"cur_path",
"=",
"temp_path",
".",
"resolve",
"(",
")",
"# 生成 带有 sub_path_name 的路径",
"path",
"=",
"cur_path",
"/",
"pathlib",
".",
"Path"... | 检查当前路径下的某个子路径是否存在, 不存在则创建;
:param:
* sub_path: (string) 下一级的某路径名称
:return:
* 返回类型 (tuple),有两个值
* True: 路径存在,False: 不需要创建
* False: 路径不存在,True: 创建成功
举例如下::
print('--- check_sub_path_create demo ---')
# 定义子路径名称
sub_path = 'demo_sub_dir'
# 检查当前路径下的一个子路径是否存在,不存在则创建
print('check sub path:', sub_path)
result = check_sub_path_create(sub_path)
print(result)
print('---')
输出结果::
--- check_sub_path_create demo ---
check sub path: demo_sub_dir
(True, False)
--- | [
"检查当前路径下的某个子路径是否存在",
"不存在则创建;"
] | python | train |
angr/angr | angr/state_plugins/heap/heap_freelist.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/heap/heap_freelist.py#L73-L78 | def fwd_chunk(self):
"""
Returns the chunk following this chunk in the list of free chunks.
"""
raise NotImplementedError("%s not implemented for %s" % (self.fwd_chunk.__func__.__name__,
self.__class__.__name__)) | [
"def",
"fwd_chunk",
"(",
"self",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"%s not implemented for %s\"",
"%",
"(",
"self",
".",
"fwd_chunk",
".",
"__func__",
".",
"__name__",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")"
] | Returns the chunk following this chunk in the list of free chunks. | [
"Returns",
"the",
"chunk",
"following",
"this",
"chunk",
"in",
"the",
"list",
"of",
"free",
"chunks",
"."
] | python | train |
gwastro/pycbc-glue | pycbc_glue/pipeline.py | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L3214-L3250 | def make_optimised_chunks(self, min_length, max_length, pad_data=0):
"""
Splits ScienceSegments up into chunks, of a given maximum length.
The length of the last two chunks are chosen so that the data
utilisation is optimised.
@param min_length: minimum chunk length.
@param max_length: maximum chunk length.
@param pad_data: exclude the first and last pad_data seconds of the
segment when generating chunks
"""
for seg in self.__sci_segs:
# pad data if requested
seg_start = seg.start() + pad_data
seg_end = seg.end() - pad_data
if seg.unused() > max_length:
# get number of max_length chunks
N = (seg_end - seg_start)/max_length
# split into chunks of max_length
for i in range(N-1):
start = seg_start + (i * max_length)
stop = start + max_length
seg.add_chunk(start, stop)
# optimise data usage for last 2 chunks
start = seg_start + ((N-1) * max_length)
middle = (start + seg_end)/2
seg.add_chunk(start, middle)
seg.add_chunk(middle, seg_end)
seg.set_unused(0)
elif seg.unused() > min_length:
# utilise as single chunk
seg.add_chunk(seg_start, seg_end)
else:
# no chunk of usable length
seg.set_unused(0) | [
"def",
"make_optimised_chunks",
"(",
"self",
",",
"min_length",
",",
"max_length",
",",
"pad_data",
"=",
"0",
")",
":",
"for",
"seg",
"in",
"self",
".",
"__sci_segs",
":",
"# pad data if requested",
"seg_start",
"=",
"seg",
".",
"start",
"(",
")",
"+",
"pa... | Splits ScienceSegments up into chunks, of a given maximum length.
The length of the last two chunks are chosen so that the data
utilisation is optimised.
@param min_length: minimum chunk length.
@param max_length: maximum chunk length.
@param pad_data: exclude the first and last pad_data seconds of the
segment when generating chunks | [
"Splits",
"ScienceSegments",
"up",
"into",
"chunks",
"of",
"a",
"given",
"maximum",
"length",
".",
"The",
"length",
"of",
"the",
"last",
"two",
"chunks",
"are",
"chosen",
"so",
"that",
"the",
"data",
"utilisation",
"is",
"optimised",
"."
] | python | train |
def open_preview(self):
    '''
    Launch a viewer for the document that was just built.
    Only Windows is fully supported; macOS is attempted via ``open``;
    on other platforms an error is logged instead.
    '''
    self.log.info('Opening preview...')
    ext = 'pdf' if self.opt.pdf else 'dvi'
    target = '%s.%s' % (self.project_name, ext)
    if sys.platform == 'win32':
        # Delegate to the shell file association; this fails when the
        # extension is not linked to any application.
        try:
            os.startfile(target)
        except OSError:
            self.log.error(
                'Preview-Error: Extension .%s is not linked to a '
                'specific application!' % ext
            )
    elif sys.platform == 'darwin':
        call(['open', target])
    else:
        self.log.error(
            'Preview-Error: Preview function is currently not '
            'supported on Linux.'
        )
"def",
"open_preview",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Opening preview...'",
")",
"if",
"self",
".",
"opt",
".",
"pdf",
":",
"ext",
"=",
"'pdf'",
"else",
":",
"ext",
"=",
"'dvi'",
"filename",
"=",
"'%s.%s'",
"%",
"(",
... | Try to open a preview of the generated document.
Currently only supported on Windows. | [
"Try",
"to",
"open",
"a",
"preview",
"of",
"the",
"generated",
"document",
".",
"Currently",
"only",
"supported",
"on",
"Windows",
"."
] | python | train |
def get_transitions_for(brain_or_object):
    """List available workflow transitions for all workflows

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: All possible available and allowed transitions
    :rtype: list[dict]
    """
    wf_tool = get_tool('portal_workflow')
    obj = get_object(brain_or_object)
    result = []
    for workflow_id in get_workflows_for(brain_or_object):
        # Collect transitions per workflow, skipping duplicates while
        # preserving first-seen order.
        for transition in wf_tool[workflow_id].getTransitionsFor(obj):
            if transition not in result:
                result.append(transition)
    return result
"def",
"get_transitions_for",
"(",
"brain_or_object",
")",
":",
"workflow",
"=",
"get_tool",
"(",
"'portal_workflow'",
")",
"transitions",
"=",
"[",
"]",
"instance",
"=",
"get_object",
"(",
"brain_or_object",
")",
"for",
"wfid",
"in",
"get_workflows_for",
"(",
"... | List available workflow transitions for all workflows
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: All possible available and allowed transitions
:rtype: list[dict] | [
"List",
"available",
"workflow",
"transitions",
"for",
"all",
"workflows"
] | python | train |
def simstate_encode(self, roll, pitch, yaw, xacc, yacc, zacc, xgyro, ygyro, zgyro, lat, lng):
    '''
    Build a SIMSTATE message describing the simulation environment.

    roll              : Roll angle (rad) (float)
    pitch             : Pitch angle (rad) (float)
    yaw               : Yaw angle (rad) (float)
    xacc              : X acceleration m/s/s (float)
    yacc              : Y acceleration m/s/s (float)
    zacc              : Z acceleration m/s/s (float)
    xgyro             : Angular speed around X axis rad/s (float)
    ygyro             : Angular speed around Y axis rad/s (float)
    zgyro             : Angular speed around Z axis rad/s (float)
    lat               : Latitude in degrees * 1E7 (int32_t)
    lng               : Longitude in degrees * 1E7 (int32_t)
    '''
    message = MAVLink_simstate_message(
        roll, pitch, yaw, xacc, yacc, zacc, xgyro, ygyro, zgyro, lat, lng)
    return message
"def",
"simstate_encode",
"(",
"self",
",",
"roll",
",",
"pitch",
",",
"yaw",
",",
"xacc",
",",
"yacc",
",",
"zacc",
",",
"xgyro",
",",
"ygyro",
",",
"zgyro",
",",
"lat",
",",
"lng",
")",
":",
"return",
"MAVLink_simstate_message",
"(",
"roll",
",",
"... | Status of simulation environment, if used
roll : Roll angle (rad) (float)
pitch : Pitch angle (rad) (float)
yaw : Yaw angle (rad) (float)
xacc : X acceleration m/s/s (float)
yacc : Y acceleration m/s/s (float)
zacc : Z acceleration m/s/s (float)
xgyro : Angular speed around X axis rad/s (float)
ygyro : Angular speed around Y axis rad/s (float)
zgyro : Angular speed around Z axis rad/s (float)
lat : Latitude in degrees * 1E7 (int32_t)
lng : Longitude in degrees * 1E7 (int32_t) | [
"Status",
"of",
"simulation",
"environment",
"if",
"used"
] | python | train |
def sample(self, size=1):
    """ Sample rigid transform random variables.

    Parameters
    ----------
    size : int
        number of samples to take

    Returns
    -------
    :obj:`RigidTransform` or :obj:`list` of :obj:`RigidTransform`
        a single transform when ``size == 1``, otherwise a list
    """
    drawn = []
    for _ in range(size):
        # Draw a rotation through the exponential map of a random
        # skew-symmetric matrix, and a translation from its own RV.
        xi = self._r_xi_rv.rvs(size=1)
        rotation = scipy.linalg.expm(skew(xi))
        translation = self._t_rv.rvs(size=1)
        drawn.append(RigidTransform(rotation=rotation,
                                    translation=translation,
                                    from_frame=self._from_frame,
                                    to_frame=self._to_frame))
    # Unwrap the list when exactly one sample was requested.
    if size == 1 and drawn:
        return drawn[0]
    return drawn
"def",
"sample",
"(",
"self",
",",
"size",
"=",
"1",
")",
":",
"samples",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"size",
")",
":",
"# sample random pose",
"xi",
"=",
"self",
".",
"_r_xi_rv",
".",
"rvs",
"(",
"size",
"=",
"1",
")",
"S_xi",... | Sample rigid transform random variables.
Parameters
----------
size : int
number of sample to take
Returns
-------
:obj:`list` of :obj:`RigidTransform`
sampled rigid transformations | [
"Sample",
"rigid",
"transform",
"random",
"variables",
"."
] | python | train |
def add_node(self, node):
    """ Add a node to this workflow

    This function adds nodes to the workflow. It also determines
    parent/child relations from the DataStorage inputs to this job.

    Parameters
    ----------
    node : pycbc.workflow.pegasus_workflow.Node
        A node that should be executed as part of this workflow.

    Returns
    -------
    Workflow
        This workflow (``self``), allowing call chaining.
    """
    # Freeze the node and register its DAX representation with the graph
    # (presumably a Pegasus DAX job — confirm against the dax module).
    node._finalize()
    node.in_workflow = self
    self._adag.addJob(node._dax_node)

    # Determine the parent child relationships based on the inputs that
    # this node requires.
    added_nodes = []
    for inp in node._inputs:
        if inp.node is not None and inp.node.in_workflow == self:
            # Producer is in this workflow: add one dependency edge per
            # distinct parent; added_nodes prevents duplicate edges.
            if inp.node not in added_nodes:
                parent = inp.node._dax_node
                child = node._dax_node
                dep = dax.Dependency(parent=parent, child=child)
                self._adag.addDependency(dep)
                added_nodes.append(inp.node)
        elif inp.node is not None and not inp.node.in_workflow:
            # Producer exists but was never added to any workflow.
            raise ValueError('Parents of this node must be added to the '
                             'workflow first.')
        elif inp.node is None and not inp.workflow_input:
            # No producing node: this file is an external input of the
            # workflow; record it exactly once via the workflow_input flag.
            self._inputs += [inp]
            inp.workflow_input = True
        elif inp.node is not None and inp.node.in_workflow != self and inp not in self._inputs:
            # Produced by a different workflow: track as an input that
            # crosses workflow boundaries.
            self._inputs += [inp]
            self._external_workflow_inputs += [inp]

    # Record the outputs that this node generates
    self._outputs += node._outputs

    # Record the executable that this node uses (registered only once)
    if not node.executable.in_workflow:
        node.executable.in_workflow = True
        self._executables += [node.executable]

    return self
"def",
"add_node",
"(",
"self",
",",
"node",
")",
":",
"node",
".",
"_finalize",
"(",
")",
"node",
".",
"in_workflow",
"=",
"self",
"self",
".",
"_adag",
".",
"addJob",
"(",
"node",
".",
"_dax_node",
")",
"# Determine the parent child relationships based on th... | Add a node to this workflow
This function adds nodes to the workflow. It also determines
parent/child relations from the DataStorage inputs to this job.
Parameters
----------
node : pycbc.workflow.pegasus_workflow.Node
A node that should be executed as part of this workflow. | [
"Add",
"a",
"node",
"to",
"this",
"workflow"
] | python | train |
def get_modname_from_modpath(module_fpath):
    """
    Derive the dotted, importable module name from a module file path.

    Args:
        module_fpath (str): module filepath

    Returns:
        str: dotted module name, e.g. ``utool.util_path``
    """
    dotted = '.'.join(get_module_subdir_list(module_fpath))
    # __init__ / __main__ components do not contribute to the import name
    for dunder in ('.__init__', '.__main__'):
        dotted = dotted.replace(dunder, '').strip()
    return dotted
"def",
"get_modname_from_modpath",
"(",
"module_fpath",
")",
":",
"modsubdir_list",
"=",
"get_module_subdir_list",
"(",
"module_fpath",
")",
"modname",
"=",
"'.'",
".",
"join",
"(",
"modsubdir_list",
")",
"modname",
"=",
"modname",
".",
"replace",
"(",
"'.__init__... | returns importable name from file path
get_modname_from_modpath
Args:
module_fpath (str): module filepath
Returns:
str: modname
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> module_fpath = ut.util_path.__file__
>>> modname = ut.get_modname_from_modpath(module_fpath)
>>> result = modname
>>> print(result)
utool.util_path | [
"returns",
"importable",
"name",
"from",
"file",
"path"
] | python | train |
def enumerate_tautomers_smiles(smiles):
    """Compute every tautomeric SMILES for a given SMILES string.

    :param smiles: A SMILES string.
    :returns: A set containing SMILES strings for every possible tautomer.
    :rtype: set of strings.
    """
    # Sanitization is skipped on parse because standardize performs it anyway
    molecule = Standardizer().standardize(
        Chem.MolFromSmiles(smiles, sanitize=False))
    return set(
        Chem.MolToSmiles(tautomer, isomericSmiles=True)
        for tautomer in TautomerEnumerator().enumerate(molecule)
    )
"def",
"enumerate_tautomers_smiles",
"(",
"smiles",
")",
":",
"# Skip sanitize as standardize does this anyway",
"mol",
"=",
"Chem",
".",
"MolFromSmiles",
"(",
"smiles",
",",
"sanitize",
"=",
"False",
")",
"mol",
"=",
"Standardizer",
"(",
")",
".",
"standardize",
... | Return a set of tautomers as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible tautomer.
:rtype: set of strings. | [
"Return",
"a",
"set",
"of",
"tautomers",
"as",
"SMILES",
"strings",
"given",
"a",
"SMILES",
"string",
"."
] | python | test |
def parse_ped(ped_stream, family_type='ped'):
    """Parse out minimal family information from a PED file.

    Args:
        ped_stream(iterable(str))
        family_type(str): Format of the pedigree information

    Returns:
        family_id(str), samples(list[dict])
    """
    parsed = FamilyParser(ped_stream, family_type=family_type)
    if len(parsed.families) != 1:
        raise PedigreeError("Only one case per ped file is allowed")

    family_id = list(parsed.families.keys())[0]
    members = parsed.families[family_id].individuals

    samples = []
    for ind_id, individual in members.items():
        samples.append({
            'sample_id': ind_id,
            'father': individual.father,
            'mother': individual.mother,
            # Convert sex / phenotype codes to human readable values
            'sex': SEX_MAP[individual.sex],
            'phenotype': PHENOTYPE_MAP[int(individual.phenotype)],
        })
    return family_id, samples
"def",
"parse_ped",
"(",
"ped_stream",
",",
"family_type",
"=",
"'ped'",
")",
":",
"pedigree",
"=",
"FamilyParser",
"(",
"ped_stream",
",",
"family_type",
"=",
"family_type",
")",
"if",
"len",
"(",
"pedigree",
".",
"families",
")",
"!=",
"1",
":",
"raise",... | Parse out minimal family information from a PED file.
Args:
ped_stream(iterable(str))
family_type(str): Format of the pedigree information
Returns:
family_id(str), samples(list[dict]) | [
"Parse",
"out",
"minimal",
"family",
"information",
"from",
"a",
"PED",
"file",
"."
] | python | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.