repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
LCAV/pylocus
pylocus/edm_completion.py
https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/edm_completion.py#L144-L159
def completion_acd(edm, X0, W=None, tol=1e-6, sweeps=3): """ Complete an denoise EDM using alternating decent. The idea here is to simply run reconstruct_acd for a few iterations, yieding a position estimate, which can in turn be used to get a completed and denoised edm. :param edm: noisy matrix (NxN) :param X0: starting points (Nxd) :param W: optional weight matrix. :param tol: Stopping criterion of iterative algorithm. :param sweeps: Maximum number of sweeps. """ from .algorithms import reconstruct_acd Xhat, costs = reconstruct_acd(edm, X0, W, tol=tol, sweeps=sweeps) return get_edm(Xhat)
[ "def", "completion_acd", "(", "edm", ",", "X0", ",", "W", "=", "None", ",", "tol", "=", "1e-6", ",", "sweeps", "=", "3", ")", ":", "from", ".", "algorithms", "import", "reconstruct_acd", "Xhat", ",", "costs", "=", "reconstruct_acd", "(", "edm", ",", ...
Complete an denoise EDM using alternating decent. The idea here is to simply run reconstruct_acd for a few iterations, yieding a position estimate, which can in turn be used to get a completed and denoised edm. :param edm: noisy matrix (NxN) :param X0: starting points (Nxd) :param W: optional weight matrix. :param tol: Stopping criterion of iterative algorithm. :param sweeps: Maximum number of sweeps.
[ "Complete", "an", "denoise", "EDM", "using", "alternating", "decent", "." ]
python
train
saltstack/salt
salt/runners/vault.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/vault.py#L28-L96
def generate_token(minion_id, signature, impersonated_by_master=False): ''' Generate a Vault token for minion minion_id minion_id The id of the minion that requests a token signature Cryptographic signature which validates that the request is indeed sent by the minion (or the master, see impersonated_by_master). impersonated_by_master If the master needs to create a token on behalf of the minion, this is True. This happens when the master generates minion pillars. ''' log.debug( 'Token generation request for %s (impersonated by master: %s)', minion_id, impersonated_by_master ) _validate_signature(minion_id, signature, impersonated_by_master) try: config = __opts__['vault'] verify = config.get('verify', None) if config['auth']['method'] == 'approle': if _selftoken_expired(): log.debug('Vault token expired. Recreating one') # Requesting a short ttl token url = '{0}/v1/auth/approle/login'.format(config['url']) payload = {'role_id': config['auth']['role_id']} if 'secret_id' in config['auth']: payload['secret_id'] = config['auth']['secret_id'] response = requests.post(url, json=payload, verify=verify) if response.status_code != 200: return {'error': response.reason} config['auth']['token'] = response.json()['auth']['client_token'] url = _get_token_create_url(config) headers = {'X-Vault-Token': config['auth']['token']} audit_data = { 'saltstack-jid': globals().get('__jid__', '<no jid set>'), 'saltstack-minion': minion_id, 'saltstack-user': globals().get('__user__', '<no user set>') } payload = { 'policies': _get_policies(minion_id, config), 'num_uses': 1, 'meta': audit_data } if payload['policies'] == []: return {'error': 'No policies matched minion'} log.trace('Sending token creation request to Vault') response = requests.post(url, headers=headers, json=payload, verify=verify) if response.status_code != 200: return {'error': response.reason} auth_data = response.json()['auth'] return { 'token': auth_data['client_token'], 'url': config['url'], 'verify': verify, } except 
Exception as e: return {'error': six.text_type(e)}
[ "def", "generate_token", "(", "minion_id", ",", "signature", ",", "impersonated_by_master", "=", "False", ")", ":", "log", ".", "debug", "(", "'Token generation request for %s (impersonated by master: %s)'", ",", "minion_id", ",", "impersonated_by_master", ")", "_validate...
Generate a Vault token for minion minion_id minion_id The id of the minion that requests a token signature Cryptographic signature which validates that the request is indeed sent by the minion (or the master, see impersonated_by_master). impersonated_by_master If the master needs to create a token on behalf of the minion, this is True. This happens when the master generates minion pillars.
[ "Generate", "a", "Vault", "token", "for", "minion", "minion_id" ]
python
train
pypa/pipenv
pipenv/patched/notpip/_vendor/webencodings/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/webencodings/__init__.py#L61-L88
def lookup(label): """ Look for an encoding by its label. This is the spec’s `get an encoding <http://encoding.spec.whatwg.org/#concept-encoding-get>`_ algorithm. Supported labels are listed there. :param label: A string. :returns: An :class:`Encoding` object, or :obj:`None` for an unknown label. """ # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020. label = ascii_lower(label.strip('\t\n\f\r ')) name = LABELS.get(label) if name is None: return None encoding = CACHE.get(name) if encoding is None: if name == 'x-user-defined': from .x_user_defined import codec_info else: python_name = PYTHON_NAMES.get(name, name) # Any python_name value that gets to here should be valid. codec_info = codecs.lookup(python_name) encoding = Encoding(name, codec_info) CACHE[name] = encoding return encoding
[ "def", "lookup", "(", "label", ")", ":", "# Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020.", "label", "=", "ascii_lower", "(", "label", ".", "strip", "(", "'\\t\\n\\f\\r '", ")", ")", "name", "=", "LABELS", ".", "get", "(", "label", ")", ...
Look for an encoding by its label. This is the spec’s `get an encoding <http://encoding.spec.whatwg.org/#concept-encoding-get>`_ algorithm. Supported labels are listed there. :param label: A string. :returns: An :class:`Encoding` object, or :obj:`None` for an unknown label.
[ "Look", "for", "an", "encoding", "by", "its", "label", ".", "This", "is", "the", "spec’s", "get", "an", "encoding", "<http", ":", "//", "encoding", ".", "spec", ".", "whatwg", ".", "org", "/", "#concept", "-", "encoding", "-", "get", ">", "_", "algor...
python
train
horazont/aioxmpp
aioxmpp/roster/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/roster/service.py#L697-L719
def approve(self, peer_jid): """ (Pre-)approve a subscription request from `peer_jid`. :param peer_jid: The peer to (pre-)approve. This sends a ``"subscribed"`` presence to the peer; if the peer has previously asked for a subscription, this will seal the deal and create the subscription. If the peer has not requested a subscription (yet), it is marked as pre-approved by the server. A future subscription request by the peer will then be confirmed by the server automatically. .. note:: Pre-approval is an OPTIONAL feature in :rfc:`6121`. It is announced as a stream feature. """ self.client.enqueue( stanza.Presence(type_=structs.PresenceType.SUBSCRIBED, to=peer_jid) )
[ "def", "approve", "(", "self", ",", "peer_jid", ")", ":", "self", ".", "client", ".", "enqueue", "(", "stanza", ".", "Presence", "(", "type_", "=", "structs", ".", "PresenceType", ".", "SUBSCRIBED", ",", "to", "=", "peer_jid", ")", ")" ]
(Pre-)approve a subscription request from `peer_jid`. :param peer_jid: The peer to (pre-)approve. This sends a ``"subscribed"`` presence to the peer; if the peer has previously asked for a subscription, this will seal the deal and create the subscription. If the peer has not requested a subscription (yet), it is marked as pre-approved by the server. A future subscription request by the peer will then be confirmed by the server automatically. .. note:: Pre-approval is an OPTIONAL feature in :rfc:`6121`. It is announced as a stream feature.
[ "(", "Pre", "-", ")", "approve", "a", "subscription", "request", "from", "peer_jid", "." ]
python
train
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L477-L483
def duty_cycle(self): """ Returns the current duty cycle of the motor. Units are percent. Values are -100 to 100. """ self._duty_cycle, value = self.get_attr_int(self._duty_cycle, 'duty_cycle') return value
[ "def", "duty_cycle", "(", "self", ")", ":", "self", ".", "_duty_cycle", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_duty_cycle", ",", "'duty_cycle'", ")", "return", "value" ]
Returns the current duty cycle of the motor. Units are percent. Values are -100 to 100.
[ "Returns", "the", "current", "duty", "cycle", "of", "the", "motor", ".", "Units", "are", "percent", ".", "Values", "are", "-", "100", "to", "100", "." ]
python
train
jamieleshaw/lurklib
lurklib/squeries.py
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/squeries.py#L212-L233
def admin(self, server=None): """ Get the admin information. Optional arguments: * server=None - Get admin information for - server instead of the current server. """ with self.lock: if not server: self.send('ADMIN') else: self.send('ADMIN %s' % server) rvalue = [] while self.readable(): admin_ncodes = '257', '258', '259' msg = self._recv(expected_replies=('256',) + admin_ncodes) if msg[0] == '256': pass elif msg[0] in admin_ncodes: rvalue.append(' '.join(msg[2:])[1:]) return rvalue
[ "def", "admin", "(", "self", ",", "server", "=", "None", ")", ":", "with", "self", ".", "lock", ":", "if", "not", "server", ":", "self", ".", "send", "(", "'ADMIN'", ")", "else", ":", "self", ".", "send", "(", "'ADMIN %s'", "%", "server", ")", "r...
Get the admin information. Optional arguments: * server=None - Get admin information for - server instead of the current server.
[ "Get", "the", "admin", "information", ".", "Optional", "arguments", ":", "*", "server", "=", "None", "-", "Get", "admin", "information", "for", "-", "server", "instead", "of", "the", "current", "server", "." ]
python
train
openstack/python-monascaclient
monascaclient/v2_0/shell.py
https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/shell.py#L1405-L1424
def do_alarm_history_list(mc, args): '''List alarms state history.''' fields = {} if args.dimensions: fields['dimensions'] = utils.format_parameters(args.dimensions) if args.starttime: _translate_starttime(args) fields['start_time'] = args.starttime if args.endtime: fields['end_time'] = args.endtime if args.limit: fields['limit'] = args.limit if args.offset: fields['offset'] = args.offset try: alarm = mc.alarms.history_list(**fields) except (osc_exc.ClientException, k_exc.HttpError) as he: raise osc_exc.CommandError('%s\n%s' % (he.message, he.details)) else: output_alarm_history(args, alarm)
[ "def", "do_alarm_history_list", "(", "mc", ",", "args", ")", ":", "fields", "=", "{", "}", "if", "args", ".", "dimensions", ":", "fields", "[", "'dimensions'", "]", "=", "utils", ".", "format_parameters", "(", "args", ".", "dimensions", ")", "if", "args"...
List alarms state history.
[ "List", "alarms", "state", "history", "." ]
python
train
phn/angles
angles.py
https://github.com/phn/angles/blob/5c30ed7c3a7412177daaed180bf3b2351b287589/angles.py#L1908-L1916
def from_spherical(cls, r=1.0, alpha=0.0, delta=0.0): """Construct Cartesian vector from spherical coordinates. alpha and delta must be in radians. """ x = r * math.cos(delta) * math.cos(alpha) y = r * math.cos(delta) * math.sin(alpha) z = r * math.sin(delta) return cls(x=x, y=y, z=z)
[ "def", "from_spherical", "(", "cls", ",", "r", "=", "1.0", ",", "alpha", "=", "0.0", ",", "delta", "=", "0.0", ")", ":", "x", "=", "r", "*", "math", ".", "cos", "(", "delta", ")", "*", "math", ".", "cos", "(", "alpha", ")", "y", "=", "r", "...
Construct Cartesian vector from spherical coordinates. alpha and delta must be in radians.
[ "Construct", "Cartesian", "vector", "from", "spherical", "coordinates", "." ]
python
train
MycroftAI/padatious
padatious/intent_container.py
https://github.com/MycroftAI/padatious/blob/794a2530d6079bdd06e193edd0d30b2cc793e631/padatious/intent_container.py#L148-L151
def remove_entity(self, name): """Unload an entity""" self.entities.remove(name) self.padaos.remove_entity(name)
[ "def", "remove_entity", "(", "self", ",", "name", ")", ":", "self", ".", "entities", ".", "remove", "(", "name", ")", "self", ".", "padaos", ".", "remove_entity", "(", "name", ")" ]
Unload an entity
[ "Unload", "an", "entity" ]
python
valid
wonambi-python/wonambi
wonambi/widgets/traces.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/traces.py#L670-L674
def page_next(self): """Go to the next page.""" window_start = (self.parent.value('window_start') + self.parent.value('window_length')) self.parent.overview.update_position(window_start)
[ "def", "page_next", "(", "self", ")", ":", "window_start", "=", "(", "self", ".", "parent", ".", "value", "(", "'window_start'", ")", "+", "self", ".", "parent", ".", "value", "(", "'window_length'", ")", ")", "self", ".", "parent", ".", "overview", "....
Go to the next page.
[ "Go", "to", "the", "next", "page", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py#L759-L770
def convert_fwdb_event_msg(self, rule, tenant_id, rule_id, policy_id): """Convert the Firewall DB to a event message format. From inputs from DB, this will create a FW rule dictionary that resembles the actual data from Openstack when a rule is created. This is usually called after restart, in order to populate local cache. """ rule.update({'tenant_id': tenant_id, 'id': rule_id, 'firewall_policy_id': policy_id}) fw_rule_data = {'firewall_rule': rule} return fw_rule_data
[ "def", "convert_fwdb_event_msg", "(", "self", ",", "rule", ",", "tenant_id", ",", "rule_id", ",", "policy_id", ")", ":", "rule", ".", "update", "(", "{", "'tenant_id'", ":", "tenant_id", ",", "'id'", ":", "rule_id", ",", "'firewall_policy_id'", ":", "policy_...
Convert the Firewall DB to a event message format. From inputs from DB, this will create a FW rule dictionary that resembles the actual data from Openstack when a rule is created. This is usually called after restart, in order to populate local cache.
[ "Convert", "the", "Firewall", "DB", "to", "a", "event", "message", "format", "." ]
python
train
dwavesystems/dimod
dimod/reference/composites/scalecomposite.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/scalecomposite.py#L213-L222
def _scale_back_response(bqm, response, scalar, ignored_interactions, ignored_variables, ignore_offset): """Helper function to scale back the response of sample method""" if len(ignored_interactions) + len( ignored_variables) + ignore_offset == 0: response.record.energy = np.divide(response.record.energy, scalar) else: response.record.energy = bqm.energies((response.record.sample, response.variables)) return response
[ "def", "_scale_back_response", "(", "bqm", ",", "response", ",", "scalar", ",", "ignored_interactions", ",", "ignored_variables", ",", "ignore_offset", ")", ":", "if", "len", "(", "ignored_interactions", ")", "+", "len", "(", "ignored_variables", ")", "+", "igno...
Helper function to scale back the response of sample method
[ "Helper", "function", "to", "scale", "back", "the", "response", "of", "sample", "method" ]
python
train
mnooner256/pyqrcode
pyqrcode/builder.py
https://github.com/mnooner256/pyqrcode/blob/674a77b5eaf850d063f518bd90c243ee34ad6b5d/pyqrcode/builder.py#L1517-L1528
def _hex_to_rgb(color): """\ Helper function to convert a color provided in hexadecimal format as RGB triple. """ if color[0] == '#': color = color[1:] if len(color) == 3: color = color[0] * 2 + color[1] * 2 + color[2] * 2 if len(color) != 6: raise ValueError('Input #{0} is not in #RRGGBB format'.format(color)) return [int(n, 16) for n in (color[:2], color[2:4], color[4:])]
[ "def", "_hex_to_rgb", "(", "color", ")", ":", "if", "color", "[", "0", "]", "==", "'#'", ":", "color", "=", "color", "[", "1", ":", "]", "if", "len", "(", "color", ")", "==", "3", ":", "color", "=", "color", "[", "0", "]", "*", "2", "+", "c...
\ Helper function to convert a color provided in hexadecimal format as RGB triple.
[ "\\", "Helper", "function", "to", "convert", "a", "color", "provided", "in", "hexadecimal", "format", "as", "RGB", "triple", "." ]
python
train
jobovy/galpy
galpy/orbit/planarOrbit.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/planarOrbit.py#L763-L817
def _EOM_dxdv(x,t,pot): """ NAME: _EOM_dxdv PURPOSE: implements the EOM, i.e., the right-hand side of the differential equation, for integrating phase space differences, rectangular INPUT: x - current phase-space position t - current time pot - (list of) Potential instance(s) OUTPUT: dy/dt HISTORY: 2011-10-18 - Written - Bovy (NYU) """ #x is rectangular so calculate R and phi R= nu.sqrt(x[0]**2.+x[1]**2.) phi= nu.arccos(x[0]/R) sinphi= x[1]/R cosphi= x[0]/R if x[1] < 0.: phi= 2.*nu.pi-phi #calculate forces Rforce= _evaluateplanarRforces(pot,R,phi=phi,t=t) phiforce= _evaluateplanarphiforces(pot,R,phi=phi,t=t) R2deriv= _evaluateplanarPotentials(pot,R,phi=phi,t=t,dR=2) phi2deriv= _evaluateplanarPotentials(pot,R,phi=phi,t=t,dphi=2) Rphideriv= _evaluateplanarPotentials(pot,R,phi=phi,t=t,dR=1,dphi=1) #Calculate derivatives and derivatives+time derivatives dFxdx= -cosphi**2.*R2deriv\ +2.*cosphi*sinphi/R**2.*phiforce\ +sinphi**2./R*Rforce\ +2.*sinphi*cosphi/R*Rphideriv\ -sinphi**2./R**2.*phi2deriv dFxdy= -sinphi*cosphi*R2deriv\ +(sinphi**2.-cosphi**2.)/R**2.*phiforce\ -cosphi*sinphi/R*Rforce\ -(cosphi**2.-sinphi**2.)/R*Rphideriv\ +cosphi*sinphi/R**2.*phi2deriv dFydx= -cosphi*sinphi*R2deriv\ +(sinphi**2.-cosphi**2.)/R**2.*phiforce\ +(sinphi**2.-cosphi**2.)/R*Rphideriv\ -sinphi*cosphi/R*Rforce\ +sinphi*cosphi/R**2.*phi2deriv dFydy= -sinphi**2.*R2deriv\ -2.*sinphi*cosphi/R**2.*phiforce\ -2.*sinphi*cosphi/R*Rphideriv\ +cosphi**2./R*Rforce\ -cosphi**2./R**2.*phi2deriv return nu.array([x[2],x[3], cosphi*Rforce-1./R*sinphi*phiforce, sinphi*Rforce+1./R*cosphi*phiforce, x[6],x[7], dFxdx*x[4]+dFxdy*x[5], dFydx*x[4]+dFydy*x[5]])
[ "def", "_EOM_dxdv", "(", "x", ",", "t", ",", "pot", ")", ":", "#x is rectangular so calculate R and phi", "R", "=", "nu", ".", "sqrt", "(", "x", "[", "0", "]", "**", "2.", "+", "x", "[", "1", "]", "**", "2.", ")", "phi", "=", "nu", ".", "arccos",...
NAME: _EOM_dxdv PURPOSE: implements the EOM, i.e., the right-hand side of the differential equation, for integrating phase space differences, rectangular INPUT: x - current phase-space position t - current time pot - (list of) Potential instance(s) OUTPUT: dy/dt HISTORY: 2011-10-18 - Written - Bovy (NYU)
[ "NAME", ":", "_EOM_dxdv", "PURPOSE", ":", "implements", "the", "EOM", "i", ".", "e", ".", "the", "right", "-", "hand", "side", "of", "the", "differential", "equation", "for", "integrating", "phase", "space", "differences", "rectangular", "INPUT", ":", "x", ...
python
train
quantmind/pulsar
pulsar/apps/http/client.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/http/client.py#L119-L125
def origin_req_host(self): """Required by Cookies handlers """ if self.history: return self.history[0].request.origin_req_host else: return scheme_host_port(self.url)[1]
[ "def", "origin_req_host", "(", "self", ")", ":", "if", "self", ".", "history", ":", "return", "self", ".", "history", "[", "0", "]", ".", "request", ".", "origin_req_host", "else", ":", "return", "scheme_host_port", "(", "self", ".", "url", ")", "[", "...
Required by Cookies handlers
[ "Required", "by", "Cookies", "handlers" ]
python
train
hazelcast/hazelcast-python-client
hazelcast/protocol/codec/client_authentication_codec.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/protocol/codec/client_authentication_codec.py#L13-L28
def calculate_size(username, password, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version): """ Calculates the request payload size""" data_size = 0 data_size += calculate_size_str(username) data_size += calculate_size_str(password) data_size += BOOLEAN_SIZE_IN_BYTES if uuid is not None: data_size += calculate_size_str(uuid) data_size += BOOLEAN_SIZE_IN_BYTES if owner_uuid is not None: data_size += calculate_size_str(owner_uuid) data_size += BOOLEAN_SIZE_IN_BYTES data_size += calculate_size_str(client_type) data_size += BYTE_SIZE_IN_BYTES data_size += calculate_size_str(client_hazelcast_version) return data_size
[ "def", "calculate_size", "(", "username", ",", "password", ",", "uuid", ",", "owner_uuid", ",", "is_owner_connection", ",", "client_type", ",", "serialization_version", ",", "client_hazelcast_version", ")", ":", "data_size", "=", "0", "data_size", "+=", "calculate_s...
Calculates the request payload size
[ "Calculates", "the", "request", "payload", "size" ]
python
train
gitpython-developers/GitPython
git/refs/remote.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/refs/remote.py#L19-L25
def iter_items(cls, repo, common_path=None, remote=None): """Iterate remote references, and if given, constrain them to the given remote""" common_path = common_path or cls._common_path_default if remote is not None: common_path = join_path(common_path, str(remote)) # END handle remote constraint return super(RemoteReference, cls).iter_items(repo, common_path)
[ "def", "iter_items", "(", "cls", ",", "repo", ",", "common_path", "=", "None", ",", "remote", "=", "None", ")", ":", "common_path", "=", "common_path", "or", "cls", ".", "_common_path_default", "if", "remote", "is", "not", "None", ":", "common_path", "=", ...
Iterate remote references, and if given, constrain them to the given remote
[ "Iterate", "remote", "references", "and", "if", "given", "constrain", "them", "to", "the", "given", "remote" ]
python
train
treycucco/bidon
bidon/db/core/sql_writer.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/core/sql_writer.py#L45-L47
def to_tuple(self, iterable, surround="()", joiner=", "): """Returns the iterable as a SQL tuple.""" return "{0}{1}{2}".format(surround[0], joiner.join(iterable), surround[1])
[ "def", "to_tuple", "(", "self", ",", "iterable", ",", "surround", "=", "\"()\"", ",", "joiner", "=", "\", \"", ")", ":", "return", "\"{0}{1}{2}\"", ".", "format", "(", "surround", "[", "0", "]", ",", "joiner", ".", "join", "(", "iterable", ")", ",", ...
Returns the iterable as a SQL tuple.
[ "Returns", "the", "iterable", "as", "a", "SQL", "tuple", "." ]
python
train
briney/abutils
abutils/utils/convert.py
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/utils/convert.py#L37-L61
def abi_to_fasta(input, output): ''' Converts ABI or AB1 files to FASTA format. Args: input (str): Path to a file or directory containing abi/ab1 files or zip archives of abi/ab1 files output (str): Path to a directory for the output FASTA files ''' direcs = [input, ] # unzip any zip archives zip_files = list_files(input, ['zip']) if zip_files: direcs.extend(_process_zip_files(zip_files)) # convert files for d in direcs: files = list_files(d, ['ab1', 'abi']) seqs = [SeqIO.read(open(f, 'rb'), 'abi') for f in files] # seqs = list(chain.from_iterable(seqs)) fastas = ['>{}\n{}'.format(s.id, str(s.seq)) for s in seqs] ofile = os.path.basename(os.path.normpath(d)) + '.fasta' opath = os.path.join(output, ofile) open(opath, 'w').write('\n'.join(fastas))
[ "def", "abi_to_fasta", "(", "input", ",", "output", ")", ":", "direcs", "=", "[", "input", ",", "]", "# unzip any zip archives", "zip_files", "=", "list_files", "(", "input", ",", "[", "'zip'", "]", ")", "if", "zip_files", ":", "direcs", ".", "extend", "...
Converts ABI or AB1 files to FASTA format. Args: input (str): Path to a file or directory containing abi/ab1 files or zip archives of abi/ab1 files output (str): Path to a directory for the output FASTA files
[ "Converts", "ABI", "or", "AB1", "files", "to", "FASTA", "format", "." ]
python
train
andymccurdy/redis-py
redis/client.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L1423-L1453
def set(self, name, value, ex=None, px=None, nx=False, xx=False): """ Set the value at key ``name`` to ``value`` ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds. ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds. ``nx`` if set to True, set the value at key ``name`` to ``value`` only if it does not exist. ``xx`` if set to True, set the value at key ``name`` to ``value`` only if it already exists. """ pieces = [name, value] if ex is not None: pieces.append('EX') if isinstance(ex, datetime.timedelta): ex = int(ex.total_seconds()) pieces.append(ex) if px is not None: pieces.append('PX') if isinstance(px, datetime.timedelta): px = int(px.total_seconds() * 1000) pieces.append(px) if nx: pieces.append('NX') if xx: pieces.append('XX') return self.execute_command('SET', *pieces)
[ "def", "set", "(", "self", ",", "name", ",", "value", ",", "ex", "=", "None", ",", "px", "=", "None", ",", "nx", "=", "False", ",", "xx", "=", "False", ")", ":", "pieces", "=", "[", "name", ",", "value", "]", "if", "ex", "is", "not", "None", ...
Set the value at key ``name`` to ``value`` ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds. ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds. ``nx`` if set to True, set the value at key ``name`` to ``value`` only if it does not exist. ``xx`` if set to True, set the value at key ``name`` to ``value`` only if it already exists.
[ "Set", "the", "value", "at", "key", "name", "to", "value" ]
python
train
limodou/uliweb
uliweb/utils/generic.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L519-L530
def get_field_model(name, model=None): """ get model field according to name """ if '.' in name: m, name = name.split('.') model = get_model(m) if model: return getattr(model, name, None), model else: return None, None
[ "def", "get_field_model", "(", "name", ",", "model", "=", "None", ")", ":", "if", "'.'", "in", "name", ":", "m", ",", "name", "=", "name", ".", "split", "(", "'.'", ")", "model", "=", "get_model", "(", "m", ")", "if", "model", ":", "return", "get...
get model field according to name
[ "get", "model", "field", "according", "to", "name" ]
python
train
mattjj/pylds
pylds/util.py
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L199-L218
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag): """ use the info smoother to solve a symmetric block tridiagonal system """ T, D, _ = H_diag.shape assert H_diag.ndim == 3 and H_diag.shape[2] == D assert H_upper_diag.shape == (T - 1, D, D) J_init = J_11 = J_22 = np.zeros((D, D)) h_init = h_1 = h_2 = np.zeros((D,)) J_21 = np.swapaxes(H_upper_diag, -1, -2) J_node = H_diag h_node = np.zeros((T, D)) _, _, sigmas, E_xt_xtp1 = \ info_E_step(J_init, h_init, 0, J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)), J_node, h_node, np.zeros(T)) return sigmas, E_xt_xtp1
[ "def", "compute_symm_block_tridiag_covariances", "(", "H_diag", ",", "H_upper_diag", ")", ":", "T", ",", "D", ",", "_", "=", "H_diag", ".", "shape", "assert", "H_diag", ".", "ndim", "==", "3", "and", "H_diag", ".", "shape", "[", "2", "]", "==", "D", "a...
use the info smoother to solve a symmetric block tridiagonal system
[ "use", "the", "info", "smoother", "to", "solve", "a", "symmetric", "block", "tridiagonal", "system" ]
python
train
google/grr
grr/server/grr_response_server/databases/mysql_cronjobs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_cronjobs.py#L101-L136
def UpdateCronJob(self, cronjob_id, last_run_status=db.Database.unchanged, last_run_time=db.Database.unchanged, current_run_id=db.Database.unchanged, state=db.Database.unchanged, forced_run_requested=db.Database.unchanged, cursor=None): """Updates run information for an existing cron job.""" updates = [] args = [] if last_run_status != db.Database.unchanged: updates.append("last_run_status=%s") args.append(int(last_run_status)) if last_run_time != db.Database.unchanged: updates.append("last_run_time=FROM_UNIXTIME(%s)") args.append(mysql_utils.RDFDatetimeToTimestamp(last_run_time)) if current_run_id != db.Database.unchanged: updates.append("current_run_id=%s") args.append(db_utils.CronJobRunIDToInt(current_run_id)) if state != db.Database.unchanged: updates.append("state=%s") args.append(state.SerializeToString()) if forced_run_requested != db.Database.unchanged: updates.append("forced_run_requested=%s") args.append(forced_run_requested) if not updates: return query = "UPDATE cron_jobs SET " query += ", ".join(updates) query += " WHERE job_id=%s" res = cursor.execute(query, args + [cronjob_id]) if res != 1: raise db.UnknownCronJobError("CronJob with id %s not found." % cronjob_id)
[ "def", "UpdateCronJob", "(", "self", ",", "cronjob_id", ",", "last_run_status", "=", "db", ".", "Database", ".", "unchanged", ",", "last_run_time", "=", "db", ".", "Database", ".", "unchanged", ",", "current_run_id", "=", "db", ".", "Database", ".", "unchang...
Updates run information for an existing cron job.
[ "Updates", "run", "information", "for", "an", "existing", "cron", "job", "." ]
python
train
ewels/MultiQC
multiqc/modules/bowtie2/bowtie2.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/bowtie2/bowtie2.py#L54-L188
def parse_bowtie2_logs(self, f): """ Warning: This function may make you want to stab yourself. Parse logs from bowtie2. These miss several key bits of information such as input files, so we try to look for logs from other wrapper tools that may have logged this info. If not found, we default to using the filename. Note that concatenated logs only parse if we have the command printed in there. The bowtie log uses the same strings mulitple times in different contexts to mean different things, making parsing very messy. Handle with care. Example single-end output from bowtie2: Time loading reference: 00:00:08 Time loading forward index: 00:00:16 Time loading mirror index: 00:00:09 [samopen] SAM header is present: 25 sequences. Multiseed full-index search: 00:58:04 38377305 reads; of these: 38377305 (100.00%) were unpaired; of these: 2525577 (6.58%) aligned 0 times 27593593 (71.90%) aligned exactly 1 time 8258135 (21.52%) aligned >1 times 93.42% overall alignment rate Time searching: 00:58:37 Overall time: 00:58:37 Example paired-end output from bowtie2: Time loading reference: 00:01:07 Time loading forward index: 00:00:26 Time loading mirror index: 00:00:09 Multiseed full-index search: 01:32:55 15066949 reads; of these: 15066949 (100.00%) were paired; of these: 516325 (3.43%) aligned concordantly 0 times 11294617 (74.96%) aligned concordantly exactly 1 time 3256007 (21.61%) aligned concordantly >1 times ---- 516325 pairs aligned concordantly 0 times; of these: 26692 (5.17%) aligned discordantly 1 time ---- 489633 pairs aligned 0 times concordantly or discordantly; of these: 979266 mates make up the pairs; of these: 592900 (60.55%) aligned 0 times 209206 (21.36%) aligned exactly 1 time 177160 (18.09%) aligned >1 times 98.03% overall alignment rate Time searching: 01:34:37 Overall time: 01:34:37 """ # Regexes regexes = { 'unpaired': { 'unpaired_aligned_none': r"(\d+) \([\d\.]+%\) aligned 0 times", 'unpaired_aligned_one': r"(\d+) \([\d\.]+%\) aligned exactly 1 time", 
'unpaired_aligned_multi': r"(\d+) \([\d\.]+%\) aligned >1 times" }, 'paired': { 'paired_aligned_none': r"(\d+) \([\d\.]+%\) aligned concordantly 0 times", 'paired_aligned_one': r"(\d+) \([\d\.]+%\) aligned concordantly exactly 1 time", 'paired_aligned_multi': r"(\d+) \([\d\.]+%\) aligned concordantly >1 times", 'paired_aligned_discord_one': r"(\d+) \([\d\.]+%\) aligned discordantly 1 time", 'paired_aligned_discord_multi': r"(\d+) \([\d\.]+%\) aligned discordantly >1 times", 'paired_aligned_mate_one': r"(\d+) \([\d\.]+%\) aligned exactly 1 time", 'paired_aligned_mate_multi': r"(\d+) \([\d\.]+%\) aligned >1 times", 'paired_aligned_mate_none': r"(\d+) \([\d\.]+%\) aligned 0 times" } } # Go through log file line by line s_name = f['s_name'] parsed_data = {} for l in f['f']: # Attempt in vain to find original bowtie2 command, logged by another program btcmd = re.search(r"bowtie2 .+ -[1U] ([^\s,]+)", l) if btcmd: s_name = self.clean_s_name(btcmd.group(1), f['root']) log.debug("Found a bowtie2 command, updating sample name to '{}'".format(s_name)) # Total reads total = re.search(r"(\d+) reads; of these:", l) if total: parsed_data['total_reads'] = int(total.group(1)) # Single end reads unpaired = re.search(r"(\d+) \([\d\.]+%\) were unpaired; of these:", l) if unpaired: parsed_data['unpaired_total'] = int(unpaired.group(1)) self.num_se += 1 # Do nested loop whilst we have this level of indentation l = f['f'].readline() while l.startswith(' '): for k, r in regexes['unpaired'].items(): match = re.search(r, l) if match: parsed_data[k] = int(match.group(1)) l = f['f'].readline() # Paired end reads paired = re.search(r"(\d+) \([\d\.]+%\) were paired; of these:", l) if paired: parsed_data['paired_total'] = int(paired.group(1)) self.num_pe += 1 # Do nested loop whilst we have this level of indentation l = f['f'].readline() while l.startswith(' '): for k, r in regexes['paired'].items(): match = re.search(r, l) if match: parsed_data[k] = int(match.group(1)) l = f['f'].readline() # 
Overall alignment rate overall = re.search(r"([\d\.]+)% overall alignment rate", l) if overall: parsed_data['overall_alignment_rate'] = float(overall.group(1)) # End of log section # Save half 'pairs' of mate counts m_keys = ['paired_aligned_mate_multi', 'paired_aligned_mate_none', 'paired_aligned_mate_one'] for k in m_keys: if k in parsed_data: parsed_data['{}_halved'.format(k)] = float(parsed_data[k]) / 2.0 # Save parsed data if s_name in self.bowtie2_data: log.debug("Duplicate sample name found! Overwriting: {}".format(s_name)) self.add_data_source(f, s_name) self.bowtie2_data[s_name] = parsed_data # Reset in case we find more in this log file s_name = f['s_name'] parsed_data = {}
[ "def", "parse_bowtie2_logs", "(", "self", ",", "f", ")", ":", "# Regexes", "regexes", "=", "{", "'unpaired'", ":", "{", "'unpaired_aligned_none'", ":", "r\"(\\d+) \\([\\d\\.]+%\\) aligned 0 times\"", ",", "'unpaired_aligned_one'", ":", "r\"(\\d+) \\([\\d\\.]+%\\) aligned ex...
Warning: This function may make you want to stab yourself. Parse logs from bowtie2. These miss several key bits of information such as input files, so we try to look for logs from other wrapper tools that may have logged this info. If not found, we default to using the filename. Note that concatenated logs only parse if we have the command printed in there. The bowtie log uses the same strings mulitple times in different contexts to mean different things, making parsing very messy. Handle with care. Example single-end output from bowtie2: Time loading reference: 00:00:08 Time loading forward index: 00:00:16 Time loading mirror index: 00:00:09 [samopen] SAM header is present: 25 sequences. Multiseed full-index search: 00:58:04 38377305 reads; of these: 38377305 (100.00%) were unpaired; of these: 2525577 (6.58%) aligned 0 times 27593593 (71.90%) aligned exactly 1 time 8258135 (21.52%) aligned >1 times 93.42% overall alignment rate Time searching: 00:58:37 Overall time: 00:58:37 Example paired-end output from bowtie2: Time loading reference: 00:01:07 Time loading forward index: 00:00:26 Time loading mirror index: 00:00:09 Multiseed full-index search: 01:32:55 15066949 reads; of these: 15066949 (100.00%) were paired; of these: 516325 (3.43%) aligned concordantly 0 times 11294617 (74.96%) aligned concordantly exactly 1 time 3256007 (21.61%) aligned concordantly >1 times ---- 516325 pairs aligned concordantly 0 times; of these: 26692 (5.17%) aligned discordantly 1 time ---- 489633 pairs aligned 0 times concordantly or discordantly; of these: 979266 mates make up the pairs; of these: 592900 (60.55%) aligned 0 times 209206 (21.36%) aligned exactly 1 time 177160 (18.09%) aligned >1 times 98.03% overall alignment rate Time searching: 01:34:37 Overall time: 01:34:37
[ "Warning", ":", "This", "function", "may", "make", "you", "want", "to", "stab", "yourself", "." ]
python
train
jonathf/chaospy
chaospy/distributions/baseclass.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/baseclass.py#L154-L187
def inv(self, q_data, max_iterations=100, tollerance=1e-5): """ Inverse Rosenblatt transformation. If possible the transformation is done analytically. If not possible, transformation is approximated using an algorithm that alternates between Newton-Raphson and binary search. Args: q_data (numpy.ndarray): Probabilities to be inverse. If any values are outside ``[0, 1]``, error will be raised. ``q_data.shape`` must be compatible with distribution shape. max_iterations (int): If approximation is used, this sets the maximum number of allowed iterations in the Newton-Raphson algorithm. tollerance (float): If approximation is used, this set the error tolerance level required to define a sample as converged. Returns: (numpy.ndarray): Inverted probability values where ``out.shape == q_data.shape``. """ q_data = numpy.asfarray(q_data) assert numpy.all((q_data >= 0) & (q_data <= 1)), "sanitize your inputs!" shape = q_data.shape q_data = q_data.reshape(len(self), -1) x_data = evaluation.evaluate_inverse(self, q_data) lower, upper = evaluation.evaluate_bound(self, x_data) x_data = numpy.clip(x_data, a_min=lower, a_max=upper) x_data = x_data.reshape(shape) return x_data
[ "def", "inv", "(", "self", ",", "q_data", ",", "max_iterations", "=", "100", ",", "tollerance", "=", "1e-5", ")", ":", "q_data", "=", "numpy", ".", "asfarray", "(", "q_data", ")", "assert", "numpy", ".", "all", "(", "(", "q_data", ">=", "0", ")", "...
Inverse Rosenblatt transformation. If possible the transformation is done analytically. If not possible, transformation is approximated using an algorithm that alternates between Newton-Raphson and binary search. Args: q_data (numpy.ndarray): Probabilities to be inverse. If any values are outside ``[0, 1]``, error will be raised. ``q_data.shape`` must be compatible with distribution shape. max_iterations (int): If approximation is used, this sets the maximum number of allowed iterations in the Newton-Raphson algorithm. tollerance (float): If approximation is used, this set the error tolerance level required to define a sample as converged. Returns: (numpy.ndarray): Inverted probability values where ``out.shape == q_data.shape``.
[ "Inverse", "Rosenblatt", "transformation", "." ]
python
train
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/index.py
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/index.py#L236-L240
def unique(self): """returns a tuple of unique key columns""" return tuple( (array_as_typed(s, k.dtype, k.shape) if k.ndim>1 else s)[self.start] for s, k in zip(self.sorted, self._keys))
[ "def", "unique", "(", "self", ")", ":", "return", "tuple", "(", "(", "array_as_typed", "(", "s", ",", "k", ".", "dtype", ",", "k", ".", "shape", ")", "if", "k", ".", "ndim", ">", "1", "else", "s", ")", "[", "self", ".", "start", "]", "for", "...
returns a tuple of unique key columns
[ "returns", "a", "tuple", "of", "unique", "key", "columns" ]
python
train
NetworkAutomation/jaide
jaide/core.py
https://github.com/NetworkAutomation/jaide/blob/8571b987a8c24c246dc09f1bcc11cb0f045ec33f/jaide/core.py#L737-L772
def interface_errors(self): """ Parse 'show interfaces extensive' and return interfaces with errors. Purpose: This function is called for the -e flag. It will let the user | know if there are any interfaces with errors, and what those | interfaces are. @returns: The output that should be shown to the user. @rtype: str """ output = [] # used to store the list of interfaces with errors. # get a string of each physical and logical interface element dev_response = self._session.command('sh interfaces extensive') ints = dev_response.xpath('//physical-interface') ints += dev_response.xpath('//logical-interface') for i in ints: # Grab the interface name for user output. int_name = i.xpath('name')[0].text.strip() # Only check certain interface types. if (('ge' or 'fe' or 'ae' or 'xe' or 'so' or 'et' or 'vlan' or 'lo0' or 'irb') in int_name): try: status = (i.xpath('admin-status')[0].text.strip() + '/' + i.xpath('oper-status')[0].text.strip()) except IndexError: pass else: for error in self._error_parse(i, "input"): output.append("%s (%s)%s" % (int_name, status, error)) for error in self._error_parse(i, "output"): output.append("%s (%s)%s" % (int_name, status, error)) if output == []: output.append('No interface errors were detected on this device.') return '\n'.join(output) + '\n'
[ "def", "interface_errors", "(", "self", ")", ":", "output", "=", "[", "]", "# used to store the list of interfaces with errors.", "# get a string of each physical and logical interface element", "dev_response", "=", "self", ".", "_session", ".", "command", "(", "'sh interface...
Parse 'show interfaces extensive' and return interfaces with errors. Purpose: This function is called for the -e flag. It will let the user | know if there are any interfaces with errors, and what those | interfaces are. @returns: The output that should be shown to the user. @rtype: str
[ "Parse", "show", "interfaces", "extensive", "and", "return", "interfaces", "with", "errors", "." ]
python
train
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L771-L781
def json(self, command, arguments, tags=None, id=None): """ Same as self.sync except it assumes the returned result is json, and loads the payload of the return object if the returned (data) is not of level (20) an error is raised. :Return: Data """ result = self.sync(command, arguments, tags=tags, id=id) if result.level != 20: raise RuntimeError('invalid result level, expecting json(20) got (%d)' % result.level) return json.loads(result.data)
[ "def", "json", "(", "self", ",", "command", ",", "arguments", ",", "tags", "=", "None", ",", "id", "=", "None", ")", ":", "result", "=", "self", ".", "sync", "(", "command", ",", "arguments", ",", "tags", "=", "tags", ",", "id", "=", "id", ")", ...
Same as self.sync except it assumes the returned result is json, and loads the payload of the return object if the returned (data) is not of level (20) an error is raised. :Return: Data
[ "Same", "as", "self", ".", "sync", "except", "it", "assumes", "the", "returned", "result", "is", "json", "and", "loads", "the", "payload", "of", "the", "return", "object", "if", "the", "returned", "(", "data", ")", "is", "not", "of", "level", "(", "20"...
python
train
note35/sinon
sinon/lib/stub.py
https://github.com/note35/sinon/blob/f1d551b679b393d64d926a8a279320904c38d0f5/sinon/lib/stub.py#L137-L161
def __get_return_value_no_withargs(self, *args, **kwargs): """ Pre-conditions: (1) The user has created a stub and specified the stub behaviour (2) The user has called the stub function with the specified "args" and "kwargs" (3) No 'withArgs' conditions were applicable in this case Args: args: tuple, the arguments inputed by the user kwargs: dictionary, the keyword arguments inputed by the user Returns: any type, the appropriate return value, based on the stub's behaviour setup and the user input """ c = self._conditions call_count = self._wrapper.callCount # if there might be applicable onCall conditions if call_count in c["oncall"]: index_list = [i for i, x in enumerate(c["oncall"]) if x and not c["args"][i] and not c["kwargs"][i]] for i in reversed(index_list): # if the onCall condition applies if call_count == c["oncall"][i]: return c["action"][i](*args, **kwargs) # else all conditions did not match return c["default"](*args, **kwargs)
[ "def", "__get_return_value_no_withargs", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "c", "=", "self", ".", "_conditions", "call_count", "=", "self", ".", "_wrapper", ".", "callCount", "# if there might be applicable onCall conditions", "if",...
Pre-conditions: (1) The user has created a stub and specified the stub behaviour (2) The user has called the stub function with the specified "args" and "kwargs" (3) No 'withArgs' conditions were applicable in this case Args: args: tuple, the arguments inputed by the user kwargs: dictionary, the keyword arguments inputed by the user Returns: any type, the appropriate return value, based on the stub's behaviour setup and the user input
[ "Pre", "-", "conditions", ":", "(", "1", ")", "The", "user", "has", "created", "a", "stub", "and", "specified", "the", "stub", "behaviour", "(", "2", ")", "The", "user", "has", "called", "the", "stub", "function", "with", "the", "specified", "args", "a...
python
train
minhhoit/yacms
yacms/core/admin.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/admin.py#L322-L338
def change_view(self, request, object_id, **kwargs): """ For the concrete model, check ``get_content_model()`` for a subclass and redirect to its admin change view. """ instance = get_object_or_404(self.concrete_model, pk=object_id) content_model = instance.get_content_model() self.check_permission(request, content_model, "change") if content_model.__class__ != self.model: change_url = admin_url(content_model.__class__, "change", content_model.id) return HttpResponseRedirect(change_url) return super(ContentTypedAdmin, self).change_view( request, object_id, **kwargs)
[ "def", "change_view", "(", "self", ",", "request", ",", "object_id", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "get_object_or_404", "(", "self", ".", "concrete_model", ",", "pk", "=", "object_id", ")", "content_model", "=", "instance", ".", "get_...
For the concrete model, check ``get_content_model()`` for a subclass and redirect to its admin change view.
[ "For", "the", "concrete", "model", "check", "get_content_model", "()", "for", "a", "subclass", "and", "redirect", "to", "its", "admin", "change", "view", "." ]
python
train
xmunoz/sodapy
sodapy/__init__.py
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L365-L374
def upsert(self, dataset_identifier, payload, content_type="json"): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' resource = _format_new_api_request(dataid=dataset_identifier, content_type=content_type) return self._perform_update("post", resource, payload)
[ "def", "upsert", "(", "self", ",", "dataset_identifier", ",", "payload", ",", "content_type", "=", "\"json\"", ")", ":", "resource", "=", "_format_new_api_request", "(", "dataid", "=", "dataset_identifier", ",", "content_type", "=", "content_type", ")", "return", ...
Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html
[ "Insert", "update", "or", "delete", "data", "to", "/", "from", "an", "existing", "dataset", ".", "Currently", "supports", "json", "and", "csv", "file", "objects", ".", "See", "here", "for", "the", "upsert", "documentation", ":", "http", ":", "//", "dev", ...
python
train
mezz64/pyHik
pyhik/hikvision.py
https://github.com/mezz64/pyHik/blob/1e7afca926e2b045257a43cbf8b1236a435493c2/pyhik/hikvision.py#L612-L620
def update_attributes(self, event, channel, attr): """Update attribute list for current event/channel.""" try: for i, sensor in enumerate(self.event_states[event]): if sensor[1] == int(channel): self.event_states[event][i] = attr except KeyError: _LOGGING.debug('Error updating attributes for: (%s, %s)', event, channel)
[ "def", "update_attributes", "(", "self", ",", "event", ",", "channel", ",", "attr", ")", ":", "try", ":", "for", "i", ",", "sensor", "in", "enumerate", "(", "self", ".", "event_states", "[", "event", "]", ")", ":", "if", "sensor", "[", "1", "]", "=...
Update attribute list for current event/channel.
[ "Update", "attribute", "list", "for", "current", "event", "/", "channel", "." ]
python
train
python-wink/python-wink
src/pywink/devices/piggy_bank.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/piggy_bank.py#L22-L34
def set_state(self, color_hex): """ :param color_hex: a hex string indicating the color of the porkfolio nose :return: nothing From the api... "the color of the nose is not in the desired_state but on the object itself." """ root_name = self.json_state.get('piggy_bank_id', self.name()) response = self.api_interface.set_device_state(self, { "nose_color": color_hex }, root_name) self._update_state_from_response(response)
[ "def", "set_state", "(", "self", ",", "color_hex", ")", ":", "root_name", "=", "self", ".", "json_state", ".", "get", "(", "'piggy_bank_id'", ",", "self", ".", "name", "(", ")", ")", "response", "=", "self", ".", "api_interface", ".", "set_device_state", ...
:param color_hex: a hex string indicating the color of the porkfolio nose :return: nothing From the api... "the color of the nose is not in the desired_state but on the object itself."
[ ":", "param", "color_hex", ":", "a", "hex", "string", "indicating", "the", "color", "of", "the", "porkfolio", "nose", ":", "return", ":", "nothing", "From", "the", "api", "...", "the", "color", "of", "the", "nose", "is", "not", "in", "the", "desired_stat...
python
train
pantsbuild/pants
src/python/pants/backend/jvm/tasks/jvm_dependency_check.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/jvm_dependency_check.py#L59-L62
def _skip(options): """Return true if the task should be entirely skipped, and thus have no product requirements.""" values = [options.missing_direct_deps, options.unnecessary_deps] return all(v == 'off' for v in values)
[ "def", "_skip", "(", "options", ")", ":", "values", "=", "[", "options", ".", "missing_direct_deps", ",", "options", ".", "unnecessary_deps", "]", "return", "all", "(", "v", "==", "'off'", "for", "v", "in", "values", ")" ]
Return true if the task should be entirely skipped, and thus have no product requirements.
[ "Return", "true", "if", "the", "task", "should", "be", "entirely", "skipped", "and", "thus", "have", "no", "product", "requirements", "." ]
python
train
restran/mountains
mountains/json/__init__.py
https://github.com/restran/mountains/blob/a97fee568b112f4e10d878f815d0db3dd0a98d74/mountains/json/__init__.py#L51-L65
def dumps(dict_data, ensure_ascii=True, indent=None, sort_keys=False, encoding='utf-8', **kwargs): """ 返回json数据 :param encoding: :param ensure_ascii: :param sort_keys: :param indent: :param dict_data: :return: """ return json.dumps(dict_data, default=json_default, ensure_ascii=ensure_ascii, indent=indent, sort_keys=sort_keys, encoding=encoding, **kwargs)
[ "def", "dumps", "(", "dict_data", ",", "ensure_ascii", "=", "True", ",", "indent", "=", "None", ",", "sort_keys", "=", "False", ",", "encoding", "=", "'utf-8'", ",", "*", "*", "kwargs", ")", ":", "return", "json", ".", "dumps", "(", "dict_data", ",", ...
返回json数据 :param encoding: :param ensure_ascii: :param sort_keys: :param indent: :param dict_data: :return:
[ "返回json数据", ":", "param", "encoding", ":", ":", "param", "ensure_ascii", ":", ":", "param", "sort_keys", ":", ":", "param", "indent", ":", ":", "param", "dict_data", ":", ":", "return", ":" ]
python
train
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L8237-L8242
def somenan(self, x, fun, p=0.1): """returns sometimes np.NaN, otherwise fun(x)""" if np.random.rand(1) < p: return np.NaN else: return fun(x)
[ "def", "somenan", "(", "self", ",", "x", ",", "fun", ",", "p", "=", "0.1", ")", ":", "if", "np", ".", "random", ".", "rand", "(", "1", ")", "<", "p", ":", "return", "np", ".", "NaN", "else", ":", "return", "fun", "(", "x", ")" ]
returns sometimes np.NaN, otherwise fun(x)
[ "returns", "sometimes", "np", ".", "NaN", "otherwise", "fun", "(", "x", ")" ]
python
train
myint/autoflake
autoflake.py
https://github.com/myint/autoflake/blob/68fea68646922b920d55975f9f2adaeafd84df4f/autoflake.py#L754-L766
def is_exclude_file(filename, exclude): """Return True if file matches exclude pattern.""" base_name = os.path.basename(filename) if base_name.startswith('.'): return True for pattern in exclude: if fnmatch.fnmatch(base_name, pattern): return True if fnmatch.fnmatch(filename, pattern): return True return False
[ "def", "is_exclude_file", "(", "filename", ",", "exclude", ")", ":", "base_name", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "if", "base_name", ".", "startswith", "(", "'.'", ")", ":", "return", "True", "for", "pattern", "in", "exclud...
Return True if file matches exclude pattern.
[ "Return", "True", "if", "file", "matches", "exclude", "pattern", "." ]
python
test
SeattleTestbed/seash
seash_modules.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/seash_modules.py#L190-L213
def are_cmddicts_same(dict1, dict2): """ Checks to see if two cmddicts are the same. Two cmddicts are defined to be the same if they have the same callbacks/ helptexts/children/summaries for all nodes. """ # If the set of all keys are not the same, they must not be the same. if set(dict1.keys()) != set(dict2.keys()): return False # Everything in dict1 should be in dict2 for key in dict1: # Check everything except children; Check for children recursively for propertytype in dict1[key]: if (not propertytype in dict2[key] or dict1[key][propertytype] != dict2[key][propertytype]): return False # Check children if not are_cmddicts_same(dict1[key]['children'], dict2[key]['children']): return False return True
[ "def", "are_cmddicts_same", "(", "dict1", ",", "dict2", ")", ":", "# If the set of all keys are not the same, they must not be the same.", "if", "set", "(", "dict1", ".", "keys", "(", ")", ")", "!=", "set", "(", "dict2", ".", "keys", "(", ")", ")", ":", "retur...
Checks to see if two cmddicts are the same. Two cmddicts are defined to be the same if they have the same callbacks/ helptexts/children/summaries for all nodes.
[ "Checks", "to", "see", "if", "two", "cmddicts", "are", "the", "same", ".", "Two", "cmddicts", "are", "defined", "to", "be", "the", "same", "if", "they", "have", "the", "same", "callbacks", "/", "helptexts", "/", "children", "/", "summaries", "for", "all"...
python
train
hotdoc/hotdoc
hotdoc/extensions/c/clang/cindex.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/c/clang/cindex.py#L1488-L1496
def location(self): """ Return the source location (the starting character) of the entity pointed at by the cursor. """ if not hasattr(self, '_loc'): self._loc = conf.lib.clang_getCursorLocation(self) return self._loc
[ "def", "location", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_loc'", ")", ":", "self", ".", "_loc", "=", "conf", ".", "lib", ".", "clang_getCursorLocation", "(", "self", ")", "return", "self", ".", "_loc" ]
Return the source location (the starting character) of the entity pointed at by the cursor.
[ "Return", "the", "source", "location", "(", "the", "starting", "character", ")", "of", "the", "entity", "pointed", "at", "by", "the", "cursor", "." ]
python
train
mthornhill/django-postal
src/postal/resource.py
https://github.com/mthornhill/django-postal/blob/21d65e09b45f0515cde6166345f46c3f506dd08f/src/postal/resource.py#L626-L680
def error_handler(self, e, request, meth, em_format): """ Override this method to add handling of errors customized for your needs """ if isinstance(e, FormValidationError): return self.form_validation_response(e) elif isinstance(e, TypeError): result = rc.BAD_REQUEST hm = HandlerMethod(meth) sig = hm.signature msg = 'Method signature does not match.\n\n' if sig: msg += 'Signature should be: %s' % sig else: msg += 'Resource does not expect any parameters.' if self.display_errors: msg += '\n\nException was: %s' % str(e) result.content = format_error(msg) return result elif isinstance(e, Http404): return rc.NOT_FOUND elif isinstance(e, HttpStatusCode): return e.response else: """ On errors (like code errors), we'd like to be able to give crash reports to both admins and also the calling user. There's two setting parameters for this: Parameters:: - `PISTON_EMAIL_ERRORS`: Will send a Django formatted error email to people in `settings.ADMINS`. - `PISTON_DISPLAY_ERRORS`: Will return a simple traceback to the caller, so he can tell you what error they got. If `PISTON_DISPLAY_ERRORS` is not enabled, the caller will receive a basic "500 Internal Server Error" message. """ exc_type, exc_value, tb = sys.exc_info() rep = ExceptionReporter(request, exc_type, exc_value, tb.tb_next) if self.email_errors: self.email_exception(rep) if self.display_errors: return HttpResponseServerError( format_error('\n'.join(rep.format_exception()))) else: raise
[ "def", "error_handler", "(", "self", ",", "e", ",", "request", ",", "meth", ",", "em_format", ")", ":", "if", "isinstance", "(", "e", ",", "FormValidationError", ")", ":", "return", "self", ".", "form_validation_response", "(", "e", ")", "elif", "isinstanc...
Override this method to add handling of errors customized for your needs
[ "Override", "this", "method", "to", "add", "handling", "of", "errors", "customized", "for", "your", "needs" ]
python
train
sckott/pygbif
pygbif/occurrences/download.py
https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L245-L267
def add_iterative_predicate(self, key, values_list): """add an iterative predicate with a key and set of values which it can be equal to in and or function. The individual predicates are specified with the type ``equals`` and combined with a type ``or``. The main reason for this addition is the inability of using ``in`` as predicate type wfor multiple taxon_key values (cfr. http://dev.gbif.org/issues/browse/POR-2753) :param key: API key to use for the query. :param values_list: Filename or list containing the taxon keys to be s searched. """ values = self._extract_values(values_list) predicate = {'type': 'equals', 'key': key, 'value': None} predicates = [] while values: predicate['value'] = values.pop() predicates.append(predicate.copy()) self.predicates.append({'type': 'or', 'predicates': predicates})
[ "def", "add_iterative_predicate", "(", "self", ",", "key", ",", "values_list", ")", ":", "values", "=", "self", ".", "_extract_values", "(", "values_list", ")", "predicate", "=", "{", "'type'", ":", "'equals'", ",", "'key'", ":", "key", ",", "'value'", ":"...
add an iterative predicate with a key and set of values which it can be equal to in and or function. The individual predicates are specified with the type ``equals`` and combined with a type ``or``. The main reason for this addition is the inability of using ``in`` as predicate type wfor multiple taxon_key values (cfr. http://dev.gbif.org/issues/browse/POR-2753) :param key: API key to use for the query. :param values_list: Filename or list containing the taxon keys to be s searched.
[ "add", "an", "iterative", "predicate", "with", "a", "key", "and", "set", "of", "values", "which", "it", "can", "be", "equal", "to", "in", "and", "or", "function", ".", "The", "individual", "predicates", "are", "specified", "with", "the", "type", "equals", ...
python
train
smarie/python-valid8
valid8/entry_points_annotations.py
https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/entry_points_annotations.py#L945-L963
def _assert_input_is_valid(input_value, # type: Any validators, # type: List[InputValidator] validated_func, # type: Callable input_name # type: str ): """ Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before executing the function. It simply delegates to the validator. The signature of this function is hardcoded to correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed. :param input_value: the value to validate :param validator: the Validator object that will be applied on input_value_to_validate :param validated_func: the function for which this validation is performed. This is not used since the Validator knows it already, but we should not change the signature here. :param input_name: the name of the function input that is being validated :return: Nothing """ for validator in validators: validator.assert_valid(input_name, input_value)
[ "def", "_assert_input_is_valid", "(", "input_value", ",", "# type: Any", "validators", ",", "# type: List[InputValidator]", "validated_func", ",", "# type: Callable", "input_name", "# type: str", ")", ":", "for", "validator", "in", "validators", ":", "validator", ".", "...
Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before executing the function. It simply delegates to the validator. The signature of this function is hardcoded to correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed. :param input_value: the value to validate :param validator: the Validator object that will be applied on input_value_to_validate :param validated_func: the function for which this validation is performed. This is not used since the Validator knows it already, but we should not change the signature here. :param input_name: the name of the function input that is being validated :return: Nothing
[ "Called", "by", "the", "validating_wrapper", "in", "the", "first", "step", "(", "a", ")", "apply_on_each_func_args", "for", "each", "function", "input", "before", "executing", "the", "function", ".", "It", "simply", "delegates", "to", "the", "validator", ".", ...
python
train
CalebBell/fluids
fluids/friction.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/friction.py#L1889-L2022
def friction_factor(Re, eD=0, Method='Clamond', Darcy=True, AvailableMethods=False):
    r'''Calculate the friction factor for pipe flow.

    Uses the method named by `Method` (default the exact `Clamond`
    solution), dispatching to the correspondingly-named correlation
    function in this module. 29 approximations are available in addition
    to the direct solution; each entry in the module-level `fmethods`
    registry records the `Re` and :math:`\epsilon/D` ranges over which it
    claims validity. Can also be accessed under the name `fd`.

    For Re below the laminar transition (Re < 2040, per Avila et al. [1]_)
    the laminar solution is always returned, regardless of the selected
    method.

    Examples
    --------
    >>> friction_factor(Re=1E5, eD=1E-4)
    0.01851386607747165

    Parameters
    ----------
    Re : float
        Reynolds number, [-]
    eD : float, optional
        Relative roughness of the wall, [-]

    Returns
    -------
    f : float
        Friction factor, [-]
    methods : list, only returned if AvailableMethods == True
        List of methods which claim to be valid for the range of `Re` and
        `eD` given

    Other Parameters
    ----------------
    Method : string, optional
        A string of the function name to use
    Darcy : bool, optional
        If False, will return fanning friction factor, 1/4 of the Darcy value
    AvailableMethods : bool, optional
        If True, function will consider which methods claim to be valid for
        the range of `Re` and `eD` given

    See Also
    --------
    Colebrook
    Clamond

    References
    ----------
    .. [1] Avila, Kerstin, David Moxey, Alberto de Lozar, Marc Avila, Dwight
       Barkley, and Björn Hof. "The Onset of Turbulence in Pipe Flow."
       Science 333, no. 6039 (July 8, 2011): 192-96.
       doi:10.1126/science.1203223.
    '''
    def _claims_validity(name):
        # A bound of None/0 in the registry means "unbounded" on that side.
        args = fmethods[name]['Arguments']
        eD_lo, eD_hi = args['eD']['Min'], args['eD']['Max']
        Re_lo, Re_hi = args['Re']['Min'], args['Re']['Max']
        return ((not eD_lo or eD_lo <= eD)
                and (not eD_hi or eD <= eD_hi)
                and (not Re_lo or Re > Re_lo)
                and (not Re_hi or Re <= Re_hi))

    if AvailableMethods:
        return [name for name in fmethods if _claims_validity(name)]
    if not Method:
        Method = 'Clamond'
    if Re < LAMINAR_TRANSITION_PIPE:
        f = friction_laminar(Re)
    else:
        # Dispatch to the correlation function named by `Method`.
        f = globals()[Method](Re=Re, eD=eD)
    return f if Darcy else f*0.25
[ "def", "friction_factor", "(", "Re", ",", "eD", "=", "0", ",", "Method", "=", "'Clamond'", ",", "Darcy", "=", "True", ",", "AvailableMethods", "=", "False", ")", ":", "def", "list_methods", "(", ")", ":", "methods", "=", "[", "i", "for", "i", "in", ...
r'''Calculates friction factor. Uses a specified method, or automatically picks one from the dictionary of available methods. 29 approximations are available as well as the direct solution, described in the table below. The default is to use the exact solution. Can also be accessed under the name `fd`. For Re < 2040, [1]_ the laminar solution is always returned, regardless of selected method. Examples -------- >>> friction_factor(Re=1E5, eD=1E-4) 0.01851386607747165 Parameters ---------- Re : float Reynolds number, [-] eD : float, optional Relative roughness of the wall, [-] Returns ------- f : float Friction factor, [-] methods : list, only returned if AvailableMethods == True List of methods which claim to be valid for the range of `Re` and `eD` given Other Parameters ---------------- Method : string, optional A string of the function name to use Darcy : bool, optional If False, will return fanning friction factor, 1/4 of the Darcy value AvailableMethods : bool, optional If True, function will consider which methods claim to be valid for the range of `Re` and `eD` given See Also -------- Colebrook Clamond Notes ----- +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Nice name |Re min|Re max|Re Default|:math:`\epsilon/D` Min|:math:`\epsilon/D` Max|:math:`\epsilon/D` Default| +===================+======+======+==========+======================+======================+==========================+ |Clamond |0 |None |None |0 |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Rao Kumar 2007 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Eck 1973 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Jain 
1976 |5000 |1.0E+7|None |4.0E-5 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Avci Karagoz 2009 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Swamee Jain 1976 |5000 |1.0E+8|None |1.0E-6 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Churchill 1977 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Brkic 2011 1 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Chen 1979 |4000 |4.0E+8|None |1.0E-7 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Round 1980 |4000 |4.0E+8|None |0 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Papaevangelo 2010 |10000 |1.0E+7|None |1.0E-5 |0.001 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Fang 2011 |3000 |1.0E+8|None |0 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Shacham 1980 |4000 |4.0E+8|None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Barr 1981 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Churchill 1973 |None |None |None |None |None |None | 
+-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Moody |4000 |1.0E+8|None |0 |1 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Zigrang Sylvester 1|4000 |1.0E+8|None |4.0E-5 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Zigrang Sylvester 2|4000 |1.0E+8|None |4.0E-5 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Buzzelli 2008 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Haaland |4000 |1.0E+8|None |1.0E-6 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Serghides 1 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Serghides 2 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Tsal 1989 |4000 |1.0E+8|None |0 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Alshul 1952 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Wood 1966 |4000 |5.0E+7|None |1.0E-5 |0.04 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Manadilli 1997 |5245 |1.0E+8|None |0 |0.05 |None | 
+-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Brkic 2011 2 |None |None |None |None |None |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Romeo 2002 |3000 |1.5E+8|None |0 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ |Sonnad Goudar 2006 |4000 |1.0E+8|None |1.0E-6 |0.05 |None | +-------------------+------+------+----------+----------------------+----------------------+--------------------------+ References ---------- .. [1] Avila, Kerstin, David Moxey, Alberto de Lozar, Marc Avila, Dwight Barkley, and Björn Hof. "The Onset of Turbulence in Pipe Flow." Science 333, no. 6039 (July 8, 2011): 192-96. doi:10.1126/science.1203223.
[ "r", "Calculates", "friction", "factor", ".", "Uses", "a", "specified", "method", "or", "automatically", "picks", "one", "from", "the", "dictionary", "of", "available", "methods", ".", "29", "approximations", "are", "available", "as", "well", "as", "the", "dir...
python
train
anrosent/cli
cli.py
https://github.com/anrosent/cli/blob/8f89df4564c5124c95aaa288441f9d574dfa89a3/cli.py#L99-L116
def print_help(self):
    """Print usage for every registered command, collapsing aliases into a
    single record, followed by the list of external CLIs.
    """
    shown_aliases = set()
    print('-' * 80)
    for cmd in sorted(self.cmds):
        # Builtins are not shown; aliases are shown once, under a
        # slash-joined combined name.
        if cmd in self.builtin_cmds or cmd in shown_aliases:
            continue
        if cmd in self.aliases:
            alias_group = self.aliases[cmd]
            shown_aliases.update(alias_group)
            label = '/'.join(alias_group)
        else:
            label = cmd
        _, parser = self.cmds[cmd]
        # Drop the leading "usage: <prog>" tokens from argparse's usage line.
        usage_words = parser.format_usage().split()
        print('%s: %s' % (label, ' '.join(usage_words[2:])))
    print('External CLIs: %s' % ', '.join(sorted(self.clis)))
[ "def", "print_help", "(", "self", ")", ":", "seen_aliases", "=", "set", "(", ")", "print", "(", "'-'", "*", "80", ")", "for", "cmd", "in", "sorted", "(", "self", ".", "cmds", ")", ":", "if", "cmd", "not", "in", "self", ".", "builtin_cmds", ":", "...
Prints usage of all registered commands, collapsing aliases into one record
[ "Prints", "usage", "of", "all", "registered", "commands", "collapsing", "aliases", "into", "one", "record" ]
python
train
christophertbrown/bioscripts
ctbBio/fix_fasta.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/fix_fasta.py#L18-L25
def fix_fasta(fasta):
    """Yield fasta records with problem characters stripped from headers.

    Records with an empty sequence are dropped.
    """
    for record in parse_fasta(fasta):
        record[0] = remove_char(record[0])
        if record[1]:
            yield record
[ "def", "fix_fasta", "(", "fasta", ")", ":", "for", "seq", "in", "parse_fasta", "(", "fasta", ")", ":", "seq", "[", "0", "]", "=", "remove_char", "(", "seq", "[", "0", "]", ")", "if", "len", "(", "seq", "[", "1", "]", ")", ">", "0", ":", "yiel...
remove pesky characters from fasta file header
[ "remove", "pesky", "characters", "from", "fasta", "file", "header" ]
python
train
sdispater/orator
orator/orm/relations/belongs_to_many.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/relations/belongs_to_many.py#L720-L732
def _set_timestamps_on_attach(self, record, exists=False): """ Set the creation an update timestamps on an attach record. """ fresh = self._parent.fresh_timestamp() if not exists and self._has_pivot_column(self.created_at()): record[self.created_at()] = fresh if self._has_pivot_column(self.updated_at()): record[self.updated_at()] = fresh return record
[ "def", "_set_timestamps_on_attach", "(", "self", ",", "record", ",", "exists", "=", "False", ")", ":", "fresh", "=", "self", ".", "_parent", ".", "fresh_timestamp", "(", ")", "if", "not", "exists", "and", "self", ".", "_has_pivot_column", "(", "self", ".",...
Set the creation an update timestamps on an attach record.
[ "Set", "the", "creation", "an", "update", "timestamps", "on", "an", "attach", "record", "." ]
python
train
insanum/gcalcli
gcalcli/utils.py
https://github.com/insanum/gcalcli/blob/428378a88f89d154c8d4046deb9bdb5eb4e81019/gcalcli/utils.py#L97-L112
def get_time_from_str(when):
    """Parse a string into a datetime: try the dateutil parser first,
    then fall back on fuzzy matching with parsedatetime.
    """
    midnight_today = datetime.now(tzlocal()).replace(
        hour=0, minute=0, second=0, microsecond=0)

    try:
        # Unspecified fields default to today's local midnight.
        return dateutil_parse(when, default=midnight_today)
    except ValueError:
        struct, result = fuzzy_date_parse(when)
        if not result:
            raise ValueError('Date and time is invalid: %s' % (when))
        return datetime.fromtimestamp(time.mktime(struct), tzlocal())
[ "def", "get_time_from_str", "(", "when", ")", ":", "zero_oclock_today", "=", "datetime", ".", "now", "(", "tzlocal", "(", ")", ")", ".", "replace", "(", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0",...
Convert a string to a time: first uses the dateutil parser, falls back on fuzzy matching with parsedatetime
[ "Convert", "a", "string", "to", "a", "time", ":", "first", "uses", "the", "dateutil", "parser", "falls", "back", "on", "fuzzy", "matching", "with", "parsedatetime" ]
python
train
gitpython-developers/smmap
smmap/util.py
https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L229-L238
def file_size(self):
    """:return: size of file we manage, computed lazily on first access
    and cached in ``self._file_size``"""
    if self._file_size is None:
        # A path gets stat(); anything else is assumed to be a file
        # descriptor and gets fstat().
        if isinstance(self._path_or_fd, string_types()):
            stat_result = os.stat(self._path_or_fd)
        else:
            stat_result = os.fstat(self._path_or_fd)
        self._file_size = stat_result.st_size
    return self._file_size
[ "def", "file_size", "(", "self", ")", ":", "if", "self", ".", "_file_size", "is", "None", ":", "if", "isinstance", "(", "self", ".", "_path_or_fd", ",", "string_types", "(", ")", ")", ":", "self", ".", "_file_size", "=", "os", ".", "stat", "(", "self...
:return: size of file we manager
[ ":", "return", ":", "size", "of", "file", "we", "manager" ]
python
train
NLeSC/scriptcwl
scriptcwl/step.py
https://github.com/NLeSC/scriptcwl/blob/33bb847a875379da3a5702c7a98dfa585306b960/scriptcwl/step.py#L113-L128
def output_reference(self, name):
    """Return a reference to the given output for use in an input of a
    next Step.

    For a Step named `echo` that has an output called `echoed`, the
    reference `echo/echoed` is returned.

    Args:
        name (str): the name of the Step output
    Raises:
        ValueError: The name provided is not a valid output name for this
            Step.
    """
    if name in self.output_names:
        return Reference(step_name=self.name_in_workflow, output_name=name)
    raise ValueError('Invalid output "{}"'.format(name))
[ "def", "output_reference", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "output_names", ":", "raise", "ValueError", "(", "'Invalid output \"{}\"'", ".", "format", "(", "name", ")", ")", "return", "Reference", "(", "step_name", ...
Return a reference to the given output for use in an input of a next Step. For a Step named `echo` that has an output called `echoed`, the reference `echo/echoed` is returned. Args: name (str): the name of the Step output Raises: ValueError: The name provided is not a valid output name for this Step.
[ "Return", "a", "reference", "to", "the", "given", "output", "for", "use", "in", "an", "input", "of", "a", "next", "Step", "." ]
python
train
openeemeter/eemeter
eemeter/transform.py
https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/transform.py#L62-L138
def as_freq(data_series, freq, atomic_freq="1 Min", series_type="cumulative"):
    """Resample data to a different frequency.

    This method can be used to upsample or downsample meter data. The
    assumption it makes to do so is that meter data is constant and averaged
    over the given periods. For instance, to convert billing-period data to
    daily data, this method first upsamples to the atomic frequency
    (1 minute frequency, by default), "spreading" usage evenly across all
    minutes in each period. Then it downsamples to hourly frequency and
    returns that result. With instantaneous series, the data is copied to
    all contiguous time intervals and the mean over `freq` is returned.

    **Caveats**:

    - This method gives a fair amount of flexibility in resampling as long
      as you are OK with the assumption that usage is constant over the
      period (this assumption is generally broken in observed data at large
      enough frequencies, so this caveat should not be taken lightly).

    Parameters
    ----------
    data_series : :any:`pandas.Series`
        Data to resample. Should have a :any:`pandas.DatetimeIndex`.
    freq : :any:`str`
        The frequency to resample to. This should be given in a form
        recognized by the :any:`pandas.Series.resample` method.
    atomic_freq : :any:`str`, optional
        The "atomic" frequency of the intermediate data form. This can be
        adjusted to a higher atomic frequency to increase speed or memory
        performance.
    series_type : :any:`str`, {'cumulative', 'instantaneous'},
        default 'cumulative'
        Type of data sampling. 'cumulative' data can be spread over smaller
        time intervals and is aggregated using addition (e.g. meter data).
        'instantaneous' data is copied (not spread) over smaller time
        intervals and is aggregated by averaging (e.g. weather data).

    Returns
    -------
    resampled_data : :any:`pandas.Series`
        Data resampled to the given frequency.

    Raises
    ------
    ValueError
        If `data_series` is not a :any:`pandas.Series`, or `series_type`
        is not one of 'cumulative' or 'instantaneous'.
    """
    # TODO(philngo): make sure this complies with CalTRACK 2.2.2.1
    if not isinstance(data_series, pd.Series):
        raise ValueError(
            "expected series, got object with class {}".format(data_series.__class__)
        )
    # Validate up front: previously an unknown series_type fell through
    # both branches below and died with a NameError on `resampled`.
    if series_type not in ("cumulative", "instantaneous"):
        raise ValueError(
            "expected series_type 'cumulative' or 'instantaneous',"
            " got {!r}".format(series_type)
        )
    if data_series.empty:
        return data_series
    series = remove_duplicates(data_series)
    target_freq = pd.Timedelta(atomic_freq)
    # Per-period durations; the final period has no successor, so pad with NaT.
    timedeltas = (series.index[1:] - series.index[:-1]).append(
        pd.TimedeltaIndex([pd.NaT])
    )

    if series_type == "cumulative":
        # Spread each period's total evenly across its atomic intervals, so
        # summing the atomic values reconstitutes the period totals.
        spread_factor = target_freq.total_seconds() / timedeltas.total_seconds()
        series_spread = series * spread_factor
        atomic_series = series_spread.asfreq(atomic_freq, method="ffill")
        resampled = atomic_series.resample(freq).sum()
        # .sum() yields 0 for all-NaN bins; use .mean()'s NaNs to restore them.
        resampled_with_nans = atomic_series.resample(freq).mean()
        resampled = resampled[resampled_with_nans.notnull()].reindex(resampled.index)

    elif series_type == "instantaneous":
        # Copy (not spread) values over atomic intervals, then average.
        atomic_series = series.asfreq(atomic_freq, method="ffill")
        resampled = atomic_series.resample(freq).mean()

    if resampled.index[-1] < series.index[-1]:
        # this adds a null at the end using the target frequency
        last_index = pd.date_range(resampled.index[-1], freq=freq, periods=2)[1:]
        resampled = (
            pd.concat([resampled, pd.Series(np.nan, index=last_index)])
            .resample(freq)
            .mean()
        )

    return resampled
[ "def", "as_freq", "(", "data_series", ",", "freq", ",", "atomic_freq", "=", "\"1 Min\"", ",", "series_type", "=", "\"cumulative\"", ")", ":", "# TODO(philngo): make sure this complies with CalTRACK 2.2.2.1", "if", "not", "isinstance", "(", "data_series", ",", "pd", "....
Resample data to a different frequency. This method can be used to upsample or downsample meter data. The assumption it makes to do so is that meter data is constant and averaged over the given periods. For instance, to convert billing-period data to daily data, this method first upsamples to the atomic frequency (1 minute freqency, by default), "spreading" usage evenly across all minutes in each period. Then it downsamples to hourly frequency and returns that result. With instantaneous series, the data is copied to all contiguous time intervals and the mean over `freq` is returned. **Caveats**: - This method gives a fair amount of flexibility in resampling as long as you are OK with the assumption that usage is constant over the period (this assumption is generally broken in observed data at large enough frequencies, so this caveat should not be taken lightly). Parameters ---------- data_series : :any:`pandas.Series` Data to resample. Should have a :any:`pandas.DatetimeIndex`. freq : :any:`str` The frequency to resample to. This should be given in a form recognized by the :any:`pandas.Series.resample` method. atomic_freq : :any:`str`, optional The "atomic" frequency of the intermediate data form. This can be adjusted to a higher atomic frequency to increase speed or memory performance. series_type : :any:`str`, {'cumulative', ‘instantaneous’}, default 'cumulative' Type of data sampling. 'cumulative' data can be spread over smaller time intervals and is aggregated using addition (e.g. meter data). 'instantaneous' data is copied (not spread) over smaller time intervals and is aggregated by averaging (e.g. weather data). Returns ------- resampled_data : :any:`pandas.Series` Data resampled to the given frequency.
[ "Resample", "data", "to", "a", "different", "frequency", "." ]
python
train
numenta/htmresearch
htmresearch/algorithms/multiconnections.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/multiconnections.py#L109-L137
def setPermanences(self, segments, presynapticCellsBySource, permanence):
    """
    Set the permanence of a specific set of synapses. Any synapses that
    don't exist will be initialized. Any existing synapses will be
    overwritten.

    Conceptually, this method takes a list of [segment, presynapticCell]
    pairs and initializes their permanence. For each segment, one synapse
    is added (although one might be added for each "source"). To add
    multiple synapses to a segment, include it in the list multiple times.

    The total number of affected synapses is
    len(segments)*number_of_sources*1.

    @param segments (numpy array)
    One segment for each synapse that should be added

    @param presynapticCellsBySource (dict of numpy arrays)
    One presynaptic cell for each segment.
    Example: {"customInputName1": np.array([42, 69])}

    @param permanence (float)
    The permanence to assign the synapse
    """
    # One permanence value per requested synapse.
    permanences = np.repeat(np.float32(permanence), len(segments))

    # NOTE: was dict.iteritems(), which is Python-2-only; .items() is
    # behaviorally equivalent on Python 2 and also works on Python 3.
    for source, connections in self.connectionsBySource.items():
        # Only sources named in the request get synapses set.
        if source in presynapticCellsBySource:
            connections.matrix.setElements(segments,
                                           presynapticCellsBySource[source],
                                           permanences)
[ "def", "setPermanences", "(", "self", ",", "segments", ",", "presynapticCellsBySource", ",", "permanence", ")", ":", "permanences", "=", "np", ".", "repeat", "(", "np", ".", "float32", "(", "permanence", ")", ",", "len", "(", "segments", ")", ")", "for", ...
Set the permanence of a specific set of synapses. Any synapses that don't exist will be initialized. Any existing synapses will be overwritten. Conceptually, this method takes a list of [segment, presynapticCell] pairs and initializes their permanence. For each segment, one synapse is added (although one might be added for each "source"). To add multiple synapses to a segment, include it in the list multiple times. The total number of affected synapses is len(segments)*number_of_sources*1. @param segments (numpy array) One segment for each synapse that should be added @param presynapticCellsBySource (dict of numpy arrays) One presynaptic cell for each segment. Example: {"customInputName1": np.array([42, 69])} @param permanence (float) The permanence to assign the synapse
[ "Set", "the", "permanence", "of", "a", "specific", "set", "of", "synapses", ".", "Any", "synapses", "that", "don", "t", "exist", "will", "be", "initialized", ".", "Any", "existing", "synapses", "will", "be", "overwritten", "." ]
python
train
DocNow/twarc
twarc/decorators.py
https://github.com/DocNow/twarc/blob/47dd87d0c00592a4d583412c9d660ba574fc6f26/twarc/decorators.py#L96-L108
def catch_gzip_errors(f): """ A decorator to handle gzip encoding errors which have been known to happen during hydration. """ def new_f(self, *args, **kwargs): try: return f(self, *args, **kwargs) except requests.exceptions.ContentDecodingError as e: log.warning("caught gzip error: %s", e) self.connect() return f(self, *args, **kwargs) return new_f
[ "def", "catch_gzip_errors", "(", "f", ")", ":", "def", "new_f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "requests", ".", ...
A decorator to handle gzip encoding errors which have been known to happen during hydration.
[ "A", "decorator", "to", "handle", "gzip", "encoding", "errors", "which", "have", "been", "known", "to", "happen", "during", "hydration", "." ]
python
train
jantman/awslimitchecker
awslimitchecker/services/ec2.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/ec2.py#L154-L194
def _find_usage_spot_fleets(self): """calculate spot fleet request usage and update Limits""" logger.debug('Getting spot fleet request usage') try: res = self.conn.describe_spot_fleet_requests() except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'UnsupportedOperation': return raise if 'NextToken' in res: logger.error('Error: describe_spot_fleet_requests() response ' 'includes pagination token, but pagination not ' 'configured in awslimitchecker.') active_fleets = 0 total_target_cap = 0 lim_cap_per_fleet = self.limits['Max target capacity per spot fleet'] lim_launch_specs = self.limits[ 'Max launch specifications per spot fleet'] for fleet in res['SpotFleetRequestConfigs']: _id = fleet['SpotFleetRequestId'] if fleet['SpotFleetRequestState'] != 'active': logger.debug('Skipping spot fleet request %s in state %s', _id, fleet['SpotFleetRequestState']) continue active_fleets += 1 cap = fleet['SpotFleetRequestConfig']['TargetCapacity'] launch_specs = len( fleet['SpotFleetRequestConfig']['LaunchSpecifications']) total_target_cap += cap lim_cap_per_fleet._add_current_usage( cap, resource_id=_id, aws_type='AWS::EC2::SpotFleetRequest') lim_launch_specs._add_current_usage( launch_specs, resource_id=_id, aws_type='AWS::EC2::SpotFleetRequest') self.limits['Max active spot fleets per region']._add_current_usage( active_fleets, aws_type='AWS::EC2::SpotFleetRequest' ) self.limits['Max target capacity for all spot ' 'fleets in region']._add_current_usage( total_target_cap, aws_type='AWS::EC2::SpotFleetRequest' )
[ "def", "_find_usage_spot_fleets", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Getting spot fleet request usage'", ")", "try", ":", "res", "=", "self", ".", "conn", ".", "describe_spot_fleet_requests", "(", ")", "except", "botocore", ".", "exceptions", ...
calculate spot fleet request usage and update Limits
[ "calculate", "spot", "fleet", "request", "usage", "and", "update", "Limits" ]
python
train
olucurious/PyFCM
pyfcm/fcm.py
https://github.com/olucurious/PyFCM/blob/28096cd5f6ef515bb6034e63327723d12304249a/pyfcm/fcm.py#L315-L387
def multiple_devices_data_message(self, registration_ids=None, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, content_available=None, timeout=5, extra_notification_kwargs=None, extra_kwargs={}): """ Sends push message to multiple devices, can send to over 1000 devices Args: registration_ids (list, optional): FCM device registration IDs condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. data_message (dict, optional): Custom key-value pairs content_available (bool, optional): Inactive client app is awoken timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. 
FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: JSON parsing error, mostly from changes in the response of FCM, create a new github issue to resolve it. """ if not isinstance(registration_ids, list): raise InvalidDataError('Invalid registration IDs (should be list)') payloads = [] registration_id_chunks = self.registration_id_chunks(registration_ids) for registration_ids in registration_id_chunks: # appends a payload with a chunk of registration ids here payloads.append(self.parse_payload( registration_ids=registration_ids, condition=condition, collapse_key=collapse_key, delay_while_idle=delay_while_idle, time_to_live=time_to_live, restricted_package_name=restricted_package_name, low_priority=low_priority, dry_run=dry_run, data_message=data_message, content_available=content_available, remove_notification=True, extra_notification_kwargs=extra_notification_kwargs, **extra_kwargs) ) self.send_request(payloads, timeout) return self.parse_responses()
[ "def", "multiple_devices_data_message", "(", "self", ",", "registration_ids", "=", "None", ",", "condition", "=", "None", ",", "collapse_key", "=", "None", ",", "delay_while_idle", "=", "False", ",", "time_to_live", "=", "None", ",", "restricted_package_name", "="...
Sends push message to multiple devices, can send to over 1000 devices Args: registration_ids (list, optional): FCM device registration IDs condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. data_message (dict, optional): Custom key-value pairs content_available (bool, optional): Inactive client app is awoken timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: JSON parsing error, mostly from changes in the response of FCM, create a new github issue to resolve it.
[ "Sends", "push", "message", "to", "multiple", "devices", "can", "send", "to", "over", "1000", "devices" ]
python
train
blockstack/pybitcoin
pybitcoin/transactions/serialize.py
https://github.com/blockstack/pybitcoin/blob/92c8da63c40f7418594b1ce395990c3f5a4787cc/pybitcoin/transactions/serialize.py#L20-L42
def serialize_input(input, signature_script_hex=''): """ Serializes a transaction input. """ if not (isinstance(input, dict) and 'transaction_hash' in input \ and 'output_index' in input): raise Exception('Required parameters: transaction_hash, output_index') if is_hex(str(input['transaction_hash'])) and len(str(input['transaction_hash'])) != 64: raise Exception("Transaction hash '%s' must be 32 bytes" % input['transaction_hash']) elif not is_hex(str(input['transaction_hash'])) and len(str(input['transaction_hash'])) != 32: raise Exception("Transaction hash '%s' must be 32 bytes" % hexlify(input['transaction_hash'])) if not 'sequence' in input: input['sequence'] = UINT_MAX return ''.join([ flip_endian(input['transaction_hash']), hexlify(struct.pack('<I', input['output_index'])), hexlify(variable_length_int(len(signature_script_hex)/2)), signature_script_hex, hexlify(struct.pack('<I', input['sequence'])) ])
[ "def", "serialize_input", "(", "input", ",", "signature_script_hex", "=", "''", ")", ":", "if", "not", "(", "isinstance", "(", "input", ",", "dict", ")", "and", "'transaction_hash'", "in", "input", "and", "'output_index'", "in", "input", ")", ":", "raise", ...
Serializes a transaction input.
[ "Serializes", "a", "transaction", "input", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/extract_text.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L435-L447
def availability_pdf() -> bool: """ Is a PDF-to-text tool available? """ pdftotext = tools['pdftotext'] if pdftotext: return True elif pdfminer: log.warning("PDF conversion: pdftotext missing; " "using pdfminer (less efficient)") return True else: return False
[ "def", "availability_pdf", "(", ")", "->", "bool", ":", "pdftotext", "=", "tools", "[", "'pdftotext'", "]", "if", "pdftotext", ":", "return", "True", "elif", "pdfminer", ":", "log", ".", "warning", "(", "\"PDF conversion: pdftotext missing; \"", "\"using pdfminer ...
Is a PDF-to-text tool available?
[ "Is", "a", "PDF", "-", "to", "-", "text", "tool", "available?" ]
python
train
pycontribs/pyrax
pyrax/autoscale.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/autoscale.py#L1161-L1171
def update_policy(self, scaling_group, policy, name=None, policy_type=None, cooldown=None, change=None, is_percent=False, desired_capacity=None, args=None): """ Updates the specified policy. One or more of the parameters may be specified. """ return self._manager.update_policy(scaling_group, policy, name=name, policy_type=policy_type, cooldown=cooldown, change=change, is_percent=is_percent, desired_capacity=desired_capacity, args=args)
[ "def", "update_policy", "(", "self", ",", "scaling_group", ",", "policy", ",", "name", "=", "None", ",", "policy_type", "=", "None", ",", "cooldown", "=", "None", ",", "change", "=", "None", ",", "is_percent", "=", "False", ",", "desired_capacity", "=", ...
Updates the specified policy. One or more of the parameters may be specified.
[ "Updates", "the", "specified", "policy", ".", "One", "or", "more", "of", "the", "parameters", "may", "be", "specified", "." ]
python
train
petl-developers/petl
petl/util/base.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/util/base.py#L670-L727
def rowgroupby(table, key, value=None): """Convenient adapter for :func:`itertools.groupby`. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['a', 1, True], ... ['b', 3, True], ... ['b', 2]] >>> # group entire rows ... for key, group in etl.rowgroupby(table1, 'foo'): ... print(key, list(group)) ... a [('a', 1, True)] b [('b', 3, True), ('b', 2)] >>> # group specific values ... for key, group in etl.rowgroupby(table1, 'foo', 'bar'): ... print(key, list(group)) ... a [1] b [3, 2] N.B., assumes the input table is already sorted by the given key. """ it = iter(table) hdr = next(it) flds = list(map(text_type, hdr)) # wrap rows as records it = (Record(row, flds) for row in it) # determine key function if callable(key): getkey = key native_key = True else: kindices = asindices(hdr, key) getkey = comparable_itemgetter(*kindices) native_key = False git = groupby(it, key=getkey) if value is None: if native_key: return git else: return ((k.inner, vals) for (k, vals) in git) else: if callable(value): getval = value else: vindices = asindices(hdr, value) getval = operator.itemgetter(*vindices) if native_key: return ((k, (getval(v) for v in vals)) for (k, vals) in git) else: return ((k.inner, (getval(v) for v in vals)) for (k, vals) in git)
[ "def", "rowgroupby", "(", "table", ",", "key", ",", "value", "=", "None", ")", ":", "it", "=", "iter", "(", "table", ")", "hdr", "=", "next", "(", "it", ")", "flds", "=", "list", "(", "map", "(", "text_type", ",", "hdr", ")", ")", "# wrap rows as...
Convenient adapter for :func:`itertools.groupby`. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz'], ... ['a', 1, True], ... ['b', 3, True], ... ['b', 2]] >>> # group entire rows ... for key, group in etl.rowgroupby(table1, 'foo'): ... print(key, list(group)) ... a [('a', 1, True)] b [('b', 3, True), ('b', 2)] >>> # group specific values ... for key, group in etl.rowgroupby(table1, 'foo', 'bar'): ... print(key, list(group)) ... a [1] b [3, 2] N.B., assumes the input table is already sorted by the given key.
[ "Convenient", "adapter", "for", ":", "func", ":", "itertools", ".", "groupby", ".", "E", ".", "g", ".", "::" ]
python
train
defunkt/pystache
pystache/renderengine.py
https://github.com/defunkt/pystache/blob/17a5dfdcd56eb76af731d141de395a7632a905b8/pystache/renderengine.py#L167-L181
def render(self, template, context_stack, delimiters=None): """ Render a unicode template string, and return as unicode. Arguments: template: a template string of type unicode (but not a proper subclass of unicode). context_stack: a ContextStack instance. """ parsed_template = parse(template, delimiters) return parsed_template.render(self, context_stack)
[ "def", "render", "(", "self", ",", "template", ",", "context_stack", ",", "delimiters", "=", "None", ")", ":", "parsed_template", "=", "parse", "(", "template", ",", "delimiters", ")", "return", "parsed_template", ".", "render", "(", "self", ",", "context_st...
Render a unicode template string, and return as unicode. Arguments: template: a template string of type unicode (but not a proper subclass of unicode). context_stack: a ContextStack instance.
[ "Render", "a", "unicode", "template", "string", "and", "return", "as", "unicode", "." ]
python
train
QInfer/python-qinfer
src/qinfer/abstract_model.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/abstract_model.py#L336-L355
def distance(self, a, b): r""" Gives the distance between two model parameter vectors :math:`\vec{a}` and :math:`\vec{b}`. By default, this is the vector 1-norm of the difference :math:`\mathbf{Q} (\vec{a} - \vec{b})` rescaled by :attr:`~Model.Q`. :param np.ndarray a: Array of model parameter vectors having shape ``(n_models, n_modelparams)``. :param np.ndarray b: Array of model parameters to compare to, having the same shape as ``a``. :return: An array ``d`` of distances ``d[i]`` between ``a[i, :]`` and ``b[i, :]``. """ return np.apply_along_axis( lambda vec: np.linalg.norm(vec, 1), 1, self.Q * (a - b) )
[ "def", "distance", "(", "self", ",", "a", ",", "b", ")", ":", "return", "np", ".", "apply_along_axis", "(", "lambda", "vec", ":", "np", ".", "linalg", ".", "norm", "(", "vec", ",", "1", ")", ",", "1", ",", "self", ".", "Q", "*", "(", "a", "-"...
r""" Gives the distance between two model parameter vectors :math:`\vec{a}` and :math:`\vec{b}`. By default, this is the vector 1-norm of the difference :math:`\mathbf{Q} (\vec{a} - \vec{b})` rescaled by :attr:`~Model.Q`. :param np.ndarray a: Array of model parameter vectors having shape ``(n_models, n_modelparams)``. :param np.ndarray b: Array of model parameters to compare to, having the same shape as ``a``. :return: An array ``d`` of distances ``d[i]`` between ``a[i, :]`` and ``b[i, :]``.
[ "r", "Gives", "the", "distance", "between", "two", "model", "parameter", "vectors", ":", "math", ":", "\\", "vec", "{", "a", "}", "and", ":", "math", ":", "\\", "vec", "{", "b", "}", ".", "By", "default", "this", "is", "the", "vector", "1", "-", ...
python
train
TheHive-Project/Cortex-Analyzers
analyzers/MaxMind/ipaddr.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MaxMind/ipaddr.py#L1559-L1582
def _explode_shorthand_ip_string(self): """Expand a shortened IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A string, the expanded IPv6 address. """ if isinstance(self, _BaseNet): ip_str = str(self.ip) else: ip_str = str(self) ip_int = self._ip_int_from_string(ip_str) parts = [] for i in xrange(self._HEXTET_COUNT): parts.append('%04x' % (ip_int & 0xFFFF)) ip_int >>= 16 parts.reverse() if isinstance(self, _BaseNet): return '%s/%d' % (':'.join(parts), self.prefixlen) return ':'.join(parts)
[ "def", "_explode_shorthand_ip_string", "(", "self", ")", ":", "if", "isinstance", "(", "self", ",", "_BaseNet", ")", ":", "ip_str", "=", "str", "(", "self", ".", "ip", ")", "else", ":", "ip_str", "=", "str", "(", "self", ")", "ip_int", "=", "self", "...
Expand a shortened IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A string, the expanded IPv6 address.
[ "Expand", "a", "shortened", "IPv6", "address", "." ]
python
train
ContextLab/hypertools
hypertools/_shared/helpers.py
https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/_shared/helpers.py#L190-L222
def get_type(data): """ Checks what the data type is and returns it as a string label """ import six from ..datageometry import DataGeometry if isinstance(data, list): if isinstance(data[0], (six.string_types, six.text_type, six.binary_type)): return 'list_str' elif isinstance(data[0], (int, float)): return 'list_num' elif isinstance(data[0], np.ndarray): return 'list_arr' else: raise TypeError('Unsupported data type passed. Supported types: ' 'Numpy Array, Pandas DataFrame, String, List of strings' ', List of numbers') elif isinstance(data, np.ndarray): if isinstance(data[0][0], (six.string_types, six.text_type, six.binary_type)): return 'arr_str' else: return 'arr_num' elif isinstance(data, pd.DataFrame): return 'df' elif isinstance(data, (six.string_types, six.text_type, six.binary_type)): return 'str' elif isinstance(data, DataGeometry): return 'geo' else: raise TypeError('Unsupported data type passed. Supported types: ' 'Numpy Array, Pandas DataFrame, String, List of strings' ', List of numbers')
[ "def", "get_type", "(", "data", ")", ":", "import", "six", "from", ".", ".", "datageometry", "import", "DataGeometry", "if", "isinstance", "(", "data", ",", "list", ")", ":", "if", "isinstance", "(", "data", "[", "0", "]", ",", "(", "six", ".", "stri...
Checks what the data type is and returns it as a string label
[ "Checks", "what", "the", "data", "type", "is", "and", "returns", "it", "as", "a", "string", "label" ]
python
train
AustralianSynchrotron/lightflow
lightflow/models/parameters.py
https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/parameters.py#L99-L114
def check_missing(self, args): """ Returns the names of all options that are required but were not specified. All options that don't have a default value are required in order to run the workflow. Args: args (dict): A dictionary of the provided arguments that is checked for missing options. Returns: list: A list with the names of the options that are missing from the provided arguments. """ return [opt.name for opt in self if (opt.name not in args) and (opt.default is None)]
[ "def", "check_missing", "(", "self", ",", "args", ")", ":", "return", "[", "opt", ".", "name", "for", "opt", "in", "self", "if", "(", "opt", ".", "name", "not", "in", "args", ")", "and", "(", "opt", ".", "default", "is", "None", ")", "]" ]
Returns the names of all options that are required but were not specified. All options that don't have a default value are required in order to run the workflow. Args: args (dict): A dictionary of the provided arguments that is checked for missing options. Returns: list: A list with the names of the options that are missing from the provided arguments.
[ "Returns", "the", "names", "of", "all", "options", "that", "are", "required", "but", "were", "not", "specified", "." ]
python
train
robhowley/nhlscrapi
nhlscrapi/games/game.py
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/game.py#L159-L185
def matchup(self): """ Return the game meta information displayed in report banners including team names, final score, game date, location, and attendance. Data format is .. code:: python { 'home': home, 'away': away, 'final': final, 'attendance': att, 'date': date, 'location': loc } :returns: matchup banner info :rtype: dict """ if self.play_by_play.matchup: return self.play_by_play.matchup elif self.rosters.matchup: return self.rosters.matchup elif self.toi.matchup: return self.toi.matchup else: self.face_off_comp.matchup
[ "def", "matchup", "(", "self", ")", ":", "if", "self", ".", "play_by_play", ".", "matchup", ":", "return", "self", ".", "play_by_play", ".", "matchup", "elif", "self", ".", "rosters", ".", "matchup", ":", "return", "self", ".", "rosters", ".", "matchup",...
Return the game meta information displayed in report banners including team names, final score, game date, location, and attendance. Data format is .. code:: python { 'home': home, 'away': away, 'final': final, 'attendance': att, 'date': date, 'location': loc } :returns: matchup banner info :rtype: dict
[ "Return", "the", "game", "meta", "information", "displayed", "in", "report", "banners", "including", "team", "names", "final", "score", "game", "date", "location", "and", "attendance", ".", "Data", "format", "is", "..", "code", "::", "python", "{", "home", "...
python
train
chrisjsewell/jsonextended
jsonextended/edict.py
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L76-L81
def is_path_like(obj, attr=('name', 'is_file', 'is_dir', 'iterdir')): """test if object is pathlib.Path like""" for a in attr: if not hasattr(obj, a): return False return True
[ "def", "is_path_like", "(", "obj", ",", "attr", "=", "(", "'name'", ",", "'is_file'", ",", "'is_dir'", ",", "'iterdir'", ")", ")", ":", "for", "a", "in", "attr", ":", "if", "not", "hasattr", "(", "obj", ",", "a", ")", ":", "return", "False", "retur...
test if object is pathlib.Path like
[ "test", "if", "object", "is", "pathlib", ".", "Path", "like" ]
python
train
NoneGG/aredis
aredis/cache.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/cache.py#L148-L154
def _pack(self, content): """pack the content using serializer and compressor""" if self.serializer: content = self.serializer.serialize(content) if self.compressor: content = self.compressor.compress(content) return content
[ "def", "_pack", "(", "self", ",", "content", ")", ":", "if", "self", ".", "serializer", ":", "content", "=", "self", ".", "serializer", ".", "serialize", "(", "content", ")", "if", "self", ".", "compressor", ":", "content", "=", "self", ".", "compresso...
pack the content using serializer and compressor
[ "pack", "the", "content", "using", "serializer", "and", "compressor" ]
python
train
pybel/pybel
src/pybel/struct/mutation/transfer.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/mutation/transfer.py#L15-L21
def iter_children(graph, node: BaseEntity) -> Iterable[BaseEntity]: """Iterate over children of the node.""" return ( node for node, _, d in graph.in_edges(node, data=True) if d[RELATION] == IS_A )
[ "def", "iter_children", "(", "graph", ",", "node", ":", "BaseEntity", ")", "->", "Iterable", "[", "BaseEntity", "]", ":", "return", "(", "node", "for", "node", ",", "_", ",", "d", "in", "graph", ".", "in_edges", "(", "node", ",", "data", "=", "True",...
Iterate over children of the node.
[ "Iterate", "over", "children", "of", "the", "node", "." ]
python
train
kvesteri/postgresql-audit
postgresql_audit/migrations.py
https://github.com/kvesteri/postgresql-audit/blob/91b497ced2e04dd44bb757b02983d2a64a2b1514/postgresql_audit/migrations.py#L96-L153
def change_column_name( conn, table, old_column_name, new_column_name, schema=None ): """ Changes given `activity` jsonb data column key. This function is useful when you want to reflect column name changes to activity table. :: from alembic import op from postgresql_audit import change_column_name def upgrade(): op.alter_column( 'my_table', 'my_column', new_column_name='some_column' ) change_column_name(op, 'my_table', 'my_column', 'some_column') :param conn: An object that is able to execute SQL (either SQLAlchemy Connection, Engine or Alembic Operations object) :param table: The table to run the column name changes against :param old_column_name: Name of the column to change :param new_column_name: New colum name :param schema: Optional name of schema to use. """ activity_table = get_activity_table(schema=schema) query = ( activity_table .update() .values( old_data=jsonb_change_key_name( activity_table.c.old_data, old_column_name, new_column_name ), changed_data=jsonb_change_key_name( activity_table.c.changed_data, old_column_name, new_column_name ) ) .where(activity_table.c.table_name == table) ) return conn.execute(query)
[ "def", "change_column_name", "(", "conn", ",", "table", ",", "old_column_name", ",", "new_column_name", ",", "schema", "=", "None", ")", ":", "activity_table", "=", "get_activity_table", "(", "schema", "=", "schema", ")", "query", "=", "(", "activity_table", "...
Changes given `activity` jsonb data column key. This function is useful when you want to reflect column name changes to activity table. :: from alembic import op from postgresql_audit import change_column_name def upgrade(): op.alter_column( 'my_table', 'my_column', new_column_name='some_column' ) change_column_name(op, 'my_table', 'my_column', 'some_column') :param conn: An object that is able to execute SQL (either SQLAlchemy Connection, Engine or Alembic Operations object) :param table: The table to run the column name changes against :param old_column_name: Name of the column to change :param new_column_name: New colum name :param schema: Optional name of schema to use.
[ "Changes", "given", "activity", "jsonb", "data", "column", "key", ".", "This", "function", "is", "useful", "when", "you", "want", "to", "reflect", "column", "name", "changes", "to", "activity", "table", "." ]
python
train
istresearch/scrapy-cluster
redis-monitor/redis_monitor.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/redis-monitor/redis_monitor.py#L494-L501
def close(self): ''' Closes the Redis Monitor and plugins ''' for plugin_key in self.plugins_dict: obj = self.plugins_dict[plugin_key] instance = obj['instance'] instance.close()
[ "def", "close", "(", "self", ")", ":", "for", "plugin_key", "in", "self", ".", "plugins_dict", ":", "obj", "=", "self", ".", "plugins_dict", "[", "plugin_key", "]", "instance", "=", "obj", "[", "'instance'", "]", "instance", ".", "close", "(", ")" ]
Closes the Redis Monitor and plugins
[ "Closes", "the", "Redis", "Monitor", "and", "plugins" ]
python
train
urbn/Caesium
caesium/document.py
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L666-L674
def delete(self, _id): """Delete a document or create a DELETE revision :param str _id: The ID of the document to be deleted :returns: JSON Mongo client response including the "n" key to show number of objects effected """ mongo_response = yield self.collection.remove({"_id": ObjectId(_id)}) raise Return(mongo_response)
[ "def", "delete", "(", "self", ",", "_id", ")", ":", "mongo_response", "=", "yield", "self", ".", "collection", ".", "remove", "(", "{", "\"_id\"", ":", "ObjectId", "(", "_id", ")", "}", ")", "raise", "Return", "(", "mongo_response", ")" ]
Delete a document or create a DELETE revision :param str _id: The ID of the document to be deleted :returns: JSON Mongo client response including the "n" key to show number of objects effected
[ "Delete", "a", "document", "or", "create", "a", "DELETE", "revision" ]
python
train
shichao-an/115wangpan
u115/api.py
https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1585-L1621
def status_human(self): """ Human readable status :return: * `DOWNLOADING`: the task is downloading files * `BEING TRANSFERRED`: the task is being transferred * `TRANSFERRED`: the task has been transferred to downloads \ directory * `SEARCHING RESOURCES`: the task is searching resources * `FAILED`: the task is failed * `DELETED`: the task is deleted * `UNKNOWN STATUS` :rtype: str """ res = None if self._deleted: return 'DELETED' if self.status == 1: res = 'DOWNLOADING' elif self.status == 2: if self.move == 0: res = 'BEING TRANSFERRED' elif self.move == 1: res = 'TRANSFERRED' elif self.move == 2: res = 'PARTIALLY TRANSFERRED' elif self.status == 4: res = 'SEARCHING RESOURCES' elif self.status == -1: res = 'FAILED' if res is not None: return res return 'UNKNOWN STATUS'
[ "def", "status_human", "(", "self", ")", ":", "res", "=", "None", "if", "self", ".", "_deleted", ":", "return", "'DELETED'", "if", "self", ".", "status", "==", "1", ":", "res", "=", "'DOWNLOADING'", "elif", "self", ".", "status", "==", "2", ":", "if"...
Human readable status :return: * `DOWNLOADING`: the task is downloading files * `BEING TRANSFERRED`: the task is being transferred * `TRANSFERRED`: the task has been transferred to downloads \ directory * `SEARCHING RESOURCES`: the task is searching resources * `FAILED`: the task is failed * `DELETED`: the task is deleted * `UNKNOWN STATUS` :rtype: str
[ "Human", "readable", "status" ]
python
train
respondcreate/django-versatileimagefield
versatileimagefield/versatileimagefield.py
https://github.com/respondcreate/django-versatileimagefield/blob/d41e279c39cccffafbe876c67596184704ae8877/versatileimagefield/versatileimagefield.py#L164-L180
def process_image(self, image, image_format, save_kwargs, width, height): """ Return a BytesIO instance of `image` that fits in a bounding box. Bounding box dimensions are `width`x`height`. """ imagefile = BytesIO() image.thumbnail( (width, height), Image.ANTIALIAS ) image.save( imagefile, **save_kwargs ) return imagefile
[ "def", "process_image", "(", "self", ",", "image", ",", "image_format", ",", "save_kwargs", ",", "width", ",", "height", ")", ":", "imagefile", "=", "BytesIO", "(", ")", "image", ".", "thumbnail", "(", "(", "width", ",", "height", ")", ",", "Image", "....
Return a BytesIO instance of `image` that fits in a bounding box. Bounding box dimensions are `width`x`height`.
[ "Return", "a", "BytesIO", "instance", "of", "image", "that", "fits", "in", "a", "bounding", "box", "." ]
python
test
aboSamoor/polyglot
polyglot/downloader.py
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/downloader.py#L963-L975
def supported_languages(self, task=None): """Languages that are covered by a specific task. Args: task (string): Task name. """ if task: collection = self.get_collection(task=task) return [isoLangs[x.id.split('.')[1]]["name"] for x in collection.packages] else: return [x.name.split()[0] for x in self.collections() if Downloader.LANG_PREFIX in x.id]
[ "def", "supported_languages", "(", "self", ",", "task", "=", "None", ")", ":", "if", "task", ":", "collection", "=", "self", ".", "get_collection", "(", "task", "=", "task", ")", "return", "[", "isoLangs", "[", "x", ".", "id", ".", "split", "(", "'.'...
Languages that are covered by a specific task. Args: task (string): Task name.
[ "Languages", "that", "are", "covered", "by", "a", "specific", "task", "." ]
python
train
pandas-dev/pandas
pandas/core/arrays/sparse.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L386-L407
def _get_fill(arr: ABCSparseArray) -> np.ndarray: """ Create a 0-dim ndarray containing the fill value Parameters ---------- arr : SparseArray Returns ------- fill_value : ndarray 0-dim ndarray with just the fill value. Notes ----- coerce fill_value to arr dtype if possible int64 SparseArray can have NaN as fill_value if there is no missing """ try: return np.asarray(arr.fill_value, dtype=arr.dtype.subtype) except ValueError: return np.asarray(arr.fill_value)
[ "def", "_get_fill", "(", "arr", ":", "ABCSparseArray", ")", "->", "np", ".", "ndarray", ":", "try", ":", "return", "np", ".", "asarray", "(", "arr", ".", "fill_value", ",", "dtype", "=", "arr", ".", "dtype", ".", "subtype", ")", "except", "ValueError",...
Create a 0-dim ndarray containing the fill value Parameters ---------- arr : SparseArray Returns ------- fill_value : ndarray 0-dim ndarray with just the fill value. Notes ----- coerce fill_value to arr dtype if possible int64 SparseArray can have NaN as fill_value if there is no missing
[ "Create", "a", "0", "-", "dim", "ndarray", "containing", "the", "fill", "value" ]
python
train
juga0/dhcpcanon
dhcpcanon/dhcpcapfsm.py
https://github.com/juga0/dhcpcanon/blob/9f51a29e57fe93dc93fb22bb0ed12fcfe9557e59/dhcpcanon/dhcpcapfsm.py#L38-L40
def dict_self(self): """Return the self object attributes not inherited as dict.""" return {k: v for k, v in self.__dict__.items() if k in FSM_ATTRS}
[ "def", "dict_self", "(", "self", ")", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "__dict__", ".", "items", "(", ")", "if", "k", "in", "FSM_ATTRS", "}" ]
Return the self object attributes not inherited as dict.
[ "Return", "the", "self", "object", "attributes", "not", "inherited", "as", "dict", "." ]
python
test
jayclassless/tidypy
src/tidypy/finder.py
https://github.com/jayclassless/tidypy/blob/3c3497ca377fbbe937103b77b02b326c860c748f/src/tidypy/finder.py#L103-L120
def files(self, filters=None): """ A generator that produces a sequence of paths to files in the project that matches the specified filters. :param filters: the regular expressions to use when finding files in the project. If not specified, all files are returned. :type filters: list(str) """ filters = compile_masks(filters or [r'.*']) for files in itervalues(self._found): for file_ in files: relpath = text_type(Path(file_).relative_to(self.base_path)) if matches_masks(relpath, filters): yield file_
[ "def", "files", "(", "self", ",", "filters", "=", "None", ")", ":", "filters", "=", "compile_masks", "(", "filters", "or", "[", "r'.*'", "]", ")", "for", "files", "in", "itervalues", "(", "self", ".", "_found", ")", ":", "for", "file_", "in", "files"...
A generator that produces a sequence of paths to files in the project that matches the specified filters. :param filters: the regular expressions to use when finding files in the project. If not specified, all files are returned. :type filters: list(str)
[ "A", "generator", "that", "produces", "a", "sequence", "of", "paths", "to", "files", "in", "the", "project", "that", "matches", "the", "specified", "filters", "." ]
python
valid
go-macaroon-bakery/py-macaroon-bakery
macaroonbakery/httpbakery/_client.py
https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/httpbakery/_client.py#L302-L344
def extract_macaroons(headers_or_request): ''' Returns an array of any macaroons found in the given slice of cookies. If the argument implements a get_header method, that will be used instead of the get method to retrieve headers. @param headers_or_request: dict of headers or a urllib.request.Request-like object. @return: A list of list of mpy macaroons ''' def get_header(key, default=None): try: return headers_or_request.get_header(key, default) except AttributeError: return headers_or_request.get(key, default) mss = [] def add_macaroon(data): try: data = utils.b64decode(data) data_as_objs = json.loads(data.decode('utf-8')) except ValueError: return ms = [utils.macaroon_from_dict(x) for x in data_as_objs] mss.append(ms) cookie_header = get_header('Cookie') if cookie_header is not None: cs = SimpleCookie() # The cookie might be a unicode object, so convert it # to ASCII. This may cause an exception under Python 2. # TODO is that a problem? cs.load(str(cookie_header)) for c in cs: if c.startswith('macaroon-'): add_macaroon(cs[c].value) # Python doesn't make it easy to have multiple values for a # key, so split the header instead, which is necessary # for HTTP1.1 compatibility anyway (see RFC 7230, section 3.2.2) macaroon_header = get_header('Macaroons') if macaroon_header is not None: for h in macaroon_header.split(','): add_macaroon(h) return mss
[ "def", "extract_macaroons", "(", "headers_or_request", ")", ":", "def", "get_header", "(", "key", ",", "default", "=", "None", ")", ":", "try", ":", "return", "headers_or_request", ".", "get_header", "(", "key", ",", "default", ")", "except", "AttributeError",...
Returns an array of any macaroons found in the given slice of cookies. If the argument implements a get_header method, that will be used instead of the get method to retrieve headers. @param headers_or_request: dict of headers or a urllib.request.Request-like object. @return: A list of list of mpy macaroons
[ "Returns", "an", "array", "of", "any", "macaroons", "found", "in", "the", "given", "slice", "of", "cookies", ".", "If", "the", "argument", "implements", "a", "get_header", "method", "that", "will", "be", "used", "instead", "of", "the", "get", "method", "to...
python
train
mabuchilab/QNET
docs/_extensions/inheritance_diagram.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/docs/_extensions/inheritance_diagram.py#L75-L100
def try_import(objname): # type: (unicode) -> Any """Import a object or module using *name* and *currentmodule*. *name* should be a relative name from *currentmodule* or a fully-qualified name. Returns imported object or module. If failed, returns None value. """ try: __import__(objname) return sys.modules.get(objname) # type: ignore except (ImportError, ValueError): # ValueError,py27 -> ImportError,py3 matched = module_sig_re.match(objname) # type: ignore if not matched: return None modname, attrname = matched.groups() if modname is None: return None try: __import__(modname) return getattr(sys.modules.get(modname), attrname, None) except (ImportError, ValueError): # ValueError,py27 -> ImportError,py3 return None
[ "def", "try_import", "(", "objname", ")", ":", "# type: (unicode) -> Any", "try", ":", "__import__", "(", "objname", ")", "return", "sys", ".", "modules", ".", "get", "(", "objname", ")", "# type: ignore", "except", "(", "ImportError", ",", "ValueError", ")", ...
Import a object or module using *name* and *currentmodule*. *name* should be a relative name from *currentmodule* or a fully-qualified name. Returns imported object or module. If failed, returns None value.
[ "Import", "a", "object", "or", "module", "using", "*", "name", "*", "and", "*", "currentmodule", "*", ".", "*", "name", "*", "should", "be", "a", "relative", "name", "from", "*", "currentmodule", "*", "or", "a", "fully", "-", "qualified", "name", "." ]
python
train
bachya/pyairvisual
example.py
https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/example.py#L13-L87
async def main() -> None: # pylint: disable=too-many-statements """Create the aiohttp session and run the example.""" logging.basicConfig(level=logging.INFO) async with ClientSession() as websession: client = Client(websession, api_key='<API KEY>') # Get supported locations (by location): try: _LOGGER.info(await client.supported.countries()) _LOGGER.info(await client.supported.states('USA')) _LOGGER.info(await client.supported.cities('USA', 'Colorado')) except AirVisualError as err: _LOGGER.error('There was an error: %s', err) # Get supported locations (by station): try: _LOGGER.info( await client.supported.stations( 'USA', 'Colorado', 'Denver')) except UnauthorizedError as err: _LOGGER.error(err) except AirVisualError as err: _LOGGER.error('There was an error: %s', err) # Get data by nearest location (by IP): try: _LOGGER.info(await client.api.nearest_city()) except AirVisualError as err: _LOGGER.error('There was an error: %s', err) # Get data by nearest location (coordinates or explicit location): try: _LOGGER.info( await client.api.nearest_city( latitude=39.742599, longitude=-104.9942557)) _LOGGER.info( await client.api.city( city='Los Angeles', state='California', country='USA')) except AirVisualError as err: _LOGGER.error('There was an error: %s', err) # Get data by nearest station (by IP): try: _LOGGER.info(await client.api.nearest_station()) except UnauthorizedError as err: _LOGGER.error(err) except AirVisualError as err: _LOGGER.error('There was an error: %s', err) # Get data by nearest station (by coordinates or explicit location): try: _LOGGER.info( await client.api.nearest_station( latitude=39.742599, longitude=-104.9942557)) _LOGGER.info( await client.api.station( station='US Embassy in Beijing', city='Beijing', state='Beijing', country='China')) except UnauthorizedError as err: _LOGGER.error(err) except AirVisualError as err: _LOGGER.error('There was an error: %s', err) # Get data on AQI ranking: try: _LOGGER.info(await client.api.ranking()) except 
UnauthorizedError as err: _LOGGER.error(err) except AirVisualError as err: _LOGGER.error('There was an error: %s', err) # Get info on a AirVisual Pro node: _LOGGER.info(await client.api.node('zEp8CifbnasWtToBc'))
[ "async", "def", "main", "(", ")", "->", "None", ":", "# pylint: disable=too-many-statements", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "async", "with", "ClientSession", "(", ")", "as", "websession", ":", "client", "=", "...
Create the aiohttp session and run the example.
[ "Create", "the", "aiohttp", "session", "and", "run", "the", "example", "." ]
python
train
numenta/htmresearch
projects/feedback/feedback_sequences.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/feedback/feedback_sequences.py#L58-L71
def generateSequences(n=2048, w=40, sequenceLength=5, sequenceCount=2, sharedRange=None, seed=42): """ Generate high order sequences using SequenceMachine """ # Lots of room for noise sdrs patternAlphabetSize = 10*(sequenceLength * sequenceCount) patternMachine = PatternMachine(n, w, patternAlphabetSize, seed) sequenceMachine = SequenceMachine(patternMachine, seed) numbers = sequenceMachine.generateNumbers(sequenceCount, sequenceLength, sharedRange=sharedRange ) generatedSequences = sequenceMachine.generateFromNumbers(numbers) return sequenceMachine, generatedSequences, numbers
[ "def", "generateSequences", "(", "n", "=", "2048", ",", "w", "=", "40", ",", "sequenceLength", "=", "5", ",", "sequenceCount", "=", "2", ",", "sharedRange", "=", "None", ",", "seed", "=", "42", ")", ":", "# Lots of room for noise sdrs", "patternAlphabetSize"...
Generate high order sequences using SequenceMachine
[ "Generate", "high", "order", "sequences", "using", "SequenceMachine" ]
python
train
tensorflow/tensorboard
tensorboard/plugins/scalar/summary.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/scalar/summary.py#L72-L109
def pb(name, data, display_name=None, description=None): """Create a legacy scalar summary protobuf. Arguments: name: A unique name for the generated summary, including any desired name scopes. data: A rank-0 `np.array` or array-like form (so raw `int`s and `float`s are fine, too). display_name: Optional name for this summary in TensorBoard, as a `str`. Defaults to `name`. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `tf.Summary` protobuf object. """ # TODO(nickfelt): remove on-demand imports once dep situation is fixed. import tensorflow.compat.v1 as tf data = np.array(data) if data.shape != (): raise ValueError('Expected scalar shape for data, saw shape: %s.' % data.shape) if data.dtype.kind not in ('b', 'i', 'u', 'f'): # bool, int, uint, float raise ValueError('Cast %s to float is not supported' % data.dtype.name) tensor = tf.make_tensor_proto(data.astype(np.float32)) if display_name is None: display_name = name summary_metadata = metadata.create_summary_metadata( display_name=display_name, description=description) tf_summary_metadata = tf.SummaryMetadata.FromString( summary_metadata.SerializeToString()) summary = tf.Summary() summary.value.add(tag='%s/scalar_summary' % name, metadata=tf_summary_metadata, tensor=tensor) return summary
[ "def", "pb", "(", "name", ",", "data", ",", "display_name", "=", "None", ",", "description", "=", "None", ")", ":", "# TODO(nickfelt): remove on-demand imports once dep situation is fixed.", "import", "tensorflow", ".", "compat", ".", "v1", "as", "tf", "data", "="...
Create a legacy scalar summary protobuf. Arguments: name: A unique name for the generated summary, including any desired name scopes. data: A rank-0 `np.array` or array-like form (so raw `int`s and `float`s are fine, too). display_name: Optional name for this summary in TensorBoard, as a `str`. Defaults to `name`. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `tf.Summary` protobuf object.
[ "Create", "a", "legacy", "scalar", "summary", "protobuf", "." ]
python
train
GeorgeArgyros/sfalearn
sfalearn/observationtableinit.py
https://github.com/GeorgeArgyros/sfalearn/blob/68a93f507e2fb7d89ca04bd8a8f0da2d6c680443/sfalearn/observationtableinit.py#L124-L135
def _reverse_to_source(self, target, group1): """ Args: target (dict): A table containing the reverse transitions for each state group1 (list): A group of states Return: Set: A set of states for which there is a transition with the states of the group """ new_group = [] for dst in group1: new_group += target[dst] return set(new_group)
[ "def", "_reverse_to_source", "(", "self", ",", "target", ",", "group1", ")", ":", "new_group", "=", "[", "]", "for", "dst", "in", "group1", ":", "new_group", "+=", "target", "[", "dst", "]", "return", "set", "(", "new_group", ")" ]
Args: target (dict): A table containing the reverse transitions for each state group1 (list): A group of states Return: Set: A set of states for which there is a transition with the states of the group
[ "Args", ":", "target", "(", "dict", ")", ":", "A", "table", "containing", "the", "reverse", "transitions", "for", "each", "state", "group1", "(", "list", ")", ":", "A", "group", "of", "states", "Return", ":", "Set", ":", "A", "set", "of", "states", "...
python
train
ska-sa/katcp-python
katcp/inspecting_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/inspecting_client.py#L541-L590
def inspect(self): """Inspect device requests and sensors, update model Returns ------- Tornado future that resolves with: model_changes : Nested AttrDict or None Contains sets of added/removed request/sensor names Example structure: {'requests': { 'added': set(['req1', 'req2']), 'removed': set(['req10', 'req20'])} 'sensors': { 'added': set(['sens1', 'sens2']), 'removed': set(['sens10', 'sens20'])} } If there are no changes keys may be omitted. If an item is in both the 'added' and 'removed' sets that means that it changed. If neither request not sensor changes are present, None is returned instead of a nested structure. """ timeout_manager = future_timeout_manager(self.sync_timeout) sensor_index_before = copy.copy(self._sensors_index) request_index_before = copy.copy(self._requests_index) try: request_changes = yield self.inspect_requests( timeout=timeout_manager.remaining()) sensor_changes = yield self.inspect_sensors( timeout=timeout_manager.remaining()) except Exception: # Ensure atomicity of sensor and request updates ; if the one # fails, the other should act as if it has failed too. self._sensors_index = sensor_index_before self._requests_index = request_index_before raise model_changes = AttrDict() if request_changes: model_changes.requests = request_changes if sensor_changes: model_changes.sensors = sensor_changes if model_changes: raise Return(model_changes)
[ "def", "inspect", "(", "self", ")", ":", "timeout_manager", "=", "future_timeout_manager", "(", "self", ".", "sync_timeout", ")", "sensor_index_before", "=", "copy", ".", "copy", "(", "self", ".", "_sensors_index", ")", "request_index_before", "=", "copy", ".", ...
Inspect device requests and sensors, update model Returns ------- Tornado future that resolves with: model_changes : Nested AttrDict or None Contains sets of added/removed request/sensor names Example structure: {'requests': { 'added': set(['req1', 'req2']), 'removed': set(['req10', 'req20'])} 'sensors': { 'added': set(['sens1', 'sens2']), 'removed': set(['sens10', 'sens20'])} } If there are no changes keys may be omitted. If an item is in both the 'added' and 'removed' sets that means that it changed. If neither request not sensor changes are present, None is returned instead of a nested structure.
[ "Inspect", "device", "requests", "and", "sensors", "update", "model" ]
python
train
collectiveacuity/labPack
labpack/platforms/aws/ec2.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/aws/ec2.py#L1564-L1600
def list_keypairs(self): ''' a method to discover the list of key pairs on AWS :return: list of key pairs ''' title = '%s.list_keypairs' % self.__class__.__name__ # request subnet list from AWS self.iam.printer('Querying AWS region %s for key pairs.' % self.iam.region_name) keypair_list = [] try: response = self.connection.describe_key_pairs() except: raise AWSConnectionError(title) response_list = [] if 'KeyPairs' in response: response_list = response['KeyPairs'] # construct list of keypairs from response for sub_dict in response_list: keypair_list.append(sub_dict['KeyName']) # report results and return list if keypair_list: print_out = 'Found key pair' if len(keypair_list) > 1: print_out += 's' from labpack.parsing.grammar import join_words print_out += ' %s.' % join_words(keypair_list) self.iam.printer(print_out) else: self.iam.printer('No key pairs found.') return keypair_list
[ "def", "list_keypairs", "(", "self", ")", ":", "title", "=", "'%s.list_keypairs'", "%", "self", ".", "__class__", ".", "__name__", "# request subnet list from AWS", "self", ".", "iam", ".", "printer", "(", "'Querying AWS region %s for key pairs.'", "%", "self", ".",...
a method to discover the list of key pairs on AWS :return: list of key pairs
[ "a", "method", "to", "discover", "the", "list", "of", "key", "pairs", "on", "AWS" ]
python
train
dragnet-org/dragnet
dragnet/features/__init__.py
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/features/__init__.py#L8-L21
def get_feature(name): """Get an instance of a ``Features`` class by ``name`` (str).""" if name == 'css': return CSSFeatures() elif name == 'kohlschuetter': return KohlschuetterFeatures() elif name == 'readability': return ReadabilityFeatures() elif name == 'weninger': return WeningerFeatures() elif name == 'clustered_weninger': return ClusteredWeningerFeatures() else: raise ValueError('invalid feature name: "{}"'.format(name))
[ "def", "get_feature", "(", "name", ")", ":", "if", "name", "==", "'css'", ":", "return", "CSSFeatures", "(", ")", "elif", "name", "==", "'kohlschuetter'", ":", "return", "KohlschuetterFeatures", "(", ")", "elif", "name", "==", "'readability'", ":", "return",...
Get an instance of a ``Features`` class by ``name`` (str).
[ "Get", "an", "instance", "of", "a", "Features", "class", "by", "name", "(", "str", ")", "." ]
python
train
daler/metaseq
metaseq/results_table.py
https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/results_table.py#L789-L798
def changed(self, thresh=0.05, idx=True): """ Changed features. {threshdoc} """ ind = self.data[self.pval_column] <= thresh if idx: return ind return self[ind]
[ "def", "changed", "(", "self", ",", "thresh", "=", "0.05", ",", "idx", "=", "True", ")", ":", "ind", "=", "self", ".", "data", "[", "self", ".", "pval_column", "]", "<=", "thresh", "if", "idx", ":", "return", "ind", "return", "self", "[", "ind", ...
Changed features. {threshdoc}
[ "Changed", "features", "." ]
python
train
hardbyte/python-can
can/interfaces/ics_neovi/neovi_bus.py
https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/ics_neovi/neovi_bus.py#L164-L174
def get_serial_number(device): """Decode (if needed) and return the ICS device serial string :param device: ics device :return: ics device serial string :rtype: str """ a0000 = 604661760 if device.SerialNumber >= a0000: return ics.base36enc(device.SerialNumber) return str(device.SerialNumber)
[ "def", "get_serial_number", "(", "device", ")", ":", "a0000", "=", "604661760", "if", "device", ".", "SerialNumber", ">=", "a0000", ":", "return", "ics", ".", "base36enc", "(", "device", ".", "SerialNumber", ")", "return", "str", "(", "device", ".", "Seria...
Decode (if needed) and return the ICS device serial string :param device: ics device :return: ics device serial string :rtype: str
[ "Decode", "(", "if", "needed", ")", "and", "return", "the", "ICS", "device", "serial", "string" ]
python
train
ZELLMECHANIK-DRESDEN/dclab
dclab/kde_methods.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L214-L257
def kde_multivariate(events_x, events_y, xout=None, yout=None, bw=None): """ Multivariate Kernel Density Estimation Parameters ---------- events_x, events_y: 1D ndarray The input points for kernel density estimation. Input is flattened automatically. bw: tuple (bwx, bwy) or None The bandwith for kernel density estimation. xout, yout: ndarray The coordinates at which the KDE should be computed. If set to none, input coordinates are used. Returns ------- density: ndarray, same shape as `xout` The KDE for the points in (xout, yout) See Also -------- `statsmodels.nonparametric.kernel_density.KDEMultivariate` """ valid_combi = ((xout is None and yout is None) or (xout is not None and yout is not None) ) if not valid_combi: raise ValueError("Both `xout` and `yout` must be (un)set.") if yout is None and yout is None: xout = events_x yout = events_y if bw is None: # divide by 2 to make it comparable to histogram KDE bw = (bin_width_doane(events_x) / 2, bin_width_doane(events_y) / 2) positions = np.vstack([xout.flatten(), yout.flatten()]) estimator_ly = KDEMultivariate(data=[events_x.flatten(), events_y.flatten()], var_type='cc', bw=bw) density = estimator_ly.pdf(positions) return density.reshape(xout.shape)
[ "def", "kde_multivariate", "(", "events_x", ",", "events_y", ",", "xout", "=", "None", ",", "yout", "=", "None", ",", "bw", "=", "None", ")", ":", "valid_combi", "=", "(", "(", "xout", "is", "None", "and", "yout", "is", "None", ")", "or", "(", "xou...
Multivariate Kernel Density Estimation Parameters ---------- events_x, events_y: 1D ndarray The input points for kernel density estimation. Input is flattened automatically. bw: tuple (bwx, bwy) or None The bandwith for kernel density estimation. xout, yout: ndarray The coordinates at which the KDE should be computed. If set to none, input coordinates are used. Returns ------- density: ndarray, same shape as `xout` The KDE for the points in (xout, yout) See Also -------- `statsmodels.nonparametric.kernel_density.KDEMultivariate`
[ "Multivariate", "Kernel", "Density", "Estimation" ]
python
train
thetarkus/django-semanticui-forms
semanticuiforms/templatetags/semanticui.py
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/templatetags/semanticui.py#L178-L214
def render_layout_form(form, layout=None, **kwargs): """Render an entire form with Semantic UI wrappers for each field with a layout provided in the template or in the form class Args: form (form): Django Form layout (tuple): layout design kwargs (dict): other attributes will be passed to fields Returns: string: HTML of Django Form fields with Semantic UI wrappers """ def make_component(type_, *args): """Loop through tuples to make field wrappers for fields. """ if type_ == "Text": return "".join(args) elif type_ == "Field": result = "" for c in args: if isinstance(c, tuple): result += make_component(*c) elif isinstance(c, str): result += render_field(form.__getitem__(c), **kwargs) return result else: if len(args) < 2: return "" result = "".join([make_component(*c) for c in args]) if type_: return "<div class=\"%s\">%s</div>" % (type_.lower(), result) else: return result return mark_safe("".join([make_component(*component) for component in layout]))
[ "def", "render_layout_form", "(", "form", ",", "layout", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "make_component", "(", "type_", ",", "*", "args", ")", ":", "\"\"\"Loop through tuples to make field wrappers for fields.\r\n\t\t\"\"\"", "if", "type_", ...
Render an entire form with Semantic UI wrappers for each field with a layout provided in the template or in the form class Args: form (form): Django Form layout (tuple): layout design kwargs (dict): other attributes will be passed to fields Returns: string: HTML of Django Form fields with Semantic UI wrappers
[ "Render", "an", "entire", "form", "with", "Semantic", "UI", "wrappers", "for", "each", "field", "with", "a", "layout", "provided", "in", "the", "template", "or", "in", "the", "form", "class", "Args", ":", "form", "(", "form", ")", ":", "Django", "Form", ...
python
train
coghost/izen
izen/dec.py
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/dec.py#L355-L413
def block_until_expired(timeout): """ 阻塞当前程序运行, 直到超时 .. note: 会阻塞当前程序运行 - 如果 ``timeout大于0``, 则当作 ``计时阻塞器`` 来使用 .. code:: python @run_until(0.1) def s2(): m = 5 while m: print('s2: ', m, now()) time.sleep(1) m -= 1 return 'good' :param timeout: int :type timeout: :return: :rtype: """ if not callable(timeout): def dec(fn): @wraps(fn) def wrapper_(*args, **kwargs): class TimeLimited(threading.Thread): def __init__(self, _error=None, ): threading.Thread.__init__(self) self.error_ = _error self.result = None self.stopped = False def run(self): self.result = fn(*args, **kwargs) try: float(timeout) except ValueError as err: print('err: ', err) return None, None t = TimeLimited() t.start() if timeout > 0: t.join(timeout) if t.isAlive(): print('[timeout running out for {}]'.format(fn.__name__)) _async_raise(t.ident, SystemExit) return None, None return t.error_, t.result return wrapper_ return dec
[ "def", "block_until_expired", "(", "timeout", ")", ":", "if", "not", "callable", "(", "timeout", ")", ":", "def", "dec", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "wrapper_", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "c...
阻塞当前程序运行, 直到超时 .. note: 会阻塞当前程序运行 - 如果 ``timeout大于0``, 则当作 ``计时阻塞器`` 来使用 .. code:: python @run_until(0.1) def s2(): m = 5 while m: print('s2: ', m, now()) time.sleep(1) m -= 1 return 'good' :param timeout: int :type timeout: :return: :rtype:
[ "阻塞当前程序运行", "直到超时" ]
python
train
SpriteLink/NIPAP
nipap-cli/nipap_cli/nipap_cli.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap-cli/nipap_cli/nipap_cli.py#L1626-L1703
def modify_prefix(arg, opts, shell_opts): """ Modify the prefix 'arg' with the options 'opts' """ modify_confirmed = shell_opts.force spec = { 'prefix': arg } v = get_vrf(opts.get('vrf_rt'), abort=True) spec['vrf_rt'] = v.rt res = Prefix.list(spec) if len(res) == 0: print("Prefix %s not found in %s." % (arg, vrf_format(v)), file=sys.stderr) return p = res[0] if 'prefix' in opts: p.prefix = opts['prefix'] if 'description' in opts: p.description = opts['description'] if 'comment' in opts: p.comment = opts['comment'] if 'tags' in opts: tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0] p.tags = {} for tag_name in tags: tag = Tag() tag.name = tag_name p.tags[tag_name] = tag if 'node' in opts: p.node = opts['node'] if 'type' in opts: p.type = opts['type'] if 'status' in opts: p.status = opts['status'] if 'country' in opts: p.country = opts['country'] if 'order_id' in opts: p.order_id = opts['order_id'] if 'customer_id' in opts: p.customer_id = opts['customer_id'] if 'vlan' in opts: p.vlan = opts['vlan'] if 'alarm_priority' in opts: p.alarm_priority = opts['alarm_priority'] if 'monitor' in opts: p.monitor = _str_to_bool(opts['monitor']) if 'expires' in opts: p.expires = opts['expires'] for avp in opts.get('extra-attribute', []): try: key, value = avp.split('=', 1) except ValueError: print("ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp, file=sys.stderr) return p.avps[key] = value # Promt user if prefix has authoritative source != nipap if not modify_confirmed and p.authoritative_source.lower() != 'nipap': res = input("Prefix %s in %s is managed by system '%s'. Are you sure you want to modify it? [y/n]: " % (p.prefix, vrf_format(p.vrf), p.authoritative_source)) # If the user declines, short-circuit... if res.lower() not in [ 'y', 'yes' ]: print("Operation aborted.") return try: p.save() except NipapError as exc: print("Could not save prefix changes: %s" % str(exc), file=sys.stderr) sys.exit(1) print("Prefix %s in %s saved." 
% (p.display_prefix, vrf_format(p.vrf)))
[ "def", "modify_prefix", "(", "arg", ",", "opts", ",", "shell_opts", ")", ":", "modify_confirmed", "=", "shell_opts", ".", "force", "spec", "=", "{", "'prefix'", ":", "arg", "}", "v", "=", "get_vrf", "(", "opts", ".", "get", "(", "'vrf_rt'", ")", ",", ...
Modify the prefix 'arg' with the options 'opts'
[ "Modify", "the", "prefix", "arg", "with", "the", "options", "opts" ]
python
train
prompt-toolkit/ptpython
ptpython/layout.py
https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/layout.py#L404-L437
def show_sidebar_button_info(python_input): """ Create `Layout` for the information in the right-bottom corner. (The right part of the status bar.) """ @if_mousedown def toggle_sidebar(mouse_event): " Click handler for the menu. " python_input.show_sidebar = not python_input.show_sidebar version = sys.version_info tokens = [ ('class:status-toolbar.key', '[F2]', toggle_sidebar), ('class:status-toolbar', ' Menu', toggle_sidebar), ('class:status-toolbar', ' - '), ('class:status-toolbar.python-version', '%s %i.%i.%i' % (platform.python_implementation(), version[0], version[1], version[2])), ('class:status-toolbar', ' '), ] width = fragment_list_width(tokens) def get_text_fragments(): # Python version return tokens return ConditionalContainer( content=Window( FormattedTextControl(get_text_fragments), style='class:status-toolbar', height=Dimension.exact(1), width=Dimension.exact(width)), filter=~is_done & renderer_height_is_known & Condition(lambda: python_input.show_status_bar and not python_input.show_exit_confirmation))
[ "def", "show_sidebar_button_info", "(", "python_input", ")", ":", "@", "if_mousedown", "def", "toggle_sidebar", "(", "mouse_event", ")", ":", "\" Click handler for the menu. \"", "python_input", ".", "show_sidebar", "=", "not", "python_input", ".", "show_sidebar", "vers...
Create `Layout` for the information in the right-bottom corner. (The right part of the status bar.)
[ "Create", "Layout", "for", "the", "information", "in", "the", "right", "-", "bottom", "corner", ".", "(", "The", "right", "part", "of", "the", "status", "bar", ".", ")" ]
python
train
SmileyChris/easy-thumbnails
easy_thumbnails/files.py
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/files.py#L546-L575
def save_thumbnail(self, thumbnail): """ Save a thumbnail to the thumbnail_storage. Also triggers the ``thumbnail_created`` signal and caches the thumbnail values and dimensions for future lookups. """ filename = thumbnail.name try: self.thumbnail_storage.delete(filename) except Exception: pass self.thumbnail_storage.save(filename, thumbnail) thumb_cache = self.get_thumbnail_cache( thumbnail.name, create=True, update=True) # Cache thumbnail dimensions. if settings.THUMBNAIL_CACHE_DIMENSIONS: dimensions_cache, created = ( models.ThumbnailDimensions.objects.get_or_create( thumbnail=thumb_cache, defaults={'width': thumbnail.width, 'height': thumbnail.height})) if not created: dimensions_cache.width = thumbnail.width dimensions_cache.height = thumbnail.height dimensions_cache.save() signals.thumbnail_created.send(sender=thumbnail)
[ "def", "save_thumbnail", "(", "self", ",", "thumbnail", ")", ":", "filename", "=", "thumbnail", ".", "name", "try", ":", "self", ".", "thumbnail_storage", ".", "delete", "(", "filename", ")", "except", "Exception", ":", "pass", "self", ".", "thumbnail_storag...
Save a thumbnail to the thumbnail_storage. Also triggers the ``thumbnail_created`` signal and caches the thumbnail values and dimensions for future lookups.
[ "Save", "a", "thumbnail", "to", "the", "thumbnail_storage", "." ]
python
train
rochacbruno/flasgger
flasgger/utils.py
https://github.com/rochacbruno/flasgger/blob/fef154f61d7afca548067be0c758c3dd71cc4c97/flasgger/utils.py#L459-L493
def load_from_file(swag_path, swag_type='yml', root_path=None): """ Load specs from YAML file """ if swag_type not in ('yaml', 'yml'): raise AttributeError("Currently only yaml or yml supported") # TODO: support JSON try: enc = detect_by_bom(swag_path) with codecs.open(swag_path, encoding=enc) as yaml_file: return yaml_file.read() except IOError: # not in the same dir, add dirname swag_path = os.path.join( root_path or os.path.dirname(__file__), swag_path ) try: enc = detect_by_bom(swag_path) with codecs.open(swag_path, encoding=enc) as yaml_file: return yaml_file.read() except IOError: # pragma: no cover # if package dir # see https://github.com/rochacbruno/flasgger/pull/104 # Still not able to reproduce this case # test are in examples/package_example # need more detail on how to reproduce IOError here swag_path = swag_path.replace("/", os.sep).replace("\\", os.sep) path = swag_path.replace( (root_path or os.path.dirname(__file__)), '' ).split(os.sep)[1:] site_package = imp.find_module(path[0])[1] swag_path = os.path.join(site_package, os.sep.join(path[1:])) with open(swag_path) as yaml_file: return yaml_file.read()
[ "def", "load_from_file", "(", "swag_path", ",", "swag_type", "=", "'yml'", ",", "root_path", "=", "None", ")", ":", "if", "swag_type", "not", "in", "(", "'yaml'", ",", "'yml'", ")", ":", "raise", "AttributeError", "(", "\"Currently only yaml or yml supported\"",...
Load specs from YAML file
[ "Load", "specs", "from", "YAML", "file" ]
python
train
xray7224/PyPump
pypump/store.py
https://github.com/xray7224/PyPump/blob/f921f691c39fe021f4fd124b6bc91718c9e49b4a/pypump/store.py#L191-L203
def load(cls, webfinger, pypump): """ Load JSON from disk into store object """ filename = cls.get_filename() if os.path.isfile(filename): data = open(filename).read() data = json.loads(data) store = cls(data, filename=filename) else: store = cls(filename=filename) store.prefix = webfinger return store
[ "def", "load", "(", "cls", ",", "webfinger", ",", "pypump", ")", ":", "filename", "=", "cls", ".", "get_filename", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "data", "=", "open", "(", "filename", ")", ".", "read", ...
Load JSON from disk into store object
[ "Load", "JSON", "from", "disk", "into", "store", "object" ]
python
train
aarongarrett/inspyred
inspyred/ec/replacers.py
https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/inspyred/ec/replacers.py#L171-L191
def plus_replacement(random, population, parents, offspring, args): """Performs "plus" replacement. This function performs "plus" replacement, which means that the entire existing population is replaced by the best population-many elements from the combined set of parents and offspring. .. Arguments: random -- the random number generator object population -- the population of individuals parents -- the list of parent individuals offspring -- the list of offspring individuals args -- a dictionary of keyword arguments """ pool = list(offspring) pool.extend(parents) pool.sort(reverse=True) survivors = pool[:len(population)] return survivors
[ "def", "plus_replacement", "(", "random", ",", "population", ",", "parents", ",", "offspring", ",", "args", ")", ":", "pool", "=", "list", "(", "offspring", ")", "pool", ".", "extend", "(", "parents", ")", "pool", ".", "sort", "(", "reverse", "=", "Tru...
Performs "plus" replacement. This function performs "plus" replacement, which means that the entire existing population is replaced by the best population-many elements from the combined set of parents and offspring. .. Arguments: random -- the random number generator object population -- the population of individuals parents -- the list of parent individuals offspring -- the list of offspring individuals args -- a dictionary of keyword arguments
[ "Performs", "plus", "replacement", ".", "This", "function", "performs", "plus", "replacement", "which", "means", "that", "the", "entire", "existing", "population", "is", "replaced", "by", "the", "best", "population", "-", "many", "elements", "from", "the", "comb...
python
train
20c/vodka
vodka/bartender.py
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/bartender.py#L28-L34
def options(f): """ Shared options, used by all bartender commands """ f = click.option('--config', envvar='VODKA_HOME', default=click.get_app_dir('vodka'), help="location of config file")(f) return f
[ "def", "options", "(", "f", ")", ":", "f", "=", "click", ".", "option", "(", "'--config'", ",", "envvar", "=", "'VODKA_HOME'", ",", "default", "=", "click", ".", "get_app_dir", "(", "'vodka'", ")", ",", "help", "=", "\"location of config file\"", ")", "(...
Shared options, used by all bartender commands
[ "Shared", "options", "used", "by", "all", "bartender", "commands" ]
python
train
Becksteinlab/GromacsWrapper
gromacs/fileformats/xvg.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/fileformats/xvg.py#L836-L893
def decimate(self, method, a, maxpoints=10000, **kwargs):
    """Reduce data *a* to at most *maxpoints* points using *method*.

    A 1D input array is first promoted to shape ``(2, N)`` where row 0 is
    simply the running index. If the array already holds no more than
    *maxpoints* points, or *maxpoints* is ``None``, it is returned as is.
    The default for *maxpoints* is 10000.

    Valid values for the reduction *method* (all bin the data along the
    time axis):

    * "mean": average within each bin (:meth:`XVG.decimate_mean`)
    * "circmean": circular mean within each bin, for angular data in
      degrees; limits are set with the *low* and *high* keywords
      (:meth:`XVG.decimate_circmean`)
    * "min" and "max": extremum within each bin
    * "rms": root mean square within each bin, for averaging standard
      deviations and errors (:meth:`XVG.decimate_rms`)
    * "percentile" with keyword *per*: reduce each bin to the percentile
      *per* (:meth:`XVG.decimate_percentile`)
    * "smooth": subsample from a smoothed function built with a running
      average whose step derives from the original point count and
      *maxpoints* (:meth:`XVG.decimate_smooth`)

    :Returns: numpy array ``(M', N')`` from a ``(M', N)`` array with
              ``M' == M`` (except for 1D input, see above) and
              ``N' <= N`` (``N'`` is *maxpoints*).
    """
    dispatch = {'mean': self.decimate_mean,
                'min': self.decimate_min,
                'max': self.decimate_max,
                'smooth': self.decimate_smooth,
                'rms': self.decimate_rms,
                'percentile': self.decimate_percentile,
                'error': self.decimate_error,  # undocumented, not working well
                'circmean': self.decimate_circmean,
                }
    if a.ndim == 1:
        # Prepend an index row so downstream code always sees 2D data
        # (probably should do this in class/init anyway...)
        a = numpy.vstack([numpy.arange(len(a)), a])
    npoints = a.shape[-1]  # assume 1D/2D array, last dimension varies fastest
    if maxpoints is None or npoints <= maxpoints:
        return a
    return dispatch[method](a, maxpoints, **kwargs)
[ "def", "decimate", "(", "self", ",", "method", ",", "a", ",", "maxpoints", "=", "10000", ",", "*", "*", "kwargs", ")", ":", "methods", "=", "{", "'mean'", ":", "self", ".", "decimate_mean", ",", "'min'", ":", "self", ".", "decimate_min", ",", "'max'"...
Decimate data *a* to *maxpoints* using *method*. If *a* is a 1D array then it is promoted to a (2, N) array where the first column simply contains the index. If the array contains fewer than *maxpoints* points or if *maxpoints* is ``None`` then it is returned as it is. The default for *maxpoints* is 10000. Valid values for the reduction *method*: * "mean", uses :meth:`XVG.decimate_mean` to coarse grain by averaging the data in bins along the time axis * "circmean", uses :meth:`XVG.decimate_circmean` to coarse grain by calculating the circular mean of the data in bins along the time axis. Use additional keywords *low* and *high* to set the limits. Assumes that the data are in degrees. * "min" and "max* select the extremum in each bin * "rms", uses :meth:`XVG.decimate_rms` to coarse grain by computing the root mean square sum of the data in bins along the time axis (for averaging standard deviations and errors) * "percentile" with keyword *per*: :meth:`XVG.decimate_percentile` reduces data in each bin to the percentile *per* * "smooth", uses :meth:`XVG.decimate_smooth` to subsample from a smoothed function (generated with a running average of the coarse graining step size derived from the original number of data points and *maxpoints*) :Returns: numpy array ``(M', N')`` from a ``(M', N)`` array with ``M' == M`` (except when ``M == 1``, see above) and ``N' <= N`` (``N'`` is *maxpoints*).
[ "Decimate", "data", "*", "a", "*", "to", "*", "maxpoints", "*", "using", "*", "method", "*", "." ]
python
valid
amirziai/flatten
flatten_json.py
https://github.com/amirziai/flatten/blob/e8e2cbbdd6fe21177bfc0ce034562463ae555799/flatten_json.py#L32-L79
def flatten(nested_dict, separator="_", root_keys_to_ignore=set()): """ Flattens a dictionary with nested structure to a dictionary with no hierarchy Consider ignoring keys that you are not interested in to prevent unnecessary processing This is specially true for very deep objects :param nested_dict: dictionary we want to flatten :param separator: string to separate dictionary keys by :param root_keys_to_ignore: set of root keys to ignore from flattening :return: flattened dictionary """ assert isinstance(nested_dict, dict), "flatten requires a dictionary input" assert isinstance(separator, six.string_types), "separator must be string" # This global dictionary stores the flattened keys and values and is # ultimately returned flattened_dict = dict() def _flatten(object_, key): """ For dict, list and set objects_ calls itself on the elements and for other types assigns the object_ to the corresponding key in the global flattened_dict :param object_: object to flatten :param key: carries the concatenated key for the object_ :return: None """ # Empty object can't be iterated, take as is if not object_: flattened_dict[key] = object_ # These object types support iteration elif isinstance(object_, dict): for object_key in object_: if not (not key and object_key in root_keys_to_ignore): _flatten(object_[object_key], _construct_key(key, separator, object_key)) elif isinstance(object_, (list, set, tuple)): for index, item in enumerate(object_): _flatten(item, _construct_key(key, separator, index)) # Anything left take as is else: flattened_dict[key] = object_ _flatten(nested_dict, None) return flattened_dict
[ "def", "flatten", "(", "nested_dict", ",", "separator", "=", "\"_\"", ",", "root_keys_to_ignore", "=", "set", "(", ")", ")", ":", "assert", "isinstance", "(", "nested_dict", ",", "dict", ")", ",", "\"flatten requires a dictionary input\"", "assert", "isinstance", ...
Flattens a dictionary with nested structure to a dictionary with no hierarchy Consider ignoring keys that you are not interested in to prevent unnecessary processing This is specially true for very deep objects :param nested_dict: dictionary we want to flatten :param separator: string to separate dictionary keys by :param root_keys_to_ignore: set of root keys to ignore from flattening :return: flattened dictionary
[ "Flattens", "a", "dictionary", "with", "nested", "structure", "to", "a", "dictionary", "with", "no", "hierarchy", "Consider", "ignoring", "keys", "that", "you", "are", "not", "interested", "in", "to", "prevent", "unnecessary", "processing", "This", "is", "specia...
python
train
h2oai/h2o-3
h2o-py/h2o/utils/typechecks.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/typechecks.py#L650-L696
def _get_lambda_source_code(lambda_fn, src): """Attempt to find the source code of the ``lambda_fn`` within the string ``src``.""" def gen_lambdas(): def gen(): yield src + "\n" g = gen() step = 0 tokens = [] for tok in tokenize.generate_tokens(getattr(g, "next", getattr(g, "__next__", None))): if step == 0: if tok[0] == tokenize.NAME and tok[1] == "lambda": step = 1 tokens = [tok] level = 0 elif step == 1: if tok[0] == tokenize.NAME: tokens.append(tok) step = 2 else: step = 0 elif step == 2: if tok[0] == tokenize.OP and tok[1] == ":": tokens.append(tok) step = 3 else: step = 0 elif step == 3: if level == 0 and (tok[0] == tokenize.OP and tok[1] in ",)" or tok[0] == tokenize.ENDMARKER): yield tokenize.untokenize(tokens).strip() step = 0 else: tokens.append(tok) if tok[0] == tokenize.OP: if tok[1] in "[({": level += 1 if tok[1] in "])}": level -= 1 assert not tokens actual_code = lambda_fn.__code__.co_code for lambda_src in gen_lambdas(): try: fn = eval(lambda_src, globals(), locals()) if fn.__code__.co_code == actual_code: return lambda_src.split(":", 1)[1].strip() except Exception: pass return "<lambda>"
[ "def", "_get_lambda_source_code", "(", "lambda_fn", ",", "src", ")", ":", "def", "gen_lambdas", "(", ")", ":", "def", "gen", "(", ")", ":", "yield", "src", "+", "\"\\n\"", "g", "=", "gen", "(", ")", "step", "=", "0", "tokens", "=", "[", "]", "for",...
Attempt to find the source code of the ``lambda_fn`` within the string ``src``.
[ "Attempt", "to", "find", "the", "source", "code", "of", "the", "lambda_fn", "within", "the", "string", "src", "." ]
python
test