repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
HttpRunner/HttpRunner
httprunner/api.py
https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/api.py#L99-L118
def _run_suite(self, test_suite):
    """Run every testcase contained in ``test_suite``.

    Args:
        test_suite: unittest.TestSuite()

    Returns:
        list: (testcase, result) pairs, one per executed testcase.
    """
    results = []
    for case in test_suite:
        name = case.config.get("name")
        logger.log_info("Start to run testcase: {}".format(name))
        results.append((case, self.unittest_runner.run(case)))
    return results
[ "def", "_run_suite", "(", "self", ",", "test_suite", ")", ":", "tests_results", "=", "[", "]", "for", "testcase", "in", "test_suite", ":", "testcase_name", "=", "testcase", ".", "config", ".", "get", "(", "\"name\"", ")", "logger", ".", "log_info", "(", ...
run tests in test_suite Args: test_suite: unittest.TestSuite() Returns: list: tests_results
[ "run", "tests", "in", "test_suite" ]
python
train
saltstack/salt
salt/modules/zoneadm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zoneadm.py#L426-L454
def move(zone, zonepath):
    '''
    Relocate a zone to a new zonepath.

    zone : string
        name or uuid of the zone
    zonepath : string
        new zonepath

    CLI Example:

    .. code-block:: bash

        salt '*' zoneadm.move meave /sweetwater/meave
    '''
    # Zones may be addressed by uuid (-u) or by name (-z).
    if _is_uuid(zone):
        target = '-u {0}'.format(zone)
    else:
        target = '-z {0}'.format(zone)

    res = __salt__['cmd.run_all']('zoneadm {zone} move {path}'.format(
        zone=target,
        path=zonepath,
    ))

    ret = {'status': res['retcode'] == 0}
    # Strip the tool's own prefix from whichever stream we report.
    message = (res['stdout'] if ret['status'] else res['stderr']).replace('zoneadm: ', '')
    if message != '':
        ret['message'] = message
    return ret
[ "def", "move", "(", "zone", ",", "zonepath", ")", ":", "ret", "=", "{", "'status'", ":", "True", "}", "## verify zone", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "'zoneadm {zone} move {path}'", ".", "format", "(", "zone", "=", "'-u {0}'", "."...
Move zone to new zonepath. zone : string name or uuid of the zone zonepath : string new zonepath CLI Example: .. code-block:: bash salt '*' zoneadm.move meave /sweetwater/meave
[ "Move", "zone", "to", "new", "zonepath", "." ]
python
train
apache/airflow
airflow/utils/dates.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dates.py#L214-L224
def scale_time_units(time_seconds_arr, unit):
    """
    Convert an array of time durations in seconds to the specified time unit.
    """
    # Seconds-per-unit lookup; unknown units pass the input through unchanged.
    divisors = {
        'minutes': 60,
        'hours': 60 * 60,
        'days': 24 * 60 * 60,
    }
    if unit not in divisors:
        return time_seconds_arr
    factor = divisors[unit]
    return [seconds * 1.0 / factor for seconds in time_seconds_arr]
[ "def", "scale_time_units", "(", "time_seconds_arr", ",", "unit", ")", ":", "if", "unit", "==", "'minutes'", ":", "return", "list", "(", "map", "(", "lambda", "x", ":", "x", "*", "1.0", "/", "60", ",", "time_seconds_arr", ")", ")", "elif", "unit", "==",...
Convert an array of time durations in seconds to the specified time unit.
[ "Convert", "an", "array", "of", "time", "durations", "in", "seconds", "to", "the", "specified", "time", "unit", "." ]
python
test
bmuller/kademlia
kademlia/routing.py
https://github.com/bmuller/kademlia/blob/4a8d445c9ee8f3ca10f56107e4445daed4933c8a/kademlia/routing.py#L53-L71
def add_node(self, node):
    """
    Add a C{Node} to the C{KBucket}.  Return True if successful,
    False if the bucket is full.

    If the bucket is full, keep track of node in a replacement list,
    per section 4.1 of the paper.
    """
    # Re-adding a known node refreshes it (delete + insert moves it to
    # the most-recently-seen end of the ordered mapping).
    if node.id in self.nodes:
        del self.nodes[node.id]
        self.nodes[node.id] = node
        return True
    # Room left in the bucket: plain insert.
    if len(self) < self.ksize:
        self.nodes[node.id] = node
        return True
    # Bucket full: remember the node as a replacement candidate instead.
    if node.id in self.replacement_nodes:
        del self.replacement_nodes[node.id]
    self.replacement_nodes[node.id] = node
    return False
[ "def", "add_node", "(", "self", ",", "node", ")", ":", "if", "node", ".", "id", "in", "self", ".", "nodes", ":", "del", "self", ".", "nodes", "[", "node", ".", "id", "]", "self", ".", "nodes", "[", "node", ".", "id", "]", "=", "node", "elif", ...
Add a C{Node} to the C{KBucket}. Return True if successful, False if the bucket is full. If the bucket is full, keep track of node in a replacement list, per section 4.1 of the paper.
[ "Add", "a", "C", "{", "Node", "}", "to", "the", "C", "{", "KBucket", "}", ".", "Return", "True", "if", "successful", "False", "if", "the", "bucket", "is", "full", "." ]
python
train
Robpol86/libnl
libnl/msg.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/msg.py#L445-L483
def nl_nlmsg_flags2str(flags, buf, _=None):
    """Netlink Message Flags Translations.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L664

    Positional arguments:
    flags -- integer.
    buf -- bytearray().

    Keyword arguments:
    _ -- unused.

    Returns:
    Reference to `buf`.
    """
    del buf[:]
    known = (
        ('REQUEST', libnl.linux_private.netlink.NLM_F_REQUEST),
        ('MULTI', libnl.linux_private.netlink.NLM_F_MULTI),
        ('ACK', libnl.linux_private.netlink.NLM_F_ACK),
        ('ECHO', libnl.linux_private.netlink.NLM_F_ECHO),
        ('ROOT', libnl.linux_private.netlink.NLM_F_ROOT),
        ('MATCH', libnl.linux_private.netlink.NLM_F_MATCH),
        ('ATOMIC', libnl.linux_private.netlink.NLM_F_ATOMIC),
        ('REPLACE', libnl.linux_private.netlink.NLM_F_REPLACE),
        ('EXCL', libnl.linux_private.netlink.NLM_F_EXCL),
        ('CREATE', libnl.linux_private.netlink.NLM_F_CREATE),
        ('APPEND', libnl.linux_private.netlink.NLM_F_APPEND),
    )
    names = []
    remaining = flags
    for name, bit in known:
        if not remaining & bit:
            continue
        # Clear each recognized bit so leftovers can be reported in hex.
        remaining &= ~bit
        names.append(name)
    if remaining:
        names.append('0x{0:x}'.format(remaining))
    buf.extend(','.join(names).encode('ascii'))
    return buf
[ "def", "nl_nlmsg_flags2str", "(", "flags", ",", "buf", ",", "_", "=", "None", ")", ":", "del", "buf", "[", ":", "]", "all_flags", "=", "(", "(", "'REQUEST'", ",", "libnl", ".", "linux_private", ".", "netlink", ".", "NLM_F_REQUEST", ")", ",", "(", "'M...
Netlink Message Flags Translations. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L664 Positional arguments: flags -- integer. buf -- bytearray(). Keyword arguments: _ -- unused. Returns: Reference to `buf`.
[ "Netlink", "Message", "Flags", "Translations", "." ]
python
train
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L2851-L2878
def make_butterworth_bandpass_b_a(CenterFreq, bandwidth, SampleFreq, order=5, btype='band'):
    """ Generates the b and a coefficients for a butterworth bandpass IIR filter.

    Parameters
    ----------
    CenterFreq : float
        central frequency of bandpass
    bandwidth : float
        width of the bandpass from centre to edge
    SampleFreq : float
        Sample frequency of filter
    order : int, optional
        order of IIR filter. Is 5 by default
    btype : string, optional
        type of filter to make e.g. (band, low, high)

    Returns
    -------
    b : ndarray
        coefficients multiplying the current and past inputs (feedforward coefficients)
    a : ndarray
        coefficients multiplying the past outputs (feedback coefficients)
    """
    # The band edges sit half a bandwidth either side of the centre.
    half_width = bandwidth / 2
    return make_butterworth_b_a(CenterFreq - half_width,
                                CenterFreq + half_width,
                                SampleFreq, order, btype)
[ "def", "make_butterworth_bandpass_b_a", "(", "CenterFreq", ",", "bandwidth", ",", "SampleFreq", ",", "order", "=", "5", ",", "btype", "=", "'band'", ")", ":", "lowcut", "=", "CenterFreq", "-", "bandwidth", "/", "2", "highcut", "=", "CenterFreq", "+", "bandwi...
Generates the b and a coefficients for a butterworth bandpass IIR filter. Parameters ---------- CenterFreq : float central frequency of bandpass bandwidth : float width of the bandpass from centre to edge SampleFreq : float Sample frequency of filter order : int, optional order of IIR filter. Is 5 by default btype : string, optional type of filter to make e.g. (band, low, high) Returns ------- b : ndarray coefficients multiplying the current and past inputs (feedforward coefficients) a : ndarray coefficients multiplying the past outputs (feedback coefficients)
[ "Generates", "the", "b", "and", "a", "coefficients", "for", "a", "butterworth", "bandpass", "IIR", "filter", "." ]
python
train
jaraco/jaraco.text
jaraco/text.py
https://github.com/jaraco/jaraco.text/blob/0fe070e9241cb1fdb737516a3f57da94a2618376/jaraco/text.py#L392-L403
def remove_suffix(text, suffix):
    """
    Remove the suffix from the text if it exists.

    >>> remove_suffix('name.git', '.git')
    'name'

    >>> remove_suffix('something special', 'sample')
    'something special'

    >>> remove_suffix('git.name.git', '.git')
    'git.name'
    """
    # BUG FIX: the old implementation used str.partition, which splits at
    # the FIRST occurrence of ``suffix`` and therefore truncated the text
    # whenever the suffix also appeared mid-string (e.g. 'git.name.git'
    # became 'git').  Only strip a genuine trailing suffix.
    if suffix and text.endswith(suffix):
        return text[:-len(suffix)]
    return text
[ "def", "remove_suffix", "(", "text", ",", "suffix", ")", ":", "rest", ",", "suffix", ",", "null", "=", "text", ".", "partition", "(", "suffix", ")", "return", "rest" ]
Remove the suffix from the text if it exists. >>> remove_suffix('name.git', '.git') 'name' >>> remove_suffix('something special', 'sample') 'something special'
[ "Remove", "the", "suffix", "from", "the", "text", "if", "it", "exists", "." ]
python
train
saltstack/salt
salt/modules/zpool.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zpool.py#L1196-L1316
def import_(zpool=None, new_name=None, **kwargs): ''' .. versionadded:: 2015.5.0 Import storage pools or list pools available for import zpool : string Optional name of storage pool new_name : string Optional new name for the storage pool mntopts : string Comma-separated list of mount options to use when mounting datasets within the pool. force : boolean Forces import, even if the pool appears to be potentially active. altroot : string Equivalent to "-o cachefile=none,altroot=root" dir : string Searches for devices or files in dir, multiple dirs can be specified as follows: ``dir="dir1,dir2"`` no_mount : boolean Import the pool without mounting any file systems. only_destroyed : boolean Imports destroyed pools only. This also sets ``force=True``. recovery : bool|str false: do not try to recovery broken pools true: try to recovery the pool by rolling back the latest transactions test: check if a pool can be recovered, but don't import it nolog: allow import without log device, recent transactions might be lost .. note:: If feature flags are not support this forced to the default of 'false' .. warning:: When recovery is set to 'test' the result will be have imported set to True if the pool can be imported. The pool might also be imported if the pool was not broken to begin with. properties : dict Additional pool properties .. note:: Zpool properties can be specified at the time of creation of the pool by passing an additional argument called "properties" and specifying the properties with their respective values in the form of a python dictionary: .. code-block:: text properties="{'property1': 'value1', 'property2': 'value2'}" CLI Example: .. 
code-block:: bash salt '*' zpool.import [force=True|False] salt '*' zpool.import myzpool [mynewzpool] [force=True|False] salt '*' zpool.import myzpool dir='/tmp' ''' ## Configure pool # NOTE: initialize the defaults flags = [] opts = {} target = [] # NOTE: push pool and filesystem properties pool_properties = kwargs.get('properties', {}) # NOTE: set extra config based on kwargs if kwargs.get('force', False) or kwargs.get('only_destroyed', False): flags.append('-f') if kwargs.get('only_destroyed', False): flags.append('-D') if kwargs.get('no_mount', False): flags.append('-N') if kwargs.get('altroot', False): opts['-R'] = kwargs.get('altroot') if kwargs.get('mntopts', False): # NOTE: -o is used for both mount options and pool properties! # ```-o nodevices,noexec,nosetuid,ro``` vs ```-o prop=val``` opts['-o'] = kwargs.get('mntopts') if kwargs.get('dir', False): opts['-d'] = kwargs.get('dir').split(',') if kwargs.get('recovery', False) and __utils__['zfs.has_feature_flags'](): recovery = kwargs.get('recovery') if recovery in [True, 'test']: flags.append('-F') if recovery == 'test': flags.append('-n') if recovery == 'nolog': flags.append('-m') # NOTE: append the pool name and specifications if zpool: target.append(zpool) target.append(new_name) else: flags.append('-a') ## Import storage pool res = __salt__['cmd.run_all']( __utils__['zfs.zpool_command']( command='import', flags=flags, opts=opts, pool_properties=pool_properties, target=target, ), python_shell=False, ) return __utils__['zfs.parse_command_result'](res, 'imported')
[ "def", "import_", "(", "zpool", "=", "None", ",", "new_name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "## Configure pool", "# NOTE: initialize the defaults", "flags", "=", "[", "]", "opts", "=", "{", "}", "target", "=", "[", "]", "# NOTE: push pool...
.. versionadded:: 2015.5.0 Import storage pools or list pools available for import zpool : string Optional name of storage pool new_name : string Optional new name for the storage pool mntopts : string Comma-separated list of mount options to use when mounting datasets within the pool. force : boolean Forces import, even if the pool appears to be potentially active. altroot : string Equivalent to "-o cachefile=none,altroot=root" dir : string Searches for devices or files in dir, multiple dirs can be specified as follows: ``dir="dir1,dir2"`` no_mount : boolean Import the pool without mounting any file systems. only_destroyed : boolean Imports destroyed pools only. This also sets ``force=True``. recovery : bool|str false: do not try to recovery broken pools true: try to recovery the pool by rolling back the latest transactions test: check if a pool can be recovered, but don't import it nolog: allow import without log device, recent transactions might be lost .. note:: If feature flags are not support this forced to the default of 'false' .. warning:: When recovery is set to 'test' the result will be have imported set to True if the pool can be imported. The pool might also be imported if the pool was not broken to begin with. properties : dict Additional pool properties .. note:: Zpool properties can be specified at the time of creation of the pool by passing an additional argument called "properties" and specifying the properties with their respective values in the form of a python dictionary: .. code-block:: text properties="{'property1': 'value1', 'property2': 'value2'}" CLI Example: .. code-block:: bash salt '*' zpool.import [force=True|False] salt '*' zpool.import myzpool [mynewzpool] [force=True|False] salt '*' zpool.import myzpool dir='/tmp'
[ "..", "versionadded", "::", "2015", ".", "5", ".", "0" ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_map/mp_slipmap_util.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_map/mp_slipmap_util.py#L349-L358
def draw(self, img, pixmapper, bounds):
    '''draw legend on the image'''
    # Render the legend lazily on first use and cache it.
    if self._img is None:
        self._img = self.draw_legend()
    height = self._img.shape[0]
    width = self._img.shape[1]
    # Fixed 5-pixel inset from the top-left corner of the image.
    offset_x = 5
    offset_y = 5
    img[offset_y:offset_y + height, offset_x:offset_x + width] = self._img
[ "def", "draw", "(", "self", ",", "img", ",", "pixmapper", ",", "bounds", ")", ":", "if", "self", ".", "_img", "is", "None", ":", "self", ".", "_img", "=", "self", ".", "draw_legend", "(", ")", "w", "=", "self", ".", "_img", ".", "shape", "[", "...
draw legend on the image
[ "draw", "legend", "on", "the", "image" ]
python
train
Karaage-Cluster/karaage
karaage/plugins/kgusage/graphs.py
https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/plugins/kgusage/graphs.py#L138-L147
def get_trend_graph_url(start, end):
    """ Total trend graph for machine category. """
    base = get_trend_graph_filename(start, end)
    # Same basename serves both the rendered image and the raw data.
    return {
        'graph_url': urlparse.urljoin(GRAPH_URL, base + ".png"),
        'data_url': urlparse.urljoin(GRAPH_URL, base + ".csv"),
    }
[ "def", "get_trend_graph_url", "(", "start", ",", "end", ")", ":", "filename", "=", "get_trend_graph_filename", "(", "start", ",", "end", ")", "urls", "=", "{", "'graph_url'", ":", "urlparse", ".", "urljoin", "(", "GRAPH_URL", ",", "filename", "+", "\".png\""...
Total trend graph for machine category.
[ "Total", "trend", "graph", "for", "machine", "category", "." ]
python
train
panosl/django-currencies
currencies/management/commands/currencies.py
https://github.com/panosl/django-currencies/blob/8d4c6c202ad7c4cc06263ab2c1b1f969bbe99acd/currencies/management/commands/currencies.py#L47-L65
def get_imports(self, option):
    """
    See if we have been passed a set of currencies or a setting variable
    or look for settings CURRENCIES or SHOP_CURRENCIES.
    """
    if option:
        # A single upper-case token longer than 3 chars names a settings
        # variable rather than a currency code.
        if len(option) == 1 and option[0].isupper() and len(option[0]) > 3:
            return getattr(settings, option[0])
        # Otherwise every entry must be a 3-letter upper-case ISO code.
        codes = [entry for entry in option if entry.isupper() and len(entry) == 3]
        if len(codes) != len(option):
            raise ImproperlyConfigured("Invalid currency codes found: %s" % codes)
        return codes
    # Nothing passed explicitly: fall back to well-known settings names.
    for attr in ('CURRENCIES', 'SHOP_CURRENCIES'):
        if hasattr(settings, attr):
            return getattr(settings, attr)
    return option
[ "def", "get_imports", "(", "self", ",", "option", ")", ":", "if", "option", ":", "if", "len", "(", "option", ")", "==", "1", "and", "option", "[", "0", "]", ".", "isupper", "(", ")", "and", "len", "(", "option", "[", "0", "]", ")", ">", "3", ...
See if we have been passed a set of currencies or a setting variable or look for settings CURRENCIES or SHOP_CURRENCIES.
[ "See", "if", "we", "have", "been", "passed", "a", "set", "of", "currencies", "or", "a", "setting", "variable", "or", "look", "for", "settings", "CURRENCIES", "or", "SHOP_CURRENCIES", "." ]
python
train
marshmallow-code/marshmallow-sqlalchemy
src/marshmallow_sqlalchemy/fields.py
https://github.com/marshmallow-code/marshmallow-sqlalchemy/blob/afe3a9ebd886791b662607499c180d2baaeaf617/src/marshmallow_sqlalchemy/fields.py#L90-L115
def _deserialize(self, value, *args, **kwargs):
    """Deserialize a serialized value to a model instance.

    If the parent schema is transient, create a new (transient) instance.
    Otherwise, attempt to find an existing instance in the database.

    :param value: The value to deserialize.
    """
    if not isinstance(value, dict):
        # A bare scalar can only be mapped when there is exactly one
        # related key column; otherwise the input is ambiguous.
        if len(self.related_keys) != 1:
            self.fail(
                "invalid",
                value=value,
                keys=[prop.key for prop in self.related_keys],
            )
        value = {self.related_keys[0].key: value}

    if self.transient:
        return self.related_model(**value)

    query = self.session.query(self.related_model)
    try:
        return self._get_existing_instance(query, value)
    except NoResultFound:
        # The related-object DNE in the DB, but we still want to deserialize it
        # ...perhaps we want to add it to the DB later
        return self.related_model(**value)
[ "def", "_deserialize", "(", "self", ",", "value", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "value", ",", "dict", ")", ":", "if", "len", "(", "self", ".", "related_keys", ")", "!=", "1", ":", "self", "."...
Deserialize a serialized value to a model instance. If the parent schema is transient, create a new (transient) instance. Otherwise, attempt to find an existing instance in the database. :param value: The value to deserialize.
[ "Deserialize", "a", "serialized", "value", "to", "a", "model", "instance", "." ]
python
test
theonion/django-bulbs
bulbs/reading_list/mixins.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/reading_list/mixins.py#L117-L131
def update_reading_list(self, reading_list):
    """Generic behaviors for reading lists before being rendered."""
    # Never include the piece of content the list is being built for.
    filtered = reading_list.filter(
        ~es_filter.Ids(values=[self.id])
    )
    # Drop any document types the project configuration excludes.
    config = getattr(settings, "READING_LIST_CONFIG", {})
    for doc_type in config.get("excluded_doc_types", []):
        filtered = filtered.filter(~es_filter.Type(value=doc_type))
    return filtered
[ "def", "update_reading_list", "(", "self", ",", "reading_list", ")", ":", "# remove the current piece of content from the query.", "reading_list", "=", "reading_list", ".", "filter", "(", "~", "es_filter", ".", "Ids", "(", "values", "=", "[", "self", ".", "id", "]...
Generic behaviors for reading lists before being rendered.
[ "Generic", "behaviors", "for", "reading", "lists", "before", "being", "rendered", "." ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/buffer.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/buffer.py#L78-L86
def validate_and_handle(self, cli, buffer):
    """
    Validate buffer and handle the accept action.
    """
    # Invalid input is silently ignored; nothing is accepted or recorded.
    if not buffer.validate():
        return
    # Invoke the accept handler only when one has been registered.
    if self.handler:
        self.handler(cli, buffer)
    buffer.append_to_history()
[ "def", "validate_and_handle", "(", "self", ",", "cli", ",", "buffer", ")", ":", "if", "buffer", ".", "validate", "(", ")", ":", "if", "self", ".", "handler", ":", "self", ".", "handler", "(", "cli", ",", "buffer", ")", "buffer", ".", "append_to_history...
Validate buffer and handle the accept action.
[ "Validate", "buffer", "and", "handle", "the", "accept", "action", "." ]
python
train
PyMLGame/pymlgame
pymlgame/screen.py
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/screen.py#L29-L35
def reset(self):
    """ Fill the screen with black pixels """
    # Build a fresh all-black surface and adopt its pixel matrix.
    blank = Surface(self.width, self.height)
    blank.fill(BLACK)
    self.matrix = blank.matrix
[ "def", "reset", "(", "self", ")", ":", "surface", "=", "Surface", "(", "self", ".", "width", ",", "self", ".", "height", ")", "surface", ".", "fill", "(", "BLACK", ")", "self", ".", "matrix", "=", "surface", ".", "matrix" ]
Fill the screen with black pixels
[ "Fill", "the", "screen", "with", "black", "pixels" ]
python
train
pytroll/pyorbital
pyorbital/orbital.py
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/orbital.py#L335-L431
def get_next_passes(self, utc_time, length, lon, lat, alt, tol=0.001, horizon=0):
    """Calculate passes for the next hours for a given start time and a
    given observer.

    Original by Martin.

    utc_time: Observation time (datetime object)
    length: Number of hours to find passes (int)
    lon: Longitude of observer position on ground (float)
    lat: Latitude of observer position on ground (float)
    alt: Altitude above sea-level (geoid) of observer position on ground (float)
    tol: precision of the result in seconds
    horizon: the elevation of horizon to compute risetime and falltime.

    Return: [(rise-time, fall-time, max-elevation-time), ...]
    """

    def elevation(minutes):
        """Compute the elevation."""
        # Elevation relative to the requested horizon, 'minutes' after utc_time.
        return self.get_observer_look(utc_time + timedelta(
            minutes=np.float64(minutes)), lon, lat, alt)[1] - horizon

    def elevation_inv(minutes):
        """Compute the inverse of elevation."""
        # Negated so a minimizer-style search can find the elevation maximum.
        return -elevation(minutes)

    def get_root(fun, start, end, tol=0.01):
        """Root finding scheme"""
        x_0 = end
        x_1 = start
        fx_0 = fun(end)
        fx_1 = fun(start)
        # Order the bracket so the endpoint with the larger |f| comes second.
        if abs(fx_0) < abs(fx_1):
            fx_0, fx_1 = fx_1, fx_0
            x_0, x_1 = x_1, x_0

        # Brent's method locates the horizon-crossing time within the bracket.
        x_n = optimize.brentq(fun, x_0, x_1)
        return x_n

    def get_max_parab(fun, start, end, tol=0.01):
        """Successive parabolic interpolation."""
        a = float(start)
        c = float(end)
        b = (a + c) / 2.0

        f_a = fun(a)
        f_b = fun(b)
        f_c = fun(c)

        x = b
        while True:
            # Vertex of the parabola through (a, f_a), (b, f_b), (c, f_c).
            x = x - 0.5 * (((b - a) ** 2 * (f_b - f_c)
                            - (b - c) ** 2 * (f_b - f_a)) /
                           ((b - a) * (f_b - f_c) - (b - c) * (f_b - f_a)))
            # Degenerate (collinear) points make the update NaN; fall back to b.
            if np.isnan(x):
                return b
            if abs(b - x) <= tol:
                return x

            # Shrink the bracket around the new estimate and resample.
            a, b, c = (a + x) / 2.0, x, (x + c) / 2.0
            f_a, f_b, f_c = fun(a), fun(b), fun(c)

    # every minute
    # Coarse scan: sample the elevation once per minute over the window.
    times = utc_time + np.array([timedelta(minutes=minutes)
                                 for minutes in range(length * 60)])
    elev = self.get_observer_look(times, lon, lat, alt)[1] - horizon
    # Indices where the elevation changes sign, i.e. horizon crossings.
    zcs = np.where(np.diff(np.sign(elev)))[0]
    res = []
    risetime = None
    falltime = None
    for guess in zcs:
        # Refine each crossing to the requested tolerance (tol is seconds,
        # the search works in minutes).
        horizon_mins = get_root(
            elevation, guess, guess + 1.0, tol=tol / 60.0)
        horizon_time = utc_time + timedelta(minutes=horizon_mins)
        if elev[guess] < 0:
            # Elevation was below the horizon -> this crossing is a rise.
            risetime = horizon_time
            risemins = horizon_mins
            falltime = None
        else:
            # Crossing downward -> a fall; pair it with the pending rise.
            falltime = horizon_time
            fallmins = horizon_mins
            if risetime:
                # Locate the coarse maximum between rise and fall, then
                # refine it with parabolic interpolation.
                int_start = max(0, int(np.floor(risemins)))
                int_end = min(len(elev), int(np.ceil(fallmins) + 1))
                middle = int_start + np.argmax(elev[int_start:int_end])
                highest = utc_time + \
                    timedelta(minutes=get_max_parab(
                        elevation_inv,
                        max(risemins, middle - 1), min(fallmins, middle + 1),
                        tol=tol / 60.0
                    ))
                res += [(risetime, falltime, highest)]
            risetime = None
    return res
[ "def", "get_next_passes", "(", "self", ",", "utc_time", ",", "length", ",", "lon", ",", "lat", ",", "alt", ",", "tol", "=", "0.001", ",", "horizon", "=", "0", ")", ":", "def", "elevation", "(", "minutes", ")", ":", "\"\"\"Compute the elevation.\"\"\"", "...
Calculate passes for the next hours for a given start time and a given observer. Original by Martin. utc_time: Observation time (datetime object) length: Number of hours to find passes (int) lon: Longitude of observer position on ground (float) lat: Latitude of observer position on ground (float) alt: Altitude above sea-level (geoid) of observer position on ground (float) tol: precision of the result in seconds horizon: the elevation of horizon to compute risetime and falltime. Return: [(rise-time, fall-time, max-elevation-time), ...]
[ "Calculate", "passes", "for", "the", "next", "hours", "for", "a", "given", "start", "time", "and", "a", "given", "observer", "." ]
python
train
quantopian/zipline
zipline/data/data_portal.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L475-L537
def get_spot_value(self, assets, field, dt, data_frequency):
    """
    Public API method that returns a scalar value representing the value
    of the desired asset's field at either the given dt.

    Parameters
    ----------
    assets : Asset, ContinuousFuture, or iterable of same.
        The asset or assets whose data is desired.
    field : {'open', 'high', 'low', 'close', 'volume',
             'price', 'last_traded'}
        The desired field of the asset.
    dt : pd.Timestamp
        The timestamp for the desired value.
    data_frequency : str
        The frequency of the data to query; i.e. whether the data is
        'daily' or 'minute' bars

    Returns
    -------
    value : float, int, or pd.Timestamp
        The spot value of ``field`` for ``asset``
        The return type is based on the ``field`` requested. If the field
        is one of 'open', 'high', 'low', 'close', or 'price', the value
        will be a float. If the ``field`` is 'volume' the value will be a
        int. If the ``field`` is 'last_traded' the value will be a
        Timestamp.
    """
    # Decide up front whether we were handed one asset or a collection.
    if isinstance(assets, (AssetConvertible, PricingDataAssociable)):
        assets_is_scalar = True
    else:
        assets_is_scalar = False
        # Anything else must at least be iterable.
        try:
            iter(assets)
        except TypeError:
            raise TypeError(
                "Unexpected 'assets' value of type {}."
                .format(type(assets))
            )

    session_label = self.trading_calendar.minute_to_session_label(dt)

    if assets_is_scalar:
        return self._get_single_asset_value(
            session_label,
            assets,
            field,
            dt,
            data_frequency,
        )

    # Bind once so the comprehension avoids repeated attribute lookups.
    single_value = self._get_single_asset_value
    return [
        single_value(session_label, asset, field, dt, data_frequency)
        for asset in assets
    ]
[ "def", "get_spot_value", "(", "self", ",", "assets", ",", "field", ",", "dt", ",", "data_frequency", ")", ":", "assets_is_scalar", "=", "False", "if", "isinstance", "(", "assets", ",", "(", "AssetConvertible", ",", "PricingDataAssociable", ")", ")", ":", "as...
Public API method that returns a scalar value representing the value of the desired asset's field at either the given dt. Parameters ---------- assets : Asset, ContinuousFuture, or iterable of same. The asset or assets whose data is desired. field : {'open', 'high', 'low', 'close', 'volume', 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. data_frequency : str The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars Returns ------- value : float, int, or pd.Timestamp The spot value of ``field`` for ``asset`` The return type is based on the ``field`` requested. If the field is one of 'open', 'high', 'low', 'close', or 'price', the value will be a float. If the ``field`` is 'volume' the value will be a int. If the ``field`` is 'last_traded' the value will be a Timestamp.
[ "Public", "API", "method", "that", "returns", "a", "scalar", "value", "representing", "the", "value", "of", "the", "desired", "asset", "s", "field", "at", "either", "the", "given", "dt", "." ]
python
train
pybel/pybel
src/pybel/manager/cache_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/cache_manager.py#L901-L953
def get_or_create_edge(self,
                       source: Node,
                       target: Node,
                       relation: str,
                       bel: str,
                       sha512: str,
                       data: EdgeData,
                       evidence: Optional[Evidence] = None,
                       annotations: Optional[List[NamespaceEntry]] = None,
                       properties: Optional[List[Property]] = None,
                       ) -> Edge:
    """Create an edge if it does not exist, or return it if it does.

    :param source: Source node of the relation
    :param target: Target node of the relation
    :param relation: Type of the relation between source and target node
    :param bel: BEL statement that describes the relation
    :param sha512: The SHA512 hash of the edge as a string
    :param data: The PyBEL data dictionary
    :param Evidence evidence: Evidence object that proves the given relation
    :param properties: List of all properties that belong to the edge
    :param annotations: List of all annotations that belong to the edge
    """
    # Fast path: already in the in-memory cache; re-attach to the session.
    if sha512 in self.object_cache_edge:
        cached = self.object_cache_edge[sha512]
        self.session.add(cached)
        return cached

    # Second chance: the edge may already exist in the database.
    existing = self.get_edge_by_hash(sha512)
    if existing is not None:
        self.object_cache_edge[sha512] = existing
        return existing

    # Neither cached nor stored: build a brand new edge record.
    edge = Edge(
        source=source,
        target=target,
        relation=relation,
        bel=bel,
        sha512=sha512,
        data=json.dumps(data),
    )

    if evidence is not None:
        edge.evidence = evidence
    if properties is not None:
        edge.properties = properties
    if annotations is not None:
        edge.annotations = annotations

    self.session.add(edge)
    self.object_cache_edge[sha512] = edge
    return edge
[ "def", "get_or_create_edge", "(", "self", ",", "source", ":", "Node", ",", "target", ":", "Node", ",", "relation", ":", "str", ",", "bel", ":", "str", ",", "sha512", ":", "str", ",", "data", ":", "EdgeData", ",", "evidence", ":", "Optional", "[", "Ev...
Create an edge if it does not exist, or return it if it does. :param source: Source node of the relation :param target: Target node of the relation :param relation: Type of the relation between source and target node :param bel: BEL statement that describes the relation :param sha512: The SHA512 hash of the edge as a string :param data: The PyBEL data dictionary :param Evidence evidence: Evidence object that proves the given relation :param properties: List of all properties that belong to the edge :param annotations: List of all annotations that belong to the edge
[ "Create", "an", "edge", "if", "it", "does", "not", "exist", "or", "return", "it", "if", "it", "does", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/tools/mavflightview.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/tools/mavflightview.py#L226-L373
def mavflightview_mav(mlog, options=None, flightmode_selections=[]): '''create a map for a log file''' wp = mavwp.MAVWPLoader() if options.mission is not None: wp.load(options.mission) fen = mavwp.MAVFenceLoader() if options.fence is not None: fen.load(options.fence) all_false = True for s in flightmode_selections: if s: all_false = False idx = 0 path = [[]] instances = {} ekf_counter = 0 nkf_counter = 0 types = ['MISSION_ITEM','CMD'] if options.types is not None: types.extend(options.types.split(',')) else: types.extend(['GPS','GLOBAL_POSITION_INT']) if options.rawgps or options.dualgps: types.extend(['GPS', 'GPS_RAW_INT']) if options.rawgps2 or options.dualgps: types.extend(['GPS2_RAW','GPS2']) if options.ekf: types.extend(['EKF1', 'GPS']) if options.nkf: types.extend(['NKF1', 'GPS']) if options.ahr2: types.extend(['AHR2', 'AHRS2', 'GPS']) print("Looking for types %s" % str(types)) last_timestamps = {} used_flightmodes = {} while True: try: m = mlog.recv_match(type=types) if m is None: break except Exception: break type = m.get_type() if type == 'MISSION_ITEM': try: while m.seq > wp.count(): print("Adding dummy WP %u" % wp.count()) wp.set(m, wp.count()) wp.set(m, m.seq) except Exception: pass continue if type == 'CMD': m = mavutil.mavlink.MAVLink_mission_item_message(0, 0, m.CNum, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, m.CId, 0, 1, m.Prm1, m.Prm2, m.Prm3, m.Prm4, m.Lat, m.Lng, m.Alt) try: while m.seq > wp.count(): print("Adding dummy WP %u" % wp.count()) wp.set(m, wp.count()) wp.set(m, m.seq) except Exception: pass continue if not mlog.check_condition(options.condition): continue if options.mode is not None and mlog.flightmode.lower() != options.mode.lower(): continue if not all_false and len(flightmode_selections) > 0 and idx < len(options._flightmodes) and m._timestamp >= options._flightmodes[idx][2]: idx += 1 elif (idx < len(flightmode_selections) and flightmode_selections[idx]) or all_false or len(flightmode_selections) == 0: 
used_flightmodes[mlog.flightmode] = 1 if type in ['GPS','GPS2']: status = getattr(m, 'Status', None) if status is None: status = getattr(m, 'FixType', None) if status is None: print("Can't find status on GPS message") print(m) break if status < 2: continue # flash log lat = m.Lat lng = getattr(m, 'Lng', None) if lng is None: lng = getattr(m, 'Lon', None) if lng is None: print("Can't find longitude on GPS message") print(m) break elif type in ['EKF1', 'ANU1']: pos = mavextra.ekf1_pos(m) if pos is None: continue ekf_counter += 1 if ekf_counter % options.ekf_sample != 0: continue (lat, lng) = pos elif type in ['NKF1']: pos = mavextra.ekf1_pos(m) if pos is None: continue nkf_counter += 1 if nkf_counter % options.nkf_sample != 0: continue (lat, lng) = pos elif type in ['ANU5']: (lat, lng) = (m.Alat*1.0e-7, m.Alng*1.0e-7) elif type in ['AHR2', 'POS', 'CHEK']: (lat, lng) = (m.Lat, m.Lng) elif type == 'AHRS2': (lat, lng) = (m.lat*1.0e-7, m.lng*1.0e-7) elif type == 'ORGN': (lat, lng) = (m.Lat, m.Lng) else: lat = m.lat * 1.0e-7 lng = m.lon * 1.0e-7 # automatically add new types to instances if type not in instances: instances[type] = len(instances) while len(instances) >= len(path): path.append([]) instance = instances[type] if abs(lat)>0.01 or abs(lng)>0.01: colour = colour_for_point(mlog, (lat, lng), instance, options) point = (lat, lng, colour) if options.rate == 0 or not type in last_timestamps or m._timestamp - last_timestamps[type] > 1.0/options.rate: last_timestamps[type] = m._timestamp path[instance].append(point) if len(path[0]) == 0: print("No points to plot") return None return [path, wp, fen, used_flightmodes, getattr(mlog, 'mav_type',None)]
[ "def", "mavflightview_mav", "(", "mlog", ",", "options", "=", "None", ",", "flightmode_selections", "=", "[", "]", ")", ":", "wp", "=", "mavwp", ".", "MAVWPLoader", "(", ")", "if", "options", ".", "mission", "is", "not", "None", ":", "wp", ".", "load",...
create a map for a log file
[ "create", "a", "map", "for", "a", "log", "file" ]
python
train
macacajs/wd.py
macaca/util.py
https://github.com/macacajs/wd.py/blob/6d3c52060013e01a67cd52b68b5230b387427bad/macaca/util.py#L26-L32
def check_unused_args(self, used_args, args, kwargs): """Implement the check_unused_args in superclass.""" for k, v in kwargs.items(): if k in used_args: self._used_kwargs.update({k: v}) else: self._unused_kwargs.update({k: v})
[ "def", "check_unused_args", "(", "self", ",", "used_args", ",", "args", ",", "kwargs", ")", ":", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", "in", "used_args", ":", "self", ".", "_used_kwargs", ".", "update", "(", ...
Implement the check_unused_args in superclass.
[ "Implement", "the", "check_unused_args", "in", "superclass", "." ]
python
valid
google/grr
grr/server/grr_response_server/aff4_objects/cronjobs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/cronjobs.py#L392-L408
def IsRunning(self): """Returns True if there's a currently running iteration of this job.""" current_urn = self.Get(self.Schema.CURRENT_FLOW_URN) if not current_urn: return False try: current_flow = aff4.FACTORY.Open( urn=current_urn, aff4_type=flow.GRRFlow, token=self.token, mode="r") except aff4.InstantiationError: # This isn't a flow, something went really wrong, clear it out. logging.error("Unable to open cron job run: %s", current_urn) self.DeleteAttribute(self.Schema.CURRENT_FLOW_URN) self.Flush() return False return current_flow.GetRunner().IsRunning()
[ "def", "IsRunning", "(", "self", ")", ":", "current_urn", "=", "self", ".", "Get", "(", "self", ".", "Schema", ".", "CURRENT_FLOW_URN", ")", "if", "not", "current_urn", ":", "return", "False", "try", ":", "current_flow", "=", "aff4", ".", "FACTORY", ".",...
Returns True if there's a currently running iteration of this job.
[ "Returns", "True", "if", "there", "s", "a", "currently", "running", "iteration", "of", "this", "job", "." ]
python
train
hydraplatform/hydra-base
hydra_base/lib/users.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/users.py#L178-L187
def add_role(role,**kwargs): """ Add a new role """ #check_perm(kwargs.get('user_id'), 'add_role') role_i = Role(name=role.name, code=role.code) db.DBSession.add(role_i) db.DBSession.flush() return role_i
[ "def", "add_role", "(", "role", ",", "*", "*", "kwargs", ")", ":", "#check_perm(kwargs.get('user_id'), 'add_role')", "role_i", "=", "Role", "(", "name", "=", "role", ".", "name", ",", "code", "=", "role", ".", "code", ")", "db", ".", "DBSession", ".", "a...
Add a new role
[ "Add", "a", "new", "role" ]
python
train
vlasovskikh/funcparserlib
funcparserlib/parser.py
https://github.com/vlasovskikh/funcparserlib/blob/0b689920babcf6079a4b3e8721cc10bbc089d81c/funcparserlib/parser.py#L381-L394
def with_forward_decls(suspension): """(None -> Parser(a, b)) -> Parser(a, b) Returns a parser that computes itself lazily as a result of the suspension provided. It is needed when some parsers contain forward references to parsers defined later and such references are cyclic. See examples for more details. """ @Parser def f(tokens, s): return suspension().run(tokens, s) return f
[ "def", "with_forward_decls", "(", "suspension", ")", ":", "@", "Parser", "def", "f", "(", "tokens", ",", "s", ")", ":", "return", "suspension", "(", ")", ".", "run", "(", "tokens", ",", "s", ")", "return", "f" ]
(None -> Parser(a, b)) -> Parser(a, b) Returns a parser that computes itself lazily as a result of the suspension provided. It is needed when some parsers contain forward references to parsers defined later and such references are cyclic. See examples for more details.
[ "(", "None", "-", ">", "Parser", "(", "a", "b", "))", "-", ">", "Parser", "(", "a", "b", ")" ]
python
train
amueller/word_cloud
wordcloud/wordcloud.py
https://github.com/amueller/word_cloud/blob/d36f526e3d8346e6d7a2656631f05f68e402517d/wordcloud/wordcloud.py#L533-L579
def process_text(self, text): """Splits a long text into words, eliminates the stopwords. Parameters ---------- text : string The text to be processed. Returns ------- words : dict (string, int) Word tokens with associated frequency. ..versionchanged:: 1.2.2 Changed return type from list of tuples to dict. Notes ----- There are better ways to do word tokenization, but I don't want to include all those things. """ stopwords = set([i.lower() for i in self.stopwords]) flags = (re.UNICODE if sys.version < '3' and type(text) is unicode # noqa: F821 else 0) regexp = self.regexp if self.regexp is not None else r"\w[\w']+" words = re.findall(regexp, text, flags) # remove stopwords words = [word for word in words if word.lower() not in stopwords] # remove 's words = [word[:-2] if word.lower().endswith("'s") else word for word in words] # remove numbers if not self.include_numbers: words = [word for word in words if not word.isdigit()] # remove short words if self.min_word_length: words = [word for word in words if len(word) >= self.min_word_length] if self.collocations: word_counts = unigrams_and_bigrams(words, self.normalize_plurals) else: word_counts, _ = process_tokens(words, self.normalize_plurals) return word_counts
[ "def", "process_text", "(", "self", ",", "text", ")", ":", "stopwords", "=", "set", "(", "[", "i", ".", "lower", "(", ")", "for", "i", "in", "self", ".", "stopwords", "]", ")", "flags", "=", "(", "re", ".", "UNICODE", "if", "sys", ".", "version",...
Splits a long text into words, eliminates the stopwords. Parameters ---------- text : string The text to be processed. Returns ------- words : dict (string, int) Word tokens with associated frequency. ..versionchanged:: 1.2.2 Changed return type from list of tuples to dict. Notes ----- There are better ways to do word tokenization, but I don't want to include all those things.
[ "Splits", "a", "long", "text", "into", "words", "eliminates", "the", "stopwords", "." ]
python
train
DarkEnergySurvey/ugali
ugali/analysis/loglike.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/loglike.py#L279-L290
def calc_signal_color1(self, distance_modulus, mass_steps=10000): """ Compute signal color probability (u_color) for each catalog object on the fly. """ mag_1, mag_2 = self.catalog.mag_1,self.catalog.mag_2 mag_err_1, mag_err_2 = self.catalog.mag_err_1,self.catalog.mag_err_2 u_density = self.isochrone.pdf(mag_1,mag_2,mag_err_1,mag_err_2,distance_modulus,self.delta_mag,mass_steps) #u_color = u_density * self.delta_mag**2 u_color = u_density return u_color
[ "def", "calc_signal_color1", "(", "self", ",", "distance_modulus", ",", "mass_steps", "=", "10000", ")", ":", "mag_1", ",", "mag_2", "=", "self", ".", "catalog", ".", "mag_1", ",", "self", ".", "catalog", ".", "mag_2", "mag_err_1", ",", "mag_err_2", "=", ...
Compute signal color probability (u_color) for each catalog object on the fly.
[ "Compute", "signal", "color", "probability", "(", "u_color", ")", "for", "each", "catalog", "object", "on", "the", "fly", "." ]
python
train
knowmalware/camcrypt
camcrypt/__init__.py
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/__init__.py#L54-L71
def keygen(self, keyBitLength, rawKey): """ This must be called on the object before any encryption or decryption can take place. Provide it the key bit length, which must be 128, 192, or 256, and the key, which may be a sequence of bytes or a simple string. Does not return any value. Raises an exception if the arguments are not sane. """ if keyBitLength not in ACCEPTABLE_KEY_LENGTHS: raise Exception("keyBitLength must be 128, 192, or 256") self.bitlen = keyBitLength if len(rawKey) <= 0 or len(rawKey) > self.bitlen/8: raise Exception("rawKey must be less than or equal to keyBitLength/8 (%d) characters long" % (self.bitlen/8)) rawKey = zero_pad(rawKey, self.bitlen/8) keytable = ctypes.create_string_buffer(TABLE_BYTE_LEN) self.ekeygen(self.bitlen, rawKey, keytable) self.keytable = keytable self.initialized = True
[ "def", "keygen", "(", "self", ",", "keyBitLength", ",", "rawKey", ")", ":", "if", "keyBitLength", "not", "in", "ACCEPTABLE_KEY_LENGTHS", ":", "raise", "Exception", "(", "\"keyBitLength must be 128, 192, or 256\"", ")", "self", ".", "bitlen", "=", "keyBitLength", "...
This must be called on the object before any encryption or decryption can take place. Provide it the key bit length, which must be 128, 192, or 256, and the key, which may be a sequence of bytes or a simple string. Does not return any value. Raises an exception if the arguments are not sane.
[ "This", "must", "be", "called", "on", "the", "object", "before", "any", "encryption", "or", "decryption", "can", "take", "place", ".", "Provide", "it", "the", "key", "bit", "length", "which", "must", "be", "128", "192", "or", "256", "and", "the", "key", ...
python
train
QuantEcon/QuantEcon.py
quantecon/markov/approximation.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/markov/approximation.py#L138-L189
def tauchen(rho, sigma_u, m=3, n=7): r""" Computes a Markov chain associated with a discretized version of the linear Gaussian AR(1) process .. math:: y_{t+1} = \rho y_t + u_{t+1} using Tauchen's method. Here :math:`{u_t}` is an i.i.d. Gaussian process with zero mean. Parameters ---------- rho : scalar(float) The autocorrelation coefficient sigma_u : scalar(float) The standard deviation of the random process m : scalar(int), optional(default=3) The number of standard deviations to approximate out to n : scalar(int), optional(default=7) The number of states to use in the approximation Returns ------- mc : MarkovChain An instance of the MarkovChain class that stores the transition matrix and state values returned by the discretization method """ # standard deviation of y_t std_y = np.sqrt(sigma_u**2 / (1 - rho**2)) # top of discrete state space x_max = m * std_y # bottom of discrete state space x_min = -x_max # discretized state space x = np.linspace(x_min, x_max, n) step = (x_max - x_min) / (n - 1) half_step = 0.5 * step P = np.empty((n, n)) _fill_tauchen(x, P, n, rho, sigma_u, half_step) mc = MarkovChain(P, state_values=x) return mc
[ "def", "tauchen", "(", "rho", ",", "sigma_u", ",", "m", "=", "3", ",", "n", "=", "7", ")", ":", "# standard deviation of y_t", "std_y", "=", "np", ".", "sqrt", "(", "sigma_u", "**", "2", "/", "(", "1", "-", "rho", "**", "2", ")", ")", "# top of d...
r""" Computes a Markov chain associated with a discretized version of the linear Gaussian AR(1) process .. math:: y_{t+1} = \rho y_t + u_{t+1} using Tauchen's method. Here :math:`{u_t}` is an i.i.d. Gaussian process with zero mean. Parameters ---------- rho : scalar(float) The autocorrelation coefficient sigma_u : scalar(float) The standard deviation of the random process m : scalar(int), optional(default=3) The number of standard deviations to approximate out to n : scalar(int), optional(default=7) The number of states to use in the approximation Returns ------- mc : MarkovChain An instance of the MarkovChain class that stores the transition matrix and state values returned by the discretization method
[ "r", "Computes", "a", "Markov", "chain", "associated", "with", "a", "discretized", "version", "of", "the", "linear", "Gaussian", "AR", "(", "1", ")", "process" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/debug.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/debug.py#L1074-L1097
def next(self): """ Handles the next debug event. @see: L{cont}, L{dispatch}, L{wait}, L{stop} @raise WindowsError: Raises an exception on error. If the wait operation causes an error, debugging is stopped (meaning all debugees are either killed or detached from). If the event dispatching causes an error, the event is still continued before returning. This may happen, for example, if the event handler raises an exception nobody catches. """ try: event = self.wait() except Exception: self.stop() raise try: self.dispatch() finally: self.cont()
[ "def", "next", "(", "self", ")", ":", "try", ":", "event", "=", "self", ".", "wait", "(", ")", "except", "Exception", ":", "self", ".", "stop", "(", ")", "raise", "try", ":", "self", ".", "dispatch", "(", ")", "finally", ":", "self", ".", "cont",...
Handles the next debug event. @see: L{cont}, L{dispatch}, L{wait}, L{stop} @raise WindowsError: Raises an exception on error. If the wait operation causes an error, debugging is stopped (meaning all debugees are either killed or detached from). If the event dispatching causes an error, the event is still continued before returning. This may happen, for example, if the event handler raises an exception nobody catches.
[ "Handles", "the", "next", "debug", "event", "." ]
python
train
Ouranosinc/xclim
xclim/checks.py
https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/checks.py#L24-L33
def check_valid(var, key, expected): r"""Check that a variable's attribute has the expected value. Warn user otherwise.""" att = getattr(var, key, None) if att is None: e = 'Variable does not have a `{}` attribute.'.format(key) warn(e) elif att != expected: e = 'Variable has a non-conforming {}. Got `{}`, expected `{}`'.format(key, att, expected) warn(e)
[ "def", "check_valid", "(", "var", ",", "key", ",", "expected", ")", ":", "att", "=", "getattr", "(", "var", ",", "key", ",", "None", ")", "if", "att", "is", "None", ":", "e", "=", "'Variable does not have a `{}` attribute.'", ".", "format", "(", "key", ...
r"""Check that a variable's attribute has the expected value. Warn user otherwise.
[ "r", "Check", "that", "a", "variable", "s", "attribute", "has", "the", "expected", "value", ".", "Warn", "user", "otherwise", "." ]
python
train
CalebBell/fluids
fluids/two_phase_voidage.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/two_phase_voidage.py#L909-L970
def Harms(x, rhol, rhog, mul, mug, m, D): r'''Calculates void fraction in two-phase flow according to the model of [1]_ also given in [2]_ and [3]_. .. math:: \alpha = \left[1 - 10.06Re_l^{-0.875}(1.74 + 0.104Re_l^{0.5})^2 \left(1.376 + \frac{7.242}{X_{tt}^{1.655}}\right)^{-0.5}\right]^2 .. math:: Re_l = \frac{G_{tp}(1-x)D}{\mu_l} Parameters ---------- x : float Quality at the specific tube interval [] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the gas [kg/m^3] mul : float Viscosity of liquid [Pa*s] mug : float Viscosity of gas [Pa*s] m : float Mass flow rate of both phases, [kg/s] D : float Diameter of the channel, [m] Returns ------- alpha : float Void fraction (area of gas / total area of channel), [-] Notes ----- [1]_ has been reviewed. Examples -------- >>> Harms(.4, 800, 2.5, 1E-3, 1E-5, m=1, D=0.3) 0.9653289762907554 References ---------- .. [1] Tandon, T. N., H. K. Varma, and C. P. Gupta. "A Void Fraction Model for Annular Two-Phase Flow." International Journal of Heat and Mass Transfer 28, no. 1 (January 1, 1985): 191-198. doi:10.1016/0017-9310(85)90021-3. .. [2] Xu, Yu, and Xiande Fang. "Correlations of Void Fraction for Two- Phase Refrigerant Flow in Pipes." Applied Thermal Engineering 64, no. 1-2 (March 2014): 242–51. doi:10.1016/j.applthermaleng.2013.12.032. .. [3] Dalkilic, A. S., S. Laohalertdecha, and S. Wongwises. "Effect of Void Fraction Models on the Two-Phase Friction Factor of R134a during Condensation in Vertical Downward Flow in a Smooth Tube." International Communications in Heat and Mass Transfer 35, no. 8 (October 2008): 921-27. doi:10.1016/j.icheatmasstransfer.2008.04.001. ''' G = m/(pi/4*D**2) Rel = G*D*(1-x)/mul Xtt = Lockhart_Martinelli_Xtt(x, rhol, rhog, mul, mug) return (1 - 10.06*Rel**-0.875*(1.74 + 0.104*Rel**0.5)**2 *(1.376 + 7.242/Xtt**1.655)**-0.5)
[ "def", "Harms", "(", "x", ",", "rhol", ",", "rhog", ",", "mul", ",", "mug", ",", "m", ",", "D", ")", ":", "G", "=", "m", "/", "(", "pi", "/", "4", "*", "D", "**", "2", ")", "Rel", "=", "G", "*", "D", "*", "(", "1", "-", "x", ")", "/...
r'''Calculates void fraction in two-phase flow according to the model of [1]_ also given in [2]_ and [3]_. .. math:: \alpha = \left[1 - 10.06Re_l^{-0.875}(1.74 + 0.104Re_l^{0.5})^2 \left(1.376 + \frac{7.242}{X_{tt}^{1.655}}\right)^{-0.5}\right]^2 .. math:: Re_l = \frac{G_{tp}(1-x)D}{\mu_l} Parameters ---------- x : float Quality at the specific tube interval [] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the gas [kg/m^3] mul : float Viscosity of liquid [Pa*s] mug : float Viscosity of gas [Pa*s] m : float Mass flow rate of both phases, [kg/s] D : float Diameter of the channel, [m] Returns ------- alpha : float Void fraction (area of gas / total area of channel), [-] Notes ----- [1]_ has been reviewed. Examples -------- >>> Harms(.4, 800, 2.5, 1E-3, 1E-5, m=1, D=0.3) 0.9653289762907554 References ---------- .. [1] Tandon, T. N., H. K. Varma, and C. P. Gupta. "A Void Fraction Model for Annular Two-Phase Flow." International Journal of Heat and Mass Transfer 28, no. 1 (January 1, 1985): 191-198. doi:10.1016/0017-9310(85)90021-3. .. [2] Xu, Yu, and Xiande Fang. "Correlations of Void Fraction for Two- Phase Refrigerant Flow in Pipes." Applied Thermal Engineering 64, no. 1-2 (March 2014): 242–51. doi:10.1016/j.applthermaleng.2013.12.032. .. [3] Dalkilic, A. S., S. Laohalertdecha, and S. Wongwises. "Effect of Void Fraction Models on the Two-Phase Friction Factor of R134a during Condensation in Vertical Downward Flow in a Smooth Tube." International Communications in Heat and Mass Transfer 35, no. 8 (October 2008): 921-27. doi:10.1016/j.icheatmasstransfer.2008.04.001.
[ "r", "Calculates", "void", "fraction", "in", "two", "-", "phase", "flow", "according", "to", "the", "model", "of", "[", "1", "]", "_", "also", "given", "in", "[", "2", "]", "_", "and", "[", "3", "]", "_", "." ]
python
train
log2timeline/dfdatetime
dfdatetime/apfs_time.py
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/apfs_time.py#L40-L59
def CopyFromDateTimeString(self, time_string): """Copies a APFS timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC. Raises: ValueError: if the date and time value is not supported. """ super(APFSTime, self)._CopyFromDateTimeString(time_string) if (self._timestamp is None or self._timestamp < self._INT64_MIN or self._timestamp > self._INT64_MAX): raise ValueError('Date time value not supported.')
[ "def", "CopyFromDateTimeString", "(", "self", ",", "time_string", ")", ":", "super", "(", "APFSTime", ",", "self", ")", ".", "_CopyFromDateTimeString", "(", "time_string", ")", "if", "(", "self", ".", "_timestamp", "is", "None", "or", "self", ".", "_timestam...
Copies a APFS timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC. Raises: ValueError: if the date and time value is not supported.
[ "Copies", "a", "APFS", "timestamp", "from", "a", "date", "and", "time", "string", "." ]
python
train
cytoscape/py2cytoscape
py2cytoscape/cyrest/table.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/table.py#L83-L95
def delete_row(self,keyValue=None,table=None,verbose=None): """ Deletes a row from a table.Requires the table name or SUID and the row key. :param keyValue (string): Specifies the primary key of a value in the row o f a table :param table (string, optional): Specifies a table by table name. If the pr efix SUID: is used, the table corresponding the SUID will be returne d. """ PARAMS=set_param(['keyValue','table'],[keyValue,table]) response=api(url=self.__url+"/delete row", PARAMS=PARAMS, method="POST", verbose=verbose) return response
[ "def", "delete_row", "(", "self", ",", "keyValue", "=", "None", ",", "table", "=", "None", ",", "verbose", "=", "None", ")", ":", "PARAMS", "=", "set_param", "(", "[", "'keyValue'", ",", "'table'", "]", ",", "[", "keyValue", ",", "table", "]", ")", ...
Deletes a row from a table.Requires the table name or SUID and the row key. :param keyValue (string): Specifies the primary key of a value in the row o f a table :param table (string, optional): Specifies a table by table name. If the pr efix SUID: is used, the table corresponding the SUID will be returne d.
[ "Deletes", "a", "row", "from", "a", "table", ".", "Requires", "the", "table", "name", "or", "SUID", "and", "the", "row", "key", "." ]
python
train
Xion/taipan
taipan/strings.py
https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/strings.py#L137-L185
def split(s, by=None, maxsplit=None): """Split a string based on given delimiter(s). Delimiters can be either strings or compiled regular expression objects. :param s: String to split :param by: A delimiter, or iterable thereof. :param maxsplit: Maximum number of splits to perform. ``None`` means no limit, while 0 does not perform a split at all. :return: List of words in the string ``s`` that were separated by delimiter(s) :raise ValueError: If the separator is an empty string or regex """ ensure_string(s) # TODO(xion): Consider introducing a case for ``split('')`` # to make it return ``['']`` rather than default ``[]`` thru ``str.split``. # It's the so-called "whitespace split" that normally eliminates # empty strings from result. However, ``split(s)`` for any other ``s`` # always returns ``[s]`` so these two approaches are at odds here. # (Possibly refer to split functions in other languages for comparison). # string delimiter are handled by appropriate standard function if by is None or is_string(by): return s.split(by) if maxsplit is None else s.split(by, maxsplit) # regex delimiters have certain special cases handled explicitly below, # so that we do the same things that ``str.split`` does if is_regex(by): if not by.pattern: return s.split('') # will fail with proper exception & message if maxsplit == 0: return [s] return by.split(s, maxsplit=maxsplit or 0) # multiple delimiters are handled by regex that matches them all if is_iterable(by): if not by: raise ValueError("empty separator list") by = list(imap(ensure_string, by)) if not s: return [''] # quickly eliminate trivial case or_ = s.__class__('|') regex = join(or_, imap(re.escape, by)) return split(s, by=re.compile(regex), maxsplit=maxsplit) raise TypeError("invalid separator")
[ "def", "split", "(", "s", ",", "by", "=", "None", ",", "maxsplit", "=", "None", ")", ":", "ensure_string", "(", "s", ")", "# TODO(xion): Consider introducing a case for ``split('')``", "# to make it return ``['']`` rather than default ``[]`` thru ``str.split``.", "# It's the ...
Split a string based on given delimiter(s). Delimiters can be either strings or compiled regular expression objects. :param s: String to split :param by: A delimiter, or iterable thereof. :param maxsplit: Maximum number of splits to perform. ``None`` means no limit, while 0 does not perform a split at all. :return: List of words in the string ``s`` that were separated by delimiter(s) :raise ValueError: If the separator is an empty string or regex
[ "Split", "a", "string", "based", "on", "given", "delimiter", "(", "s", ")", ".", "Delimiters", "can", "be", "either", "strings", "or", "compiled", "regular", "expression", "objects", "." ]
python
train
chaoss/grimoirelab-sortinghat
sortinghat/cmd/load.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/load.py#L352-L390
def __load_unique_identity(self, uidentity, verbose): """Seek or store unique identity""" uuid = uidentity.uuid if uuid: try: api.unique_identities(self.db, uuid) self.log("-- %s already exists." % uuid, verbose) return uuid except NotFoundError as e: self.log("-- %s not found. Generating a new UUID." % uuid, debug=verbose) # We don't have a unique identity, so we have to create # a new one. if len(uidentity.identities) == 0: msg = "not enough info to load %s unique identity." % uidentity.uuid raise LoadError(cause=msg) identity = uidentity.identities.pop(0) try: stored_uuid = api.add_identity(self.db, identity.source, identity.email, identity.name, identity.username) self.new_uids.add(stored_uuid) except AlreadyExistsError as e: with self.db.connect() as session: stored_identity = find_identity(session, e.eid) stored_uuid = stored_identity.uuid self.warning("-- " + str(e), debug=verbose) except ValueError as e: raise LoadError(cause=str(e)) self.log("-- using %s for %s unique identity." % (stored_uuid, uuid), verbose) return stored_uuid
[ "def", "__load_unique_identity", "(", "self", ",", "uidentity", ",", "verbose", ")", ":", "uuid", "=", "uidentity", ".", "uuid", "if", "uuid", ":", "try", ":", "api", ".", "unique_identities", "(", "self", ".", "db", ",", "uuid", ")", "self", ".", "log...
Seek or store unique identity
[ "Seek", "or", "store", "unique", "identity" ]
python
train
howl-anderson/MicroTokenizer
MicroTokenizer/util.py
https://github.com/howl-anderson/MicroTokenizer/blob/41bbe9c31d202b4f751ad5201d343ad1123b42b5/MicroTokenizer/util.py#L101-L120
def load_model_from_path(model_path, meta=False, **overrides): """Load a model from a data directory path. Creates Language class with pipeline from meta.json and then calls from_disk() with path.""" from .tokenizer_loader import TokenizerLoader if not meta: meta = get_model_meta(model_path) tokenizer_loader = TokenizerLoader(meta=meta, **overrides) tokenizers = meta.get('tokenizers', []) disable = overrides.get('disable', []) if tokenizers is True: tokenizers = TokenizerLoader.Defaults.tokenizers elif tokenizers in (False, None): tokenizers = [] for tokenizer_name in tokenizers: if tokenizer_name not in disable: config = meta.get('tokenizer_args', {}).get(tokenizer_name, {}) component = tokenizer_loader.create_tokenizer(tokenizer_name, config=config) tokenizer_loader.add_tokenizer(component, name=tokenizer_name) return tokenizer_loader.from_disk(model_path)
[ "def", "load_model_from_path", "(", "model_path", ",", "meta", "=", "False", ",", "*", "*", "overrides", ")", ":", "from", ".", "tokenizer_loader", "import", "TokenizerLoader", "if", "not", "meta", ":", "meta", "=", "get_model_meta", "(", "model_path", ")", ...
Load a model from a data directory path. Creates Language class with pipeline from meta.json and then calls from_disk() with path.
[ "Load", "a", "model", "from", "a", "data", "directory", "path", ".", "Creates", "Language", "class", "with", "pipeline", "from", "meta", ".", "json", "and", "then", "calls", "from_disk", "()", "with", "path", "." ]
python
train
OCA/odoorpc
odoorpc/rpc/__init__.py
https://github.com/OCA/odoorpc/blob/d90aa0b2bc4fafbab8bd8f50d50e3fb0b9ba91f0/odoorpc/rpc/__init__.py#L245-L248
def timeout(self, timeout): """Set the timeout.""" self._proxy_json._timeout = timeout self._proxy_http._timeout = timeout
[ "def", "timeout", "(", "self", ",", "timeout", ")", ":", "self", ".", "_proxy_json", ".", "_timeout", "=", "timeout", "self", ".", "_proxy_http", ".", "_timeout", "=", "timeout" ]
Set the timeout.
[ "Set", "the", "timeout", "." ]
python
train
numenta/htmresearch
htmresearch/algorithms/multiconnections.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/multiconnections.py#L50-L68
def computeActivity(self, activeInputsBySource, permanenceThreshold=None): """ Calculate the number of active synapses per segment. @param activeInputsBySource (dict) The active cells in each source. Example: {"customInputName1": np.array([42, 69])} """ overlaps = None for source, connections in self.connectionsBySource.iteritems(): o = connections.computeActivity(activeInputsBySource[source], permanenceThreshold) if overlaps is None: overlaps = o else: overlaps += o return overlaps
[ "def", "computeActivity", "(", "self", ",", "activeInputsBySource", ",", "permanenceThreshold", "=", "None", ")", ":", "overlaps", "=", "None", "for", "source", ",", "connections", "in", "self", ".", "connectionsBySource", ".", "iteritems", "(", ")", ":", "o",...
Calculate the number of active synapses per segment. @param activeInputsBySource (dict) The active cells in each source. Example: {"customInputName1": np.array([42, 69])}
[ "Calculate", "the", "number", "of", "active", "synapses", "per", "segment", "." ]
python
train
stsouko/CIMtools
CIMtools/applicability_domain/similarity_distance.py
https://github.com/stsouko/CIMtools/blob/cbb46e68eaa1fe7e7b6cb311fc7063e97096bdf3/CIMtools/applicability_domain/similarity_distance.py#L180-L200
def predict(self, X): """Predict if a particular sample is an outlier or not. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array, shape (n_samples,) For each observations, tells whether or not (True or False) it should be considered as an inlier according to the fitted model. """ # Check is fit had been called check_is_fitted(self, ['tree']) # Check data X = check_array(X) return self.tree.query(X)[0].flatten() <= self.threshold_value
[ "def", "predict", "(", "self", ",", "X", ")", ":", "# Check is fit had been called", "check_is_fitted", "(", "self", ",", "[", "'tree'", "]", ")", "# Check data", "X", "=", "check_array", "(", "X", ")", "return", "self", ".", "tree", ".", "query", "(", "...
Predict if a particular sample is an outlier or not. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : array, shape (n_samples,) For each observations, tells whether or not (True or False) it should be considered as an inlier according to the fitted model.
[ "Predict", "if", "a", "particular", "sample", "is", "an", "outlier", "or", "not", "." ]
python
valid
jxtech/wechatpy
wechatpy/pay/api/order.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/pay/api/order.py#L110-L129
def get_appapi_params(self, prepay_id, timestamp=None, nonce_str=None): """ 获取 APP 支付参数 :param prepay_id: 统一下单接口返回的 prepay_id 参数值 :param timestamp: 可选,时间戳,默认为当前时间戳 :param nonce_str: 可选,随机字符串,默认自动生成 :return: 签名 """ data = { 'appid': self.appid, 'partnerid': self.mch_id, 'prepayid': prepay_id, 'package': 'Sign=WXPay', 'timestamp': timestamp or to_text(int(time.time())), 'noncestr': nonce_str or random_string(32) } sign = calculate_signature(data, self._client.api_key) data['sign'] = sign return data
[ "def", "get_appapi_params", "(", "self", ",", "prepay_id", ",", "timestamp", "=", "None", ",", "nonce_str", "=", "None", ")", ":", "data", "=", "{", "'appid'", ":", "self", ".", "appid", ",", "'partnerid'", ":", "self", ".", "mch_id", ",", "'prepayid'", ...
获取 APP 支付参数 :param prepay_id: 统一下单接口返回的 prepay_id 参数值 :param timestamp: 可选,时间戳,默认为当前时间戳 :param nonce_str: 可选,随机字符串,默认自动生成 :return: 签名
[ "获取", "APP", "支付参数" ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/update/update.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/update/update.py#L543-L550
def device_filter(self): """The device filter to use. :rtype: dict """ if isinstance(self._device_filter, str): return self._decode_query(self._device_filter) return self._device_filter
[ "def", "device_filter", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "_device_filter", ",", "str", ")", ":", "return", "self", ".", "_decode_query", "(", "self", ".", "_device_filter", ")", "return", "self", ".", "_device_filter" ]
The device filter to use. :rtype: dict
[ "The", "device", "filter", "to", "use", "." ]
python
train
spookylukey/django-paypal
paypal/standard/forms.py
https://github.com/spookylukey/django-paypal/blob/b07d0a3ad91b5c5fe7bb27be3e5d70aabcdef76f/paypal/standard/forms.py#L197-L227
def _encrypt(self): """Use your key thing to encrypt things.""" from M2Crypto import BIO, SMIME, X509 # Iterate through the fields and pull out the ones that have a value. plaintext = 'cert_id=%s\n' % self.cert_id for name, field in self.fields.items(): value = None if name in self.initial: value = self.initial[name] elif field.initial is not None: value = field.initial if value is not None: plaintext += u'%s=%s\n' % (name, value) plaintext = plaintext.encode('utf-8') # Begin crypto weirdness. s = SMIME.SMIME() s.load_key_bio(BIO.openfile(self.private_cert), BIO.openfile(self.public_cert)) p7 = s.sign(BIO.MemoryBuffer(plaintext), flags=SMIME.PKCS7_BINARY) x509 = X509.load_cert_bio(BIO.openfile(self.paypal_cert)) sk = X509.X509_Stack() sk.push(x509) s.set_x509_stack(sk) s.set_cipher(SMIME.Cipher('des_ede3_cbc')) tmp = BIO.MemoryBuffer() p7.write_der(tmp) p7 = s.encrypt(tmp, flags=SMIME.PKCS7_BINARY) out = BIO.MemoryBuffer() p7.write(out) return out.read().decode()
[ "def", "_encrypt", "(", "self", ")", ":", "from", "M2Crypto", "import", "BIO", ",", "SMIME", ",", "X509", "# Iterate through the fields and pull out the ones that have a value.", "plaintext", "=", "'cert_id=%s\\n'", "%", "self", ".", "cert_id", "for", "name", ",", "...
Use your key thing to encrypt things.
[ "Use", "your", "key", "thing", "to", "encrypt", "things", "." ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAUtil/QADate_trade.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAUtil/QADate_trade.py#L7135-L7159
def QA_util_format_date2str(cursor_date): """ 对输入日期进行格式化处理,返回格式为 "%Y-%m-%d" 格式字符串 支持格式包括: 1. str: "%Y%m%d" "%Y%m%d%H%M%S", "%Y%m%d %H:%M:%S", "%Y-%m-%d", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H%M%S" 2. datetime.datetime 3. pd.Timestamp 4. int -> 自动在右边加 0 然后转换,譬如 '20190302093' --> "2019-03-02" :param cursor_date: str/datetime.datetime/int 日期或时间 :return: str 返回字符串格式日期 """ if isinstance(cursor_date, datetime.datetime): cursor_date = str(cursor_date)[:10] elif isinstance(cursor_date, str): try: cursor_date = str(pd.Timestamp(cursor_date))[:10] except: raise ValueError('请输入正确的日期格式, 建议 "%Y-%m-%d"') elif isinstance(cursor_date, int): cursor_date = str(pd.Timestamp("{:<014d}".format(cursor_date)))[:10] else: raise ValueError('请输入正确的日期格式,建议 "%Y-%m-%d"') return cursor_date
[ "def", "QA_util_format_date2str", "(", "cursor_date", ")", ":", "if", "isinstance", "(", "cursor_date", ",", "datetime", ".", "datetime", ")", ":", "cursor_date", "=", "str", "(", "cursor_date", ")", "[", ":", "10", "]", "elif", "isinstance", "(", "cursor_da...
对输入日期进行格式化处理,返回格式为 "%Y-%m-%d" 格式字符串 支持格式包括: 1. str: "%Y%m%d" "%Y%m%d%H%M%S", "%Y%m%d %H:%M:%S", "%Y-%m-%d", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H%M%S" 2. datetime.datetime 3. pd.Timestamp 4. int -> 自动在右边加 0 然后转换,譬如 '20190302093' --> "2019-03-02" :param cursor_date: str/datetime.datetime/int 日期或时间 :return: str 返回字符串格式日期
[ "对输入日期进行格式化处理,返回格式为", "%Y", "-", "%m", "-", "%d", "格式字符串", "支持格式包括", ":", "1", ".", "str", ":", "%Y%m%d", "%Y%m%d%H%M%S", "%Y%m%d", "%H", ":", "%M", ":", "%S", "%Y", "-", "%m", "-", "%d", "%Y", "-", "%m", "-", "%d", "%H", ":", "%M", ":", "%S", ...
python
train
mromanello/hucitlib
knowledge_base/surfext/__init__.py
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/surfext/__init__.py#L164-L183
def get_abbreviations(self): """ Get abbreviations of the names of the author. :return: a list of strings (empty list if no abbreviations available). """ abbreviations = [] try: type_abbreviation = self.session.get_resource(BASE_URI_TYPES % "abbreviation" , self.session.get_class(surf.ns.ECRM['E55_Type'])) abbreviations = [unicode(label) for name in self.ecrm_P1_is_identified_by for abbreviation in name.ecrm_P139_has_alternative_form for label in abbreviation.rdfs_label if name.uri == surf.ns.EFRBROO['F12_Name'] and abbreviation.ecrm_P2_has_type.first == type_abbreviation] except Exception as e: logger.debug("Exception raised when getting abbreviations for %a"%self) finally: return abbreviations
[ "def", "get_abbreviations", "(", "self", ")", ":", "abbreviations", "=", "[", "]", "try", ":", "type_abbreviation", "=", "self", ".", "session", ".", "get_resource", "(", "BASE_URI_TYPES", "%", "\"abbreviation\"", ",", "self", ".", "session", ".", "get_class",...
Get abbreviations of the names of the author. :return: a list of strings (empty list if no abbreviations available).
[ "Get", "abbreviations", "of", "the", "names", "of", "the", "author", "." ]
python
train
Yelp/kafka-utils
kafka_utils/util/ssh.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/ssh.py#L44-L58
def sudo_command(self, command, bufsize=-1): """Sudo a command on the SSH server. Delegates to :func`~ssh.Connection.exec_command` :param command: the command to execute :type command: str :param bufsize: interpreted the same way as by the built-in C{file()} function in python :type bufsize: int :returns the stdin, stdout, and stderr of the executing command :rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile}) :raises SSHException: if the server fails to execute the command """ new_command = "sudo {0}".format(command) return self.exec_command(new_command, bufsize)
[ "def", "sudo_command", "(", "self", ",", "command", ",", "bufsize", "=", "-", "1", ")", ":", "new_command", "=", "\"sudo {0}\"", ".", "format", "(", "command", ")", "return", "self", ".", "exec_command", "(", "new_command", ",", "bufsize", ")" ]
Sudo a command on the SSH server. Delegates to :func`~ssh.Connection.exec_command` :param command: the command to execute :type command: str :param bufsize: interpreted the same way as by the built-in C{file()} function in python :type bufsize: int :returns the stdin, stdout, and stderr of the executing command :rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile}) :raises SSHException: if the server fails to execute the command
[ "Sudo", "a", "command", "on", "the", "SSH", "server", ".", "Delegates", "to", ":", "func", "~ssh", ".", "Connection", ".", "exec_command" ]
python
train
rushter/heamy
heamy/cache.py
https://github.com/rushter/heamy/blob/c330854cee3c547417eb353a4a4a23331b40b4bc/heamy/cache.py#L18-L28
def store(self, key, data): """Takes an array and stores it in the cache.""" if not os.path.exists(self._hash_dir): os.makedirs(self._hash_dir) if isinstance(data, pd.DataFrame): columns = data.columns.tolist() np.save(os.path.join(self._hash_dir, key), data.values) json.dump(columns, open(os.path.join(self._hash_dir, '%s.json' % key), 'w')) else: np.save(os.path.join(self._hash_dir, key), data)
[ "def", "store", "(", "self", ",", "key", ",", "data", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_hash_dir", ")", ":", "os", ".", "makedirs", "(", "self", ".", "_hash_dir", ")", "if", "isinstance", "(", "data", ",...
Takes an array and stores it in the cache.
[ "Takes", "an", "array", "and", "stores", "it", "in", "the", "cache", "." ]
python
train
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/netconf.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/netconf.py#L685-L867
def node_sub(self, node_self, node_other): '''node_sub Low-level api: Compute the delta of two configs. This method is recursive. Assume two configs are different. Parameters ---------- node_self : `Element` A config node in a config tree that is being processed. node_self cannot be a leaf node. node_other : `Element` A config node in another config tree that is being processed. Returns ------- None There is no return of this method. ''' def same_leaf_list(tag): list_self = [c for c in list(node_self) if c.tag == tag] list_other = [c for c in list(node_other) if c.tag == tag] s_node = self.device.get_schema_node((list_self + list_other)[0]) if s_node.get('ordered-by') == 'user': if [i.text for i in list_self] == [i.text for i in list_other]: return True else: return False else: if set([i.text for i in list_self]) == \ set([i.text for i in list_other]): return True else: return False if self.preferred_replace != 'merge': t_self = [c.tag for c in list(node_self) \ if self.device.get_schema_node(c).get('type') == \ 'leaf-list'] t_other = [c.tag for c in list(node_other) \ if self.device.get_schema_node(c).get('type') == \ 'leaf-list'] commonalities = set(t_self) & set(t_other) for commonality in commonalities: if not same_leaf_list(commonality): node_self.set(operation_tag, 'replace') node_other.set(operation_tag, 'replace') return in_s_not_in_o, in_o_not_in_s, in_s_and_in_o = \ self._group_kids(node_self, node_other) ordered_by_user = {} for child_self in in_s_not_in_o: child_other = etree.Element(child_self.tag, {operation_tag: self.preferred_delete}, nsmap=child_self.nsmap) if self.preferred_create != 'merge': child_self.set(operation_tag, self.preferred_create) siblings = list(node_other.iterchildren(tag=child_self.tag)) if siblings: siblings[-1].addnext(child_other) else: node_other.append(child_other) s_node = self.device.get_schema_node(child_self) if s_node.get('type') == 'leaf-list': if s_node.get('ordered-by') == 'user' and \ s_node.tag not in 
ordered_by_user: ordered_by_user[s_node.tag] = 'leaf-list' child_other.text = child_self.text elif s_node.get('type') == 'list': keys = self._get_list_keys(s_node) if s_node.get('ordered-by') == 'user' and \ s_node.tag not in ordered_by_user: ordered_by_user[s_node.tag] = keys for key in keys: key_node = child_self.find(key) e = etree.SubElement(child_other, key, nsmap=key_node.nsmap) e.text = key_node.text for child_other in in_o_not_in_s: child_self = etree.Element(child_other.tag, {operation_tag: self.preferred_delete}, nsmap=child_other.nsmap) if self.preferred_create != 'merge': child_other.set(operation_tag, self.preferred_create) siblings = list(node_self.iterchildren(tag=child_other.tag)) if siblings: siblings[-1].addnext(child_self) else: node_self.append(child_self) s_node = self.device.get_schema_node(child_other) if s_node.get('type') == 'leaf-list': if s_node.get('ordered-by') == 'user' and \ s_node.tag not in ordered_by_user: ordered_by_user[s_node.tag] = 'leaf-list' child_self.text = child_other.text elif s_node.get('type') == 'list': keys = self._get_list_keys(s_node) if s_node.get('ordered-by') == 'user' and \ s_node.tag not in ordered_by_user: ordered_by_user[s_node.tag] = keys for key in keys: key_node = child_other.find(key) e = etree.SubElement(child_self, key, nsmap=key_node.nsmap) e.text = key_node.text for child_self, child_other in in_s_and_in_o: s_node = self.device.get_schema_node(child_self) if s_node.get('type') == 'leaf': if child_self.text == child_other.text: if not s_node.get('is_key'): node_self.remove(child_self) node_other.remove(child_other) else: if self.preferred_replace != 'merge': child_self.set(operation_tag, self.preferred_replace) child_other.set(operation_tag, self.preferred_replace) elif s_node.get('type') == 'leaf-list': if s_node.get('ordered-by') == 'user': if s_node.tag not in ordered_by_user: ordered_by_user[s_node.tag] = 'leaf-list' else: node_self.remove(child_self) node_other.remove(child_other) elif 
s_node.get('type') == 'container': if self._node_le(child_self, child_other) and \ self._node_le(child_other, child_self): node_self.remove(child_self) node_other.remove(child_other) else: self.node_sub(child_self, child_other) elif s_node.get('type') == 'list': if s_node.get('ordered-by') == 'user' and \ s_node.tag not in ordered_by_user: ordered_by_user[s_node.tag] = self._get_list_keys(s_node) if self._node_le(child_self, child_other) and \ self._node_le(child_other, child_self): if s_node.get('ordered-by') == 'user': for child in child_self.getchildren(): schema_node = self.device.get_schema_node(child) if not schema_node.get('is_key'): child_self.remove(child) for child in child_other.getchildren(): schema_node = self.device.get_schema_node(child) if not schema_node.get('is_key'): child_other.remove(child) else: node_self.remove(child_self) node_other.remove(child_other) else: self.node_sub(child_self, child_other) else: path = self.device.get_xpath(s_node) raise ModelError("unknown schema node type: type of node {}" \ "is '{}'".format(path, s_node.get('type'))) for tag in ordered_by_user: scope_s = in_s_not_in_o + in_s_and_in_o scope_o = in_o_not_in_s + in_s_and_in_o for sequence in self._get_sequence(scope_s, tag, node_self), \ self._get_sequence(scope_o, tag, node_other): for item in sequence: # modifying the namespace mapping of a node is not possible # in lxml. See https://bugs.launchpad.net/lxml/+bug/555602 # if 'yang' not in item.nsmap: # item.nsmap['yang'] = yang_url i = sequence.index(item) if i == 0: item.set(insert_tag, 'first') else: item.set(insert_tag, 'after') precursor = sequence[i - 1] if ordered_by_user[tag] == 'leaf-list': item.set(value_tag, precursor.text) else: keys = ordered_by_user[tag] key_nodes = {k: precursor.find(k) for k in keys} ids = {k: self._url_to_prefix(n, k) \ for k, n in key_nodes.items()} l = ["[{}='{}']".format(ids[k], key_nodes[k].text) \ for k in keys] item.set(key_tag, ''.join(l))
[ "def", "node_sub", "(", "self", ",", "node_self", ",", "node_other", ")", ":", "def", "same_leaf_list", "(", "tag", ")", ":", "list_self", "=", "[", "c", "for", "c", "in", "list", "(", "node_self", ")", "if", "c", ".", "tag", "==", "tag", "]", "lis...
node_sub Low-level api: Compute the delta of two configs. This method is recursive. Assume two configs are different. Parameters ---------- node_self : `Element` A config node in a config tree that is being processed. node_self cannot be a leaf node. node_other : `Element` A config node in another config tree that is being processed. Returns ------- None There is no return of this method.
[ "node_sub" ]
python
train
joshspeagle/dynesty
dynesty/bounding.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/bounding.py#L102-L118
def samples(self, nsamples, rstate=None): """ Draw `nsamples` samples randomly distributed within the unit cube. Returns ------- x : `~numpy.ndarray` with shape (nsamples, ndim) A collection of coordinates within the unit cube. """ if rstate is None: rstate = np.random xs = np.array([self.sample(rstate=rstate) for i in range(nsamples)]) return xs
[ "def", "samples", "(", "self", ",", "nsamples", ",", "rstate", "=", "None", ")", ":", "if", "rstate", "is", "None", ":", "rstate", "=", "np", ".", "random", "xs", "=", "np", ".", "array", "(", "[", "self", ".", "sample", "(", "rstate", "=", "rsta...
Draw `nsamples` samples randomly distributed within the unit cube. Returns ------- x : `~numpy.ndarray` with shape (nsamples, ndim) A collection of coordinates within the unit cube.
[ "Draw", "nsamples", "samples", "randomly", "distributed", "within", "the", "unit", "cube", "." ]
python
train
mitsei/dlkit
dlkit/aws_adapter/repository/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/aws_adapter/repository/sessions.py#L1763-L1780
def delete_repository(self, repository_id=None): """Deletes a ``Repository``. arg: repository_id (osid.id.Id): the ``Id`` of the ``Repository`` to remove raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from awsosid template for - # osid.resource.BinAdminSession.delete_bin_template if not self._can('delete'): raise PermissionDenied() else: return self._provider_session.delete_repository(repository_id)
[ "def", "delete_repository", "(", "self", ",", "repository_id", "=", "None", ")", ":", "# Implemented from awsosid template for -", "# osid.resource.BinAdminSession.delete_bin_template", "if", "not", "self", ".", "_can", "(", "'delete'", ")", ":", "raise", "PermissionDenie...
Deletes a ``Repository``. arg: repository_id (osid.id.Id): the ``Id`` of the ``Repository`` to remove raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Deletes", "a", "Repository", "." ]
python
train
brunato/lograptor
lograptor/report.py
https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/report.py#L680-L689
def make_csv_tables(self): """ Builds the report as a list of csv tables with titles. """ logger.info('Generate csv report tables') report_parts = [] for sr in self.subreports: for data_item in sr.report_data: report_parts.append(TextPart(fmt='csv', text=data_item.csv, ext='csv')) return report_parts
[ "def", "make_csv_tables", "(", "self", ")", ":", "logger", ".", "info", "(", "'Generate csv report tables'", ")", "report_parts", "=", "[", "]", "for", "sr", "in", "self", ".", "subreports", ":", "for", "data_item", "in", "sr", ".", "report_data", ":", "re...
Builds the report as a list of csv tables with titles.
[ "Builds", "the", "report", "as", "a", "list", "of", "csv", "tables", "with", "titles", "." ]
python
train
eqcorrscan/EQcorrscan
eqcorrscan/utils/clustering.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L487-L523
def svd_to_stream(uvectors, stachans, k, sampling_rate): """ Convert the singular vectors output by SVD to streams. One stream will be generated for each singular vector level, for all channels. Useful for plotting, and aiding seismologists thinking of waveforms! :type svectors: list :param svectors: List of :class:`numpy.ndarray` Singular vectors :type stachans: list :param stachans: List of station.channel Strings :type k: int :param k: Number of streams to return = number of SV's to include :type sampling_rate: float :param sampling_rate: Sampling rate in Hz :returns: svstreams, List of :class:`obspy.core.stream.Stream`, with svStreams[0] being composed of the highest rank singular vectors. """ svstreams = [] for i in range(k): svstream = [] for j, stachan in enumerate(stachans): if len(uvectors[j]) <= k: warnings.warn('Too few traces at %s for a %02d dimensional ' 'subspace. Detector streams will not include ' 'this channel.' % ('.'.join(stachan[0], stachan[1]), k)) else: svstream.append(Trace(uvectors[j][i], header={'station': stachan[0], 'channel': stachan[1], 'sampling_rate': sampling_rate})) svstreams.append(Stream(svstream)) return svstreams
[ "def", "svd_to_stream", "(", "uvectors", ",", "stachans", ",", "k", ",", "sampling_rate", ")", ":", "svstreams", "=", "[", "]", "for", "i", "in", "range", "(", "k", ")", ":", "svstream", "=", "[", "]", "for", "j", ",", "stachan", "in", "enumerate", ...
Convert the singular vectors output by SVD to streams. One stream will be generated for each singular vector level, for all channels. Useful for plotting, and aiding seismologists thinking of waveforms! :type svectors: list :param svectors: List of :class:`numpy.ndarray` Singular vectors :type stachans: list :param stachans: List of station.channel Strings :type k: int :param k: Number of streams to return = number of SV's to include :type sampling_rate: float :param sampling_rate: Sampling rate in Hz :returns: svstreams, List of :class:`obspy.core.stream.Stream`, with svStreams[0] being composed of the highest rank singular vectors.
[ "Convert", "the", "singular", "vectors", "output", "by", "SVD", "to", "streams", "." ]
python
train
PythonCharmers/python-future
src/future/backports/email/_header_value_parser.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/_header_value_parser.py#L1828-L1873
def get_obs_local_part(value): """ obs-local-part = word *("." word) """ obs_local_part = ObsLocalPart() last_non_ws_was_dot = False while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS): if value[0] == '.': if last_non_ws_was_dot: obs_local_part.defects.append(errors.InvalidHeaderDefect( "invalid repeated '.'")) obs_local_part.append(DOT) last_non_ws_was_dot = True value = value[1:] continue elif value[0]=='\\': obs_local_part.append(ValueTerminal(value[0], 'misplaced-special')) value = value[1:] obs_local_part.defects.append(errors.InvalidHeaderDefect( "'\\' character outside of quoted-string/ccontent")) last_non_ws_was_dot = False continue if obs_local_part and obs_local_part[-1].token_type != 'dot': obs_local_part.defects.append(errors.InvalidHeaderDefect( "missing '.' between words")) try: token, value = get_word(value) last_non_ws_was_dot = False except errors.HeaderParseError: if value[0] not in CFWS_LEADER: raise token, value = get_cfws(value) obs_local_part.append(token) if (obs_local_part[0].token_type == 'dot' or obs_local_part[0].token_type=='cfws' and obs_local_part[1].token_type=='dot'): obs_local_part.defects.append(errors.InvalidHeaderDefect( "Invalid leading '.' in local part")) if (obs_local_part[-1].token_type == 'dot' or obs_local_part[-1].token_type=='cfws' and obs_local_part[-2].token_type=='dot'): obs_local_part.defects.append(errors.InvalidHeaderDefect( "Invalid trailing '.' in local part")) if obs_local_part.defects: obs_local_part.token_type = 'invalid-obs-local-part' return obs_local_part, value
[ "def", "get_obs_local_part", "(", "value", ")", ":", "obs_local_part", "=", "ObsLocalPart", "(", ")", "last_non_ws_was_dot", "=", "False", "while", "value", "and", "(", "value", "[", "0", "]", "==", "'\\\\'", "or", "value", "[", "0", "]", "not", "in", "P...
obs-local-part = word *("." word)
[ "obs", "-", "local", "-", "part", "=", "word", "*", "(", ".", "word", ")" ]
python
train
tornadoweb/tornado
tornado/web.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/web.py#L300-L317
def on_connection_close(self) -> None: """Called in async handlers if the client closed the connection. Override this to clean up resources associated with long-lived connections. Note that this method is called only if the connection was closed during asynchronous processing; if you need to do cleanup after every request override `on_finish` instead. Proxies may keep a connection open for a time (perhaps indefinitely) after the client has gone away, so this method may not be called promptly after the end user closes their connection. """ if _has_stream_request_body(self.__class__): if not self.request._body_future.done(): self.request._body_future.set_exception(iostream.StreamClosedError()) self.request._body_future.exception()
[ "def", "on_connection_close", "(", "self", ")", "->", "None", ":", "if", "_has_stream_request_body", "(", "self", ".", "__class__", ")", ":", "if", "not", "self", ".", "request", ".", "_body_future", ".", "done", "(", ")", ":", "self", ".", "request", "....
Called in async handlers if the client closed the connection. Override this to clean up resources associated with long-lived connections. Note that this method is called only if the connection was closed during asynchronous processing; if you need to do cleanup after every request override `on_finish` instead. Proxies may keep a connection open for a time (perhaps indefinitely) after the client has gone away, so this method may not be called promptly after the end user closes their connection.
[ "Called", "in", "async", "handlers", "if", "the", "client", "closed", "the", "connection", "." ]
python
train
saltstack/salt
salt/modules/neutron.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/neutron.py#L893-L910
def create_security_group(name=None, description=None, profile=None): ''' Creates a new security group CLI Example: .. code-block:: bash salt '*' neutron.create_security_group security-group-name \ description='Security group for servers' :param name: Name of security group (Optional) :param description: Description of security group (Optional) :param profile: Profile to build on (Optional) :return: Created security group information ''' conn = _auth(profile) return conn.create_security_group(name, description)
[ "def", "create_security_group", "(", "name", "=", "None", ",", "description", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "create_security_group", "(", "name", ",", "description", ")...
Creates a new security group CLI Example: .. code-block:: bash salt '*' neutron.create_security_group security-group-name \ description='Security group for servers' :param name: Name of security group (Optional) :param description: Description of security group (Optional) :param profile: Profile to build on (Optional) :return: Created security group information
[ "Creates", "a", "new", "security", "group" ]
python
train
bcbio/bcbio-nextgen
bcbio/variation/multi.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/multi.py#L173-L180
def _collapse_subitems(base, items): """Collapse full data representations relative to a standard base. """ out = [] for d in items: newd = _diff_dict(base, d) out.append(newd) return out
[ "def", "_collapse_subitems", "(", "base", ",", "items", ")", ":", "out", "=", "[", "]", "for", "d", "in", "items", ":", "newd", "=", "_diff_dict", "(", "base", ",", "d", ")", "out", ".", "append", "(", "newd", ")", "return", "out" ]
Collapse full data representations relative to a standard base.
[ "Collapse", "full", "data", "representations", "relative", "to", "a", "standard", "base", "." ]
python
train
pandas-dev/pandas
pandas/core/algorithms.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L373-L434
def isin(comps, values): """ Compute the isin boolean array Parameters ---------- comps : array-like values : array-like Returns ------- boolean array same length as comps """ if not is_list_like(comps): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{comps_type}]" .format(comps_type=type(comps).__name__)) if not is_list_like(values): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{values_type}]" .format(values_type=type(values).__name__)) if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)): values = construct_1d_object_array_from_listlike(list(values)) if is_categorical_dtype(comps): # TODO(extension) # handle categoricals return comps._values.isin(values) comps = com.values_from_object(comps) comps, dtype, _ = _ensure_data(comps) values, _, _ = _ensure_data(values, dtype=dtype) # faster for larger cases to use np.in1d f = lambda x, y: htable.ismember_object(x, values) # GH16012 # Ensure np.in1d doesn't get object types or it *may* throw an exception if len(comps) > 1000000 and not is_object_dtype(comps): f = lambda x, y: np.in1d(x, y) elif is_integer_dtype(comps): try: values = values.astype('int64', copy=False) comps = comps.astype('int64', copy=False) f = lambda x, y: htable.ismember_int64(x, y) except (TypeError, ValueError, OverflowError): values = values.astype(object) comps = comps.astype(object) elif is_float_dtype(comps): try: values = values.astype('float64', copy=False) comps = comps.astype('float64', copy=False) f = lambda x, y: htable.ismember_float64(x, y) except (TypeError, ValueError): values = values.astype(object) comps = comps.astype(object) return f(comps, values)
[ "def", "isin", "(", "comps", ",", "values", ")", ":", "if", "not", "is_list_like", "(", "comps", ")", ":", "raise", "TypeError", "(", "\"only list-like objects are allowed to be passed\"", "\" to isin(), you passed a [{comps_type}]\"", ".", "format", "(", "comps_type", ...
Compute the isin boolean array Parameters ---------- comps : array-like values : array-like Returns ------- boolean array same length as comps
[ "Compute", "the", "isin", "boolean", "array" ]
python
train
lingthio/Flask-User
flask_user/user_manager__views.py
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/user_manager__views.py#L215-L254
def email_action_view(self, id, action): """ Perform action 'action' on UserEmail object 'id' """ # Retrieve UserEmail by id user_email = self.db_manager.get_user_email_by_id(id=id) # Users may only change their own UserEmails if not user_email or user_email.user_id != current_user.id: return self.unauthorized_view() # Delete UserEmail if action == 'delete': # Primary UserEmail can not be deleted if user_email.is_primary: return self.unauthorized_view() # Delete UserEmail self.db_manager.delete_object(user_email) self.db_manager.commit() # Set UserEmail.is_primary elif action == 'make-primary': # Disable previously primary emails user_emails = self.db_manager.find_user_emails(current_user) for other_user_email in user_emails: if other_user_email.is_primary: other_user_email.is_primary=False self.db_manager.save_object(other_user_email) # Enable current primary email user_email.is_primary=True self.db_manager.save_object(user_email) self.db_manager.commit() # Send confirm email elif action == 'confirm': self._send_confirm_email_email(user_email.user, user_email) else: return self.unauthorized_view() return redirect(url_for('user.manage_emails'))
[ "def", "email_action_view", "(", "self", ",", "id", ",", "action", ")", ":", "# Retrieve UserEmail by id", "user_email", "=", "self", ".", "db_manager", ".", "get_user_email_by_id", "(", "id", "=", "id", ")", "# Users may only change their own UserEmails", "if", "no...
Perform action 'action' on UserEmail object 'id'
[ "Perform", "action", "action", "on", "UserEmail", "object", "id" ]
python
train
seequent/properties
properties/math.py
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/math.py#L373-L388
def validate(self, instance, value): """Check shape and dtype of vector validate also coerces the vector from valid strings (these include ZERO, X, Y, -X, -Y, EAST, WEST, NORTH, and SOUTH) and scales it to the given length. """ if isinstance(value, string_types): if ( value.upper() not in VECTOR_DIRECTIONS or value.upper() in ('Z', '-Z', 'UP', 'DOWN') ): self.error(instance, value) value = VECTOR_DIRECTIONS[value.upper()][:2] return super(Vector2, self).validate(instance, value)
[ "def", "validate", "(", "self", ",", "instance", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "string_types", ")", ":", "if", "(", "value", ".", "upper", "(", ")", "not", "in", "VECTOR_DIRECTIONS", "or", "value", ".", "upper", "(", ...
Check shape and dtype of vector validate also coerces the vector from valid strings (these include ZERO, X, Y, -X, -Y, EAST, WEST, NORTH, and SOUTH) and scales it to the given length.
[ "Check", "shape", "and", "dtype", "of", "vector" ]
python
train
b-loyola/itglue-py
itglue/resources.py
https://github.com/b-loyola/itglue-py/blob/5dc6fca8eab4af23de620d6a916bbaf5ebe02a26/itglue/resources.py#L156-L165
def filter(cls, parent=None, **filters): """ Gets all resources of the given type and parent (if provided) which match the given filters. This will trigger an api GET request. :param parent ResourceBase: the parent of the resource - used for nesting the request url, optional :param **filters: any number of keyword arguments to filter by, e.g name='example name' :returns: a list of matching resources """ data = cls._process_filter_request(parent, **filters) return cls._load_resources(data)
[ "def", "filter", "(", "cls", ",", "parent", "=", "None", ",", "*", "*", "filters", ")", ":", "data", "=", "cls", ".", "_process_filter_request", "(", "parent", ",", "*", "*", "filters", ")", "return", "cls", ".", "_load_resources", "(", "data", ")" ]
Gets all resources of the given type and parent (if provided) which match the given filters. This will trigger an api GET request. :param parent ResourceBase: the parent of the resource - used for nesting the request url, optional :param **filters: any number of keyword arguments to filter by, e.g name='example name' :returns: a list of matching resources
[ "Gets", "all", "resources", "of", "the", "given", "type", "and", "parent", "(", "if", "provided", ")", "which", "match", "the", "given", "filters", ".", "This", "will", "trigger", "an", "api", "GET", "request", ".", ":", "param", "parent", "ResourceBase", ...
python
train
tensorflow/cleverhans
examples/multigpu_advtrain/attacks_multigpu.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/attacks_multigpu.py#L42-L106
def attack(self, x, y_p, **kwargs): """ This method creates a symoblic graph of the MadryEtAl attack on multiple GPUs. The graph is created on the first n GPUs. Stop gradient is needed to get the speed-up. This prevents us from being able to back-prop through the attack. :param x: A tensor with the input image. :param y_p: Ground truth label or predicted label. :return: Two lists containing the input and output tensors of each GPU. """ inputs = [] outputs = [] # Create the initial random perturbation device_name = '/gpu:0' self.model.set_device(device_name) with tf.device(device_name): with tf.variable_scope('init_rand'): if self.rand_init: eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps) eta = clip_eta(eta, self.ord, self.eps) eta = tf.stop_gradient(eta) else: eta = tf.zeros_like(x) # TODO: Break the graph only nGPU times instead of nb_iter times. # The current implementation by the time an adversarial example is # used for training, the weights of the model have changed nb_iter # times. This can cause slower convergence compared to the single GPU # adversarial training. for i in range(self.nb_iter): # Create the graph for i'th step of attack inputs += [OrderedDict()] outputs += [OrderedDict()] device_name = x.device self.model.set_device(device_name) with tf.device(device_name): with tf.variable_scope('step%d' % i): if i > 0: # Clone the variables to separate the graph of 2 GPUs x = clone_variable('x', x) y_p = clone_variable('y_p', y_p) eta = clone_variable('eta', eta) inputs[i]['x'] = x inputs[i]['y_p'] = y_p outputs[i]['x'] = x outputs[i]['y_p'] = y_p inputs[i]['eta'] = eta eta = self.attack_single_step(x, eta, y_p) if i < self.nb_iter-1: outputs[i]['eta'] = eta else: # adv_x, not eta is the output of the last step adv_x = x + eta if (self.clip_min is not None and self.clip_max is not None): adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max) adv_x = tf.stop_gradient(adv_x, name='adv_x') outputs[i]['adv_x'] = adv_x return inputs, outputs
[ "def", "attack", "(", "self", ",", "x", ",", "y_p", ",", "*", "*", "kwargs", ")", ":", "inputs", "=", "[", "]", "outputs", "=", "[", "]", "# Create the initial random perturbation", "device_name", "=", "'/gpu:0'", "self", ".", "model", ".", "set_device", ...
This method creates a symoblic graph of the MadryEtAl attack on multiple GPUs. The graph is created on the first n GPUs. Stop gradient is needed to get the speed-up. This prevents us from being able to back-prop through the attack. :param x: A tensor with the input image. :param y_p: Ground truth label or predicted label. :return: Two lists containing the input and output tensors of each GPU.
[ "This", "method", "creates", "a", "symoblic", "graph", "of", "the", "MadryEtAl", "attack", "on", "multiple", "GPUs", ".", "The", "graph", "is", "created", "on", "the", "first", "n", "GPUs", "." ]
python
train
dshean/pygeotools
pygeotools/lib/filtlib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L462-L470
def erode_edge(dem, iterations=1): """Erode pixels near nodata """ import scipy.ndimage as ndimage print('Eroding pixels near nodata: %i iterations' % iterations) mask = np.ma.getmaskarray(dem) mask_dilate = ndimage.morphology.binary_dilation(mask, iterations=iterations) out = np.ma.array(dem, mask=mask_dilate) return out
[ "def", "erode_edge", "(", "dem", ",", "iterations", "=", "1", ")", ":", "import", "scipy", ".", "ndimage", "as", "ndimage", "print", "(", "'Eroding pixels near nodata: %i iterations'", "%", "iterations", ")", "mask", "=", "np", ".", "ma", ".", "getmaskarray", ...
Erode pixels near nodata
[ "Erode", "pixels", "near", "nodata" ]
python
train
EnigmaBridge/jbossply
jbossply/jbossparser.py
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L179-L183
def t_escaped_FORM_FEED_CHAR(self, t): r'\x66' # 'f' t.lexer.pop_state() t.value = unichr(0x000c) return t
[ "def", "t_escaped_FORM_FEED_CHAR", "(", "self", ",", "t", ")", ":", "# 'f'", "t", ".", "lexer", ".", "pop_state", "(", ")", "t", ".", "value", "=", "unichr", "(", "0x000c", ")", "return", "t" ]
r'\x66
[ "r", "\\", "x66" ]
python
train
jonhadfield/creds
lib/creds/users.py
https://github.com/jonhadfield/creds/blob/b2053b43516cf742c6e4c2b79713bc625592f47c/lib/creds/users.py#L234-L242
def export(self, file_path=None, export_format=None): """ Write the users to a file. """ with io.open(file_path, mode='w', encoding="utf-8") as export_file: if export_format == 'yaml': import yaml yaml.safe_dump(self.to_dict(), export_file, default_flow_style=False) elif export_format == 'json': export_file.write(text_type(json.dumps(self.to_dict(), ensure_ascii=False))) return True
[ "def", "export", "(", "self", ",", "file_path", "=", "None", ",", "export_format", "=", "None", ")", ":", "with", "io", ".", "open", "(", "file_path", ",", "mode", "=", "'w'", ",", "encoding", "=", "\"utf-8\"", ")", "as", "export_file", ":", "if", "e...
Write the users to a file.
[ "Write", "the", "users", "to", "a", "file", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L464-L484
def splits_and_paths(self, data_dir): """List of pairs (split, paths) for the current epoch.""" filepath_fns = { problem.DatasetSplit.TRAIN: self.training_filepaths, problem.DatasetSplit.EVAL: self.dev_filepaths, problem.DatasetSplit.TEST: self.test_filepaths, } def append_epoch(paths): return [ "{}.{}".format(path, self.current_epoch) for path in paths ] # We set shuffled=True as we don't want to shuffle on disk later. return [ (split["split"], append_epoch(filepath_fns[split["split"]]( data_dir, split["shards"], shuffled=True ))) for split in self.dataset_splits ]
[ "def", "splits_and_paths", "(", "self", ",", "data_dir", ")", ":", "filepath_fns", "=", "{", "problem", ".", "DatasetSplit", ".", "TRAIN", ":", "self", ".", "training_filepaths", ",", "problem", ".", "DatasetSplit", ".", "EVAL", ":", "self", ".", "dev_filepa...
List of pairs (split, paths) for the current epoch.
[ "List", "of", "pairs", "(", "split", "paths", ")", "for", "the", "current", "epoch", "." ]
python
train
havardgulldahl/jottalib
src/jottalib/jottacloud.py
https://github.com/havardgulldahl/jottalib/blob/4d015e4309b1d9055e561ec757363fb2632b4eb7/src/jottalib/jottacloud.py#L63-L70
def is_file(jottapath, JFS): """Check if a file exists on jottacloud""" log.debug("is_file %r", jottapath) try: jf = JFS.getObject(jottapath) except JFSNotFoundError: return False return isinstance(jf, JFSFile)
[ "def", "is_file", "(", "jottapath", ",", "JFS", ")", ":", "log", ".", "debug", "(", "\"is_file %r\"", ",", "jottapath", ")", "try", ":", "jf", "=", "JFS", ".", "getObject", "(", "jottapath", ")", "except", "JFSNotFoundError", ":", "return", "False", "ret...
Check if a file exists on jottacloud
[ "Check", "if", "a", "file", "exists", "on", "jottacloud" ]
python
train
cytoscape/py2cytoscape
py2cytoscape/cyrest/cyndex2.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/cyndex2.py#L52-L66
def getNetworkSummary(self, suid, verbose=None): """ Returns summary of collection containing the specified network. :param suid: Cytoscape Collection/Subnetwork SUID :param verbose: print more :returns: 200: successful operation """ surl=self.___url sv=surl.split('/')[-1] surl=surl.rstrip(sv+'/') response=api(url=surl+'/cyndex2/'+sv+'/networks/'+str(suid)+'', method="GET", verbose=verbose, parse_params=False) return response
[ "def", "getNetworkSummary", "(", "self", ",", "suid", ",", "verbose", "=", "None", ")", ":", "surl", "=", "self", ".", "___url", "sv", "=", "surl", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "surl", "=", "surl", ".", "rstrip", "(", "sv", ...
Returns summary of collection containing the specified network. :param suid: Cytoscape Collection/Subnetwork SUID :param verbose: print more :returns: 200: successful operation
[ "Returns", "summary", "of", "collection", "containing", "the", "specified", "network", "." ]
python
train
log2timeline/plaso
plaso/parsers/text_parser.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/text_parser.py#L487-L516
def _ReadLine(self, file_object): """Reads a line from the file object. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the file-like object. """ if len(self._buffer) < self._buffer_size: content = file_object.read(self._buffer_size) content = content.decode(self._encoding) self._buffer = ''.join([self._buffer, content]) line, new_line, self._buffer = self._buffer.partition('\n') if not line and not new_line: line = self._buffer self._buffer = '' self._current_offset += len(line) # Strip carriage returns from the text. if line.endswith('\r'): line = line[:-len('\r')] if new_line: line = ''.join([line, '\n']) self._current_offset += len('\n') return line
[ "def", "_ReadLine", "(", "self", ",", "file_object", ")", ":", "if", "len", "(", "self", ".", "_buffer", ")", "<", "self", ".", "_buffer_size", ":", "content", "=", "file_object", ".", "read", "(", "self", ".", "_buffer_size", ")", "content", "=", "con...
Reads a line from the file object. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the file-like object.
[ "Reads", "a", "line", "from", "the", "file", "object", "." ]
python
train
ConsenSys/mythril-classic
mythril/ethereum/interface/leveldb/client.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/ethereum/interface/leveldb/client.py#L146-L157
def _get_block_receipts(self, block_hash, num): """Get block transaction receipts by block header hash & number. :param block_hash: :param num: :return: """ number = _format_block_number(num) receipts_key = block_receipts_prefix + number + block_hash receipts_data = self.db.get(receipts_key) receipts = rlp.decode(receipts_data, sedes=CountableList(ReceiptForStorage)) return receipts
[ "def", "_get_block_receipts", "(", "self", ",", "block_hash", ",", "num", ")", ":", "number", "=", "_format_block_number", "(", "num", ")", "receipts_key", "=", "block_receipts_prefix", "+", "number", "+", "block_hash", "receipts_data", "=", "self", ".", "db", ...
Get block transaction receipts by block header hash & number. :param block_hash: :param num: :return:
[ "Get", "block", "transaction", "receipts", "by", "block", "header", "hash", "&", "number", "." ]
python
train
lpantano/seqcluster
seqcluster/libs/tool.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/tool.py#L132-L138
def _normalize_seqs(s, t): """Normalize to RPM""" for ids in s: obj = s[ids] [obj.norm_freq.update({sample: 1.0 * obj.freq[sample] / (t[sample]+1) * 1000000}) for sample in obj.norm_freq] s[ids] = obj return s
[ "def", "_normalize_seqs", "(", "s", ",", "t", ")", ":", "for", "ids", "in", "s", ":", "obj", "=", "s", "[", "ids", "]", "[", "obj", ".", "norm_freq", ".", "update", "(", "{", "sample", ":", "1.0", "*", "obj", ".", "freq", "[", "sample", "]", ...
Normalize to RPM
[ "Normalize", "to", "RPM" ]
python
train
thoughtworksarts/EmoPy
EmoPy/src/neuralnets.py
https://github.com/thoughtworksarts/EmoPy/blob/a0ab97b3719ebe0a9de9bfc5adae5e46c9b77fd7/EmoPy/src/neuralnets.py#L300-L320
def _init_model(self): """ Composes all layers of 3D CNN. """ model = Sequential() model.add(Conv3D(input_shape=[self.time_delay] + list(self.image_size) + [self.channels], filters=self.filters, kernel_size=self.kernel_size, activation='relu', data_format='channels_last')) model.add( Conv3D(filters=self.filters, kernel_size=self.kernel_size, activation='relu', data_format='channels_last')) model.add(MaxPooling3D(pool_size=(1, 2, 2), data_format='channels_last')) model.add( Conv3D(filters=self.filters, kernel_size=self.kernel_size, activation='relu', data_format='channels_last')) model.add( Conv3D(filters=self.filters, kernel_size=self.kernel_size, activation='relu', data_format='channels_last')) model.add(MaxPooling3D(pool_size=(1, 2, 2), data_format='channels_last')) model.add(Flatten()) model.add(Dense(units=len(self.emotion_map.keys()), activation="relu")) if self.verbose: model.summary() self.model = model
[ "def", "_init_model", "(", "self", ")", ":", "model", "=", "Sequential", "(", ")", "model", ".", "add", "(", "Conv3D", "(", "input_shape", "=", "[", "self", ".", "time_delay", "]", "+", "list", "(", "self", ".", "image_size", ")", "+", "[", "self", ...
Composes all layers of 3D CNN.
[ "Composes", "all", "layers", "of", "3D", "CNN", "." ]
python
train
nhfruchter/pgh-bustime
pghbustime/interface.py
https://github.com/nhfruchter/pgh-bustime/blob/b915e8fea28541612f0e79783c2cf12fd3daaac0/pghbustime/interface.py#L144-L183
def vehicles(self, vid=None, rt=None): """ Get busses by route or by vehicle ID. Arguments: either `vid`: "Set of one or more vehicle IDs whose location should be returned." Maximum of 10 `vid`s, either in a comma-separated list or an iterable. `rt`: "Set of one or more route designators for which matching vehicles should be returned." Maximum of 10 routes, either in a comma-separated list or an iterable. Response: `vehicle`: (vehicle container) contains list of `vid`: bus # `tmstmp`: local date/time of vehicle update `lat`, `lon`: position `hdg`: vehicle heading (e.g., 0: north, 180: south) `pid`: pattern ID of current trip (see `self.geopatterns`) `pdist`: distance into trip `rt`: route (e.g, 88) `des`: bus destinations (e.g., "Penn to Bakery Square") `dly` (optional): True if bus is delayed `spd`: speed in mph `zone`: current zone (usually `None` here) `tablockid`, `tatripid`: unsure, seems internal? http://realtime.portauthority.org/bustime/apidoc/v1/main.jsp?section=vehicles.jsp """ if vid and rt: raise ValueError("The `vid` and `route` parameters cannot be specified simultaneously.") if not (vid or rt): raise ValueError("You must specify either the `vid` or `rt` parameter.") # Turn list into comma separated string if listlike(rt): rt = ",".join( map(str, rt) ) if listlike(vid): vid = ",".join( map(str, vid) ) url = self.endpoint('VEHICLES', dict(vid=vid, rt=rt)) return self.response(url)
[ "def", "vehicles", "(", "self", ",", "vid", "=", "None", ",", "rt", "=", "None", ")", ":", "if", "vid", "and", "rt", ":", "raise", "ValueError", "(", "\"The `vid` and `route` parameters cannot be specified simultaneously.\"", ")", "if", "not", "(", "vid", "or"...
Get busses by route or by vehicle ID. Arguments: either `vid`: "Set of one or more vehicle IDs whose location should be returned." Maximum of 10 `vid`s, either in a comma-separated list or an iterable. `rt`: "Set of one or more route designators for which matching vehicles should be returned." Maximum of 10 routes, either in a comma-separated list or an iterable. Response: `vehicle`: (vehicle container) contains list of `vid`: bus # `tmstmp`: local date/time of vehicle update `lat`, `lon`: position `hdg`: vehicle heading (e.g., 0: north, 180: south) `pid`: pattern ID of current trip (see `self.geopatterns`) `pdist`: distance into trip `rt`: route (e.g, 88) `des`: bus destinations (e.g., "Penn to Bakery Square") `dly` (optional): True if bus is delayed `spd`: speed in mph `zone`: current zone (usually `None` here) `tablockid`, `tatripid`: unsure, seems internal? http://realtime.portauthority.org/bustime/apidoc/v1/main.jsp?section=vehicles.jsp
[ "Get", "busses", "by", "route", "or", "by", "vehicle", "ID", ".", "Arguments", ":", "either", "vid", ":", "Set", "of", "one", "or", "more", "vehicle", "IDs", "whose", "location", "should", "be", "returned", ".", "Maximum", "of", "10", "vid", "s", "eith...
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAFetch/QAQuery_Advance.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QAQuery_Advance.py#L516-L532
def QA_fetch_financial_report_adv(code, start, end=None, ltype='EN'): """高级财务查询接口 Arguments: code {[type]} -- [description] start {[type]} -- [description] Keyword Arguments: end {[type]} -- [description] (default: {None}) """ if end is None: return QA_DataStruct_Financial(QA_fetch_financial_report(code, start, ltype=ltype)) else: series = pd.Series( data=month_data, index=pd.to_datetime(month_data), name='date') timerange = series.loc[start:end].tolist() return QA_DataStruct_Financial(QA_fetch_financial_report(code, timerange, ltype=ltype))
[ "def", "QA_fetch_financial_report_adv", "(", "code", ",", "start", ",", "end", "=", "None", ",", "ltype", "=", "'EN'", ")", ":", "if", "end", "is", "None", ":", "return", "QA_DataStruct_Financial", "(", "QA_fetch_financial_report", "(", "code", ",", "start", ...
高级财务查询接口 Arguments: code {[type]} -- [description] start {[type]} -- [description] Keyword Arguments: end {[type]} -- [description] (default: {None})
[ "高级财务查询接口", "Arguments", ":", "code", "{", "[", "type", "]", "}", "--", "[", "description", "]", "start", "{", "[", "type", "]", "}", "--", "[", "description", "]", "Keyword", "Arguments", ":", "end", "{", "[", "type", "]", "}", "--", "[", "descrip...
python
train
knipknap/exscript
Exscript/protocols/telnetlib.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/protocols/telnetlib.py#L259-L272
def msg(self, msg, *args): """Print a debug message, when the debug level is > 0. If extra arguments are present, they are substituted in the message using the standard string formatting operator. """ if self.debuglevel > 0: self.stderr.write('Telnet(%s,%d): ' % (self.host, self.port)) if args: self.stderr.write(msg % args) else: self.stderr.write(msg) self.stderr.write('\n')
[ "def", "msg", "(", "self", ",", "msg", ",", "*", "args", ")", ":", "if", "self", ".", "debuglevel", ">", "0", ":", "self", ".", "stderr", ".", "write", "(", "'Telnet(%s,%d): '", "%", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", ...
Print a debug message, when the debug level is > 0. If extra arguments are present, they are substituted in the message using the standard string formatting operator.
[ "Print", "a", "debug", "message", "when", "the", "debug", "level", "is", ">", "0", "." ]
python
train
nephila/django-knocker
knocker/signals.py
https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/signals.py#L12-L31
def notify_items(**kwargs): """ Signal endpoint that actually sends knocks whenever an instance is created / saved """ instance = kwargs.get('instance') created = kwargs.get('created', False) if hasattr(instance, 'send_knock') and active_knocks(instance): try: # This is a stupid generic interface for multilanguage models (hvad / parler) if hasattr(instance, 'get_available_languages'): langs = instance.get_available_languages() else: langs = [get_language()] for lang in langs: with override(lang): instance.send_knock(created) return True except AttributeError: # pragma: no cover pass return False
[ "def", "notify_items", "(", "*", "*", "kwargs", ")", ":", "instance", "=", "kwargs", ".", "get", "(", "'instance'", ")", "created", "=", "kwargs", ".", "get", "(", "'created'", ",", "False", ")", "if", "hasattr", "(", "instance", ",", "'send_knock'", "...
Signal endpoint that actually sends knocks whenever an instance is created / saved
[ "Signal", "endpoint", "that", "actually", "sends", "knocks", "whenever", "an", "instance", "is", "created", "/", "saved" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datamodel/datasets.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/datasets.py#L261-L268
def getContinuousSet(self, id_): """ Returns the ContinuousSet with the specified id, or raises a ContinuousSetNotFoundException otherwise. """ if id_ not in self._continuousSetIdMap: raise exceptions.ContinuousSetNotFoundException(id_) return self._continuousSetIdMap[id_]
[ "def", "getContinuousSet", "(", "self", ",", "id_", ")", ":", "if", "id_", "not", "in", "self", ".", "_continuousSetIdMap", ":", "raise", "exceptions", ".", "ContinuousSetNotFoundException", "(", "id_", ")", "return", "self", ".", "_continuousSetIdMap", "[", "...
Returns the ContinuousSet with the specified id, or raises a ContinuousSetNotFoundException otherwise.
[ "Returns", "the", "ContinuousSet", "with", "the", "specified", "id", "or", "raises", "a", "ContinuousSetNotFoundException", "otherwise", "." ]
python
train
inveniosoftware/invenio-migrator
invenio_migrator/records.py
https://github.com/inveniosoftware/invenio-migrator/blob/6902c6968a39b747d15e32363f43b7dffe2622c2/invenio_migrator/records.py#L153-L194
def create_files(cls, record, files, existing_files): """Create files. This method is currently limited to a single bucket per record. """ default_bucket = None # Look for bucket id in existing files. for f in existing_files: if 'bucket' in f: default_bucket = f['bucket'] break # Create a bucket in default location if none is found. if default_bucket is None: b = Bucket.create() BucketTag.create(b, 'record', str(record.id)) default_bucket = str(b.id) db.session.commit() else: b = Bucket.get(default_bucket) record['_files'] = [] for key, meta in files.items(): obj = cls.create_file(b, key, meta) ext = splitext(obj.key)[1].lower() if ext.startswith('.'): ext = ext[1:] record['_files'].append(dict( bucket=str(obj.bucket.id), key=obj.key, version_id=str(obj.version_id), size=obj.file.size, checksum=obj.file.checksum, type=ext, )) db.session.add( RecordsBuckets(record_id=record.id, bucket_id=b.id) ) record.commit() db.session.commit() return [b]
[ "def", "create_files", "(", "cls", ",", "record", ",", "files", ",", "existing_files", ")", ":", "default_bucket", "=", "None", "# Look for bucket id in existing files.", "for", "f", "in", "existing_files", ":", "if", "'bucket'", "in", "f", ":", "default_bucket", ...
Create files. This method is currently limited to a single bucket per record.
[ "Create", "files", "." ]
python
test
theolind/pymysensors
mysensors/gateway_mqtt.py
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/gateway_mqtt.py#L140-L149
def send(self, message): """Publish a command string to the gateway via MQTT.""" if not message: return topic, payload, qos = self._parse_message_to_mqtt(message) try: _LOGGER.debug('Publishing %s', message.strip()) self._pub_callback(topic, payload, qos, self._retain) except Exception as exception: # pylint: disable=broad-except _LOGGER.exception('Publish to %s failed: %s', topic, exception)
[ "def", "send", "(", "self", ",", "message", ")", ":", "if", "not", "message", ":", "return", "topic", ",", "payload", ",", "qos", "=", "self", ".", "_parse_message_to_mqtt", "(", "message", ")", "try", ":", "_LOGGER", ".", "debug", "(", "'Publishing %s'"...
Publish a command string to the gateway via MQTT.
[ "Publish", "a", "command", "string", "to", "the", "gateway", "via", "MQTT", "." ]
python
train
numenta/nupic
src/nupic/algorithms/fdrutilities.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L128-L158
def generateSimpleSequences(nCoinc=10, seqLength=[5,6,7], nSeq=100): """ Generate a set of simple sequences. The elements of the sequences will be integers from 0 to 'nCoinc'-1. The length of each sequence will be randomly chosen from the 'seqLength' list. Parameters: ----------------------------------------------- nCoinc: the number of elements available to use in the sequences seqLength: a list of possible sequence lengths. The length of each sequence will be randomly chosen from here. nSeq: The number of sequences to generate retval: a list of sequences. Each sequence is itself a list containing the coincidence indices for that sequence. """ coincList = range(nCoinc) seqList = [] for i in xrange(nSeq): if max(seqLength) <= nCoinc: seqList.append(random.sample(coincList, random.choice(seqLength))) else: len = random.choice(seqLength) seq = [] for x in xrange(len): seq.append(random.choice(coincList)) seqList.append(seq) return seqList
[ "def", "generateSimpleSequences", "(", "nCoinc", "=", "10", ",", "seqLength", "=", "[", "5", ",", "6", ",", "7", "]", ",", "nSeq", "=", "100", ")", ":", "coincList", "=", "range", "(", "nCoinc", ")", "seqList", "=", "[", "]", "for", "i", "in", "x...
Generate a set of simple sequences. The elements of the sequences will be integers from 0 to 'nCoinc'-1. The length of each sequence will be randomly chosen from the 'seqLength' list. Parameters: ----------------------------------------------- nCoinc: the number of elements available to use in the sequences seqLength: a list of possible sequence lengths. The length of each sequence will be randomly chosen from here. nSeq: The number of sequences to generate retval: a list of sequences. Each sequence is itself a list containing the coincidence indices for that sequence.
[ "Generate", "a", "set", "of", "simple", "sequences", ".", "The", "elements", "of", "the", "sequences", "will", "be", "integers", "from", "0", "to", "nCoinc", "-", "1", ".", "The", "length", "of", "each", "sequence", "will", "be", "randomly", "chosen", "f...
python
valid
fastai/fastai
fastai/vision/gan.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L284-L286
def on_batch_begin(self, train, **kwargs): "Multiply the current lr if necessary." if not self.learn.gan_trainer.gen_mode and train: self.learn.opt.lr *= self.mult_lr
[ "def", "on_batch_begin", "(", "self", ",", "train", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "learn", ".", "gan_trainer", ".", "gen_mode", "and", "train", ":", "self", ".", "learn", ".", "opt", ".", "lr", "*=", "self", ".", "mul...
Multiply the current lr if necessary.
[ "Multiply", "the", "current", "lr", "if", "necessary", "." ]
python
train
psphere-project/psphere
examples/vidiscovery.py
https://github.com/psphere-project/psphere/blob/83a252e037c3d6e4f18bcd37380998bc9535e591/examples/vidiscovery.py#L27-L53
def discovery(self, compute_resource): """An example that discovers hosts and VMs in the inventory.""" # Find the first ClusterComputeResource if compute_resource is None: cr_list = ComputeResource.all(self.client) print("ERROR: You must specify a ComputeResource.") print("Available ComputeResource's:") for cr in cr_list: print(cr.name) sys.exit(1) try: ccr = ComputeResource.get(self.client, name=compute_resource) except ObjectNotFoundError: print("ERROR: Could not find ComputeResource with name %s" % compute_resource) sys.exit(1) print('Cluster: %s (%s hosts)' % (ccr.name, len(ccr.host))) ccr.preload("host", properties=["name", "vm"]) for host in ccr.host: print(' Host: %s (%s VMs)' % (host.name, len(host.vm))) # Get the vm views in one fell swoop host.preload("vm", properties=["name"]) for vm in host.vm: print(' VM: %s' % vm.name)
[ "def", "discovery", "(", "self", ",", "compute_resource", ")", ":", "# Find the first ClusterComputeResource", "if", "compute_resource", "is", "None", ":", "cr_list", "=", "ComputeResource", ".", "all", "(", "self", ".", "client", ")", "print", "(", "\"ERROR: You ...
An example that discovers hosts and VMs in the inventory.
[ "An", "example", "that", "discovers", "hosts", "and", "VMs", "in", "the", "inventory", "." ]
python
train
rigetti/quantumflow
quantumflow/qubits.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/qubits.py#L233-L242
def inner_product(vec0: QubitVector, vec1: QubitVector) -> bk.BKTensor: """ Hilbert-Schmidt inner product between qubit vectors The tensor rank and qubits must match. """ if vec0.rank != vec1.rank or vec0.qubit_nb != vec1.qubit_nb: raise ValueError('Incompatibly vectors. Qubits and rank must match') vec1 = vec1.permute(vec0.qubits) # Make sure qubits in same order return bk.inner(vec0.tensor, vec1.tensor)
[ "def", "inner_product", "(", "vec0", ":", "QubitVector", ",", "vec1", ":", "QubitVector", ")", "->", "bk", ".", "BKTensor", ":", "if", "vec0", ".", "rank", "!=", "vec1", ".", "rank", "or", "vec0", ".", "qubit_nb", "!=", "vec1", ".", "qubit_nb", ":", ...
Hilbert-Schmidt inner product between qubit vectors The tensor rank and qubits must match.
[ "Hilbert", "-", "Schmidt", "inner", "product", "between", "qubit", "vectors" ]
python
train
BreakingBytes/simkit
simkit/core/formulas.py
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/formulas.py#L82-L165
def import_formulas(self): """ Import formulas specified in :attr:`parameters`. :returns: formulas :rtype: dict """ # TODO: unit tests! # TODO: move this to somewhere else and call it "importy", maybe # core.__init__.py since a lot of modules might use it. module = self.meta.module # module read from parameters package = getattr(self.meta, 'package', None) # package read from meta name = package + module if package else module # concat pkg + name path = getattr(self.meta, 'path', None) # path read from parameters # import module using module and package mod = None # SEE ALSO: http://docs.python.org/2/library/imp.html#examples try: # fast path: see if module was already imported mod = sys.modules[name] except KeyError: try: # import module specified in parameters mod = importlib.import_module(module, package) except ImportError as err: if not path: msg = ('%s could not be imported either because it was not ' 'on the PYTHONPATH or path was not given.') LOGGER.exception(msg, name) raise err else: # import module using path # expand ~, environmental variables and make path absolute if not os.path.isabs(path): path = os.path.expanduser(os.path.expandvars(path)) path = os.path.abspath(path) # paths must be a list paths = [path] # imp does not find hierarchical module names, find and load # packages recursively, then load module, see last paragraph # https://docs.python.org/2/library/imp.html#imp.find_module pname = '' # full dotted name of package to load # traverse namespace while name: # if dot in name get first package if '.' 
in name: pkg, name = name.split('.', 1) else: pkg, name = name, None # pkg is the module # Find package or module by name and path fp, filename, desc = imp.find_module(pkg, paths) # full dotted name of package to load pname = pkg if not pname else '%s.%s' % (pname, pkg) LOGGER.debug('package name: %s', pname) # try to load the package or module try: mod = imp.load_module(pname, fp, filename, desc) finally: if fp: fp.close() # append package paths for imp.find_module if name: paths = mod.__path__ formulas = {} # an empty list of formulas formula_param = self.parameters # formulas key # FYI: iterating over dictionary is equivalent to iterkeys() if isinstance(formula_param, (list, tuple, dict)): # iterate through formulas for f in formula_param: formulas[f] = getattr(mod, f) elif isinstance(formula_param, basestring): # only one formula # FYI: use basestring to test for str and unicode # SEE: http://docs.python.org/2/library/functions.html#basestring formulas[formula_param] = getattr(mod, formula_param) else: # autodetect formulas assuming first letter is f formulas = {f: getattr(mod, f) for f in dir(mod) if f[:2] == 'f_'} if not len(formulas): for f in dir(mod): mod_attr = getattr(mod, f) if inspect.isfunction(mod_attr): formulas[f] = mod_attr return formulas
[ "def", "import_formulas", "(", "self", ")", ":", "# TODO: unit tests!", "# TODO: move this to somewhere else and call it \"importy\", maybe", "# core.__init__.py since a lot of modules might use it.", "module", "=", "self", ".", "meta", ".", "module", "# module read from parameters",...
Import formulas specified in :attr:`parameters`. :returns: formulas :rtype: dict
[ "Import", "formulas", "specified", "in", ":", "attr", ":", "parameters", "." ]
python
train
wbond/oscrypto
oscrypto/keys.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/keys.py#L340-L428
def _unarmor_pem_openssl_private(headers, data, password): """ Parses a PKCS#1 private key, or encrypted private key :param headers: A dict of "Name: Value" lines from right after the PEM header :param data: A byte string of the DER-encoded PKCS#1 private key :param password: A byte string of the password to use if the private key is encrypted :return: A byte string of the DER-encoded private key """ enc_algo = None enc_iv_hex = None enc_iv = None if 'DEK-Info' in headers: params = headers['DEK-Info'] if params.find(',') != -1: enc_algo, enc_iv_hex = params.strip().split(',') else: enc_algo = 'RC4' if not enc_algo: return data if enc_iv_hex: enc_iv = binascii.unhexlify(enc_iv_hex.encode('ascii')) enc_algo = enc_algo.lower() enc_key_length = { 'aes-128-cbc': 16, 'aes-128': 16, 'aes-192-cbc': 24, 'aes-192': 24, 'aes-256-cbc': 32, 'aes-256': 32, 'rc4': 16, 'rc4-64': 8, 'rc4-40': 5, 'rc2-64-cbc': 8, 'rc2-40-cbc': 5, 'rc2-cbc': 16, 'rc2': 16, 'des-ede3-cbc': 24, 'des-ede3': 24, 'des3': 24, 'des-ede-cbc': 16, 'des-cbc': 8, 'des': 8, }[enc_algo] enc_key = hashlib.md5(password + enc_iv[0:8]).digest() while enc_key_length > len(enc_key): enc_key += hashlib.md5(enc_key + password + enc_iv[0:8]).digest() enc_key = enc_key[0:enc_key_length] enc_algo_name = { 'aes-128-cbc': 'aes', 'aes-128': 'aes', 'aes-192-cbc': 'aes', 'aes-192': 'aes', 'aes-256-cbc': 'aes', 'aes-256': 'aes', 'rc4': 'rc4', 'rc4-64': 'rc4', 'rc4-40': 'rc4', 'rc2-64-cbc': 'rc2', 'rc2-40-cbc': 'rc2', 'rc2-cbc': 'rc2', 'rc2': 'rc2', 'des-ede3-cbc': 'tripledes', 'des-ede3': 'tripledes', 'des3': 'tripledes', 'des-ede-cbc': 'tripledes', 'des-cbc': 'des', 'des': 'des', }[enc_algo] decrypt_func = crypto_funcs[enc_algo_name] if enc_algo_name == 'rc4': return decrypt_func(enc_key, data) return decrypt_func(enc_key, data, enc_iv)
[ "def", "_unarmor_pem_openssl_private", "(", "headers", ",", "data", ",", "password", ")", ":", "enc_algo", "=", "None", "enc_iv_hex", "=", "None", "enc_iv", "=", "None", "if", "'DEK-Info'", "in", "headers", ":", "params", "=", "headers", "[", "'DEK-Info'", "...
Parses a PKCS#1 private key, or encrypted private key :param headers: A dict of "Name: Value" lines from right after the PEM header :param data: A byte string of the DER-encoded PKCS#1 private key :param password: A byte string of the password to use if the private key is encrypted :return: A byte string of the DER-encoded private key
[ "Parses", "a", "PKCS#1", "private", "key", "or", "encrypted", "private", "key" ]
python
valid
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L1204-L1206
def get_wiki_page(self, subreddit, page): """Return a WikiPage object for the subreddit and page provided.""" return objects.WikiPage(self, six.text_type(subreddit), page.lower())
[ "def", "get_wiki_page", "(", "self", ",", "subreddit", ",", "page", ")", ":", "return", "objects", ".", "WikiPage", "(", "self", ",", "six", ".", "text_type", "(", "subreddit", ")", ",", "page", ".", "lower", "(", ")", ")" ]
Return a WikiPage object for the subreddit and page provided.
[ "Return", "a", "WikiPage", "object", "for", "the", "subreddit", "and", "page", "provided", "." ]
python
train
mitodl/PyLmod
pylmod/gradebook.py
https://github.com/mitodl/PyLmod/blob/b798b86c33d1eb615e7cd4f3457b5c15da1d86e0/pylmod/gradebook.py#L1139-L1232
def get_staff(self, gradebook_id, simple=False): """Get staff list for gradebook. Get staff list for the gradebook specified. Optionally, return a less detailed list by specifying ``simple = True``. If simple=True, return a list of dictionaries, one dictionary for each member. The dictionary contains a member's ``email``, ``displayName``, and ``role``. Members with multiple roles will appear in the list once for each role. Args: gradebook_id (str): unique identifier for gradebook, i.e. ``2314`` simple (bool): Return a staff list with less detail. Default is ``False``. Returns: An example return value is: .. code-block:: python { u'data': { u'COURSE_ADMIN': [ { u'accountEmail': u'benfranklin@mit.edu', u'displayName': u'Benjamin Franklin', u'editable': False, u'email': u'benfranklin@mit.edu', u'givenName': u'Benjamin', u'middleName': None, u'mitId': u'921344431', u'nickName': u'Benjamin', u'personId': 10710616, u'sortableName': u'Franklin, Benjamin', u'surname': u'Franklin', u'year': None }, ], u'COURSE_PROF': [ { u'accountEmail': u'dduck@mit.edu', u'displayName': u'Donald Duck', u'editable': False, u'email': u'dduck@mit.edu', u'givenName': u'Donald', u'middleName': None, u'mitId': u'916144889', u'nickName': u'Donald', u'personId': 8117160, u'sortableName': u'Duck, Donald', u'surname': u'Duck', u'year': None }, ], u'COURSE_TA': [ { u'accountEmail': u'hduck@mit.edu', u'displayName': u'Huey Duck', u'editable': False, u'email': u'hduck@mit.edu', u'givenName': u'Huey', u'middleName': None, u'mitId': u'920445024', u'nickName': u'Huey', u'personId': 1299059, u'sortableName': u'Duck, Huey', u'surname': u'Duck', u'year': None }, ] }, } """ staff_data = self.get( 'staff/{gradebookId}'.format( gradebookId=gradebook_id or self.gradebook_id ), params=None, ) if simple: simple_list = [] unraveled_list = self.unravel_staff(staff_data) for member in unraveled_list.__iter__(): simple_list.append({ 'accountEmail': member['accountEmail'], 'displayName': member['displayName'], 'role': 
member['role'], }) return simple_list return staff_data['data']
[ "def", "get_staff", "(", "self", ",", "gradebook_id", ",", "simple", "=", "False", ")", ":", "staff_data", "=", "self", ".", "get", "(", "'staff/{gradebookId}'", ".", "format", "(", "gradebookId", "=", "gradebook_id", "or", "self", ".", "gradebook_id", ")", ...
Get staff list for gradebook. Get staff list for the gradebook specified. Optionally, return a less detailed list by specifying ``simple = True``. If simple=True, return a list of dictionaries, one dictionary for each member. The dictionary contains a member's ``email``, ``displayName``, and ``role``. Members with multiple roles will appear in the list once for each role. Args: gradebook_id (str): unique identifier for gradebook, i.e. ``2314`` simple (bool): Return a staff list with less detail. Default is ``False``. Returns: An example return value is: .. code-block:: python { u'data': { u'COURSE_ADMIN': [ { u'accountEmail': u'benfranklin@mit.edu', u'displayName': u'Benjamin Franklin', u'editable': False, u'email': u'benfranklin@mit.edu', u'givenName': u'Benjamin', u'middleName': None, u'mitId': u'921344431', u'nickName': u'Benjamin', u'personId': 10710616, u'sortableName': u'Franklin, Benjamin', u'surname': u'Franklin', u'year': None }, ], u'COURSE_PROF': [ { u'accountEmail': u'dduck@mit.edu', u'displayName': u'Donald Duck', u'editable': False, u'email': u'dduck@mit.edu', u'givenName': u'Donald', u'middleName': None, u'mitId': u'916144889', u'nickName': u'Donald', u'personId': 8117160, u'sortableName': u'Duck, Donald', u'surname': u'Duck', u'year': None }, ], u'COURSE_TA': [ { u'accountEmail': u'hduck@mit.edu', u'displayName': u'Huey Duck', u'editable': False, u'email': u'hduck@mit.edu', u'givenName': u'Huey', u'middleName': None, u'mitId': u'920445024', u'nickName': u'Huey', u'personId': 1299059, u'sortableName': u'Duck, Huey', u'surname': u'Duck', u'year': None }, ] }, }
[ "Get", "staff", "list", "for", "gradebook", "." ]
python
train
rootpy/rootpy
rootpy/tree/tree.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/tree/tree.py#L158-L255
def set_buffer(self, treebuffer, branches=None, ignore_branches=None, create_branches=False, visible=True, ignore_missing=False, ignore_duplicates=False, transfer_objects=False): """ Set the Tree buffer Parameters ---------- treebuffer : rootpy.tree.buffer.TreeBuffer a TreeBuffer branches : list, optional (default=None) only include these branches from the TreeBuffer ignore_branches : list, optional (default=None) ignore these branches from the TreeBuffer create_branches : bool, optional (default=False) If True then the branches in the TreeBuffer should be created. Use this option if initializing the Tree. A ValueError is raised if an attempt is made to create a branch with the same name as one that already exists in the Tree. If False the addresses of existing branches will be set to point at the addresses in this buffer. visible : bool, optional (default=True) If True then the branches will be added to the buffer and will be accessible as attributes of the Tree. ignore_missing : bool, optional (default=False) If True then any branches in this buffer that do not exist in the Tree will be ignored, otherwise a ValueError will be raised. This option is only valid when ``create_branches`` is False. ignore_duplicates : bool, optional (default=False) If False then raise a ValueError if the tree already has a branch with the same name as an entry in the buffer. If True then skip branches that already exist. This option is only valid when ``create_branches`` is True. transfer_objects : bool, optional (default=False) If True, all tree objects and collections will be transferred from the buffer into this Tree's buffer. 
""" # determine branches to keep while preserving branch order if branches is None: branches = treebuffer.keys() if ignore_branches is not None: branches = [b for b in branches if b not in ignore_branches] if create_branches: for name in branches: value = treebuffer[name] if self.has_branch(name): if ignore_duplicates: log.warning( "Skipping entry in buffer with the same name " "as an existing branch: `{0}`".format(name)) continue raise ValueError( "Attempting to create two branches " "with the same name: `{0}`".format(name)) if isinstance(value, Scalar): self.Branch(name, value, '{0}/{1}'.format( name, value.type)) elif isinstance(value, Array): length = value.length_name or len(value) self.Branch(name, value, '{0}[{2}]/{1}'.format( name, value.type, length)) else: self.Branch(name, value) else: for name in branches: value = treebuffer[name] if self.has_branch(name): self.SetBranchAddress(name, value) elif not ignore_missing: raise ValueError( "Attempting to set address for " "branch `{0}` which does not exist".format(name)) else: log.warning( "Skipping entry in buffer for which no " "corresponding branch in the " "tree exists: `{0}`".format(name)) if visible: newbuffer = TreeBuffer() for branch in branches: if branch in treebuffer: newbuffer[branch] = treebuffer[branch] newbuffer.set_objects(treebuffer) self.update_buffer(newbuffer, transfer_objects=transfer_objects)
[ "def", "set_buffer", "(", "self", ",", "treebuffer", ",", "branches", "=", "None", ",", "ignore_branches", "=", "None", ",", "create_branches", "=", "False", ",", "visible", "=", "True", ",", "ignore_missing", "=", "False", ",", "ignore_duplicates", "=", "Fa...
Set the Tree buffer Parameters ---------- treebuffer : rootpy.tree.buffer.TreeBuffer a TreeBuffer branches : list, optional (default=None) only include these branches from the TreeBuffer ignore_branches : list, optional (default=None) ignore these branches from the TreeBuffer create_branches : bool, optional (default=False) If True then the branches in the TreeBuffer should be created. Use this option if initializing the Tree. A ValueError is raised if an attempt is made to create a branch with the same name as one that already exists in the Tree. If False the addresses of existing branches will be set to point at the addresses in this buffer. visible : bool, optional (default=True) If True then the branches will be added to the buffer and will be accessible as attributes of the Tree. ignore_missing : bool, optional (default=False) If True then any branches in this buffer that do not exist in the Tree will be ignored, otherwise a ValueError will be raised. This option is only valid when ``create_branches`` is False. ignore_duplicates : bool, optional (default=False) If False then raise a ValueError if the tree already has a branch with the same name as an entry in the buffer. If True then skip branches that already exist. This option is only valid when ``create_branches`` is True. transfer_objects : bool, optional (default=False) If True, all tree objects and collections will be transferred from the buffer into this Tree's buffer.
[ "Set", "the", "Tree", "buffer" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_speech.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_speech.py#L45-L50
def unload(self): '''unload module''' self.settings.set('speech', 0) if self.mpstate.functions.say == self.mpstate.functions.say: self.mpstate.functions.say = self.old_mpstate_say_function self.kill_speech_dispatcher()
[ "def", "unload", "(", "self", ")", ":", "self", ".", "settings", ".", "set", "(", "'speech'", ",", "0", ")", "if", "self", ".", "mpstate", ".", "functions", ".", "say", "==", "self", ".", "mpstate", ".", "functions", ".", "say", ":", "self", ".", ...
unload module
[ "unload", "module" ]
python
train
SBRG/ssbio
ssbio/protein/sequence/utils/blast.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/blast.py#L240-L297
def create_orthology_matrix(r_name, genome_to_bbh_files, pid_cutoff=None, bitscore_cutoff=None, evalue_cutoff=None, outname='', outdir='', force_rerun=False): """Create an orthology matrix using best bidirectional BLAST hits (BBH) outputs. Args: r_name (str): Name of the reference genome genome_to_bbh_files (dict): Mapping of genome names to the BBH csv output from the :func:`~ssbio.protein.sequence.utils.blast.calculate_bbh` method pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100] bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits evalue_cutoff (float): Maximum E-value allowed between BLAST hits outname: Name of output file of orthology matrix outdir: Path to output directory force_rerun (bool): Force recreation of the orthology matrix even if the outfile exists Returns: str: Path to orthologous genes matrix. """ if outname: outfile = op.join(outdir, outname) else: outfile = op.join(outdir, '{}_orthology.csv'.format(r_name)) if op.exists(outfile) and os.stat(outfile).st_size != 0 and not force_rerun: log.info('{}: loaded existing orthology matrix'.format(outfile)) return outfile if not pid_cutoff and not bitscore_cutoff and not evalue_cutoff: log.warning('No cutoffs supplied, insignificant hits may be reported') if not pid_cutoff: pid_cutoff = 0 if not bitscore_cutoff: bitscore_cutoff = 0 if not evalue_cutoff: evalue_cutoff = float('Inf') out = pd.DataFrame() for g_name, bbh_path in genome_to_bbh_files.items(): df_bbh = pd.read_csv(bbh_path, index_col=0) bidirectional = df_bbh[df_bbh.BBH == '<=>'] data = bidirectional[(bidirectional.PID > pid_cutoff) & (bidirectional.eVal < evalue_cutoff) & (bidirectional.bitScore > bitscore_cutoff)] data.index = data.gene data2 = data[['subject']] if out.empty: out = data2 out = out.rename(columns={'subject': g_name}) else: out = pd.merge(out, data2, left_index=True, right_index=True, how='outer') out = out.rename(columns={'subject': g_name}) out.to_csv(outfile) 
log.debug('{} orthologous genes saved at {}'.format(r_name, outfile)) return outfile
[ "def", "create_orthology_matrix", "(", "r_name", ",", "genome_to_bbh_files", ",", "pid_cutoff", "=", "None", ",", "bitscore_cutoff", "=", "None", ",", "evalue_cutoff", "=", "None", ",", "outname", "=", "''", ",", "outdir", "=", "''", ",", "force_rerun", "=", ...
Create an orthology matrix using best bidirectional BLAST hits (BBH) outputs. Args: r_name (str): Name of the reference genome genome_to_bbh_files (dict): Mapping of genome names to the BBH csv output from the :func:`~ssbio.protein.sequence.utils.blast.calculate_bbh` method pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100] bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits evalue_cutoff (float): Maximum E-value allowed between BLAST hits outname: Name of output file of orthology matrix outdir: Path to output directory force_rerun (bool): Force recreation of the orthology matrix even if the outfile exists Returns: str: Path to orthologous genes matrix.
[ "Create", "an", "orthology", "matrix", "using", "best", "bidirectional", "BLAST", "hits", "(", "BBH", ")", "outputs", "." ]
python
train
TissueMAPS/TmClient
src/python/tmclient/api.py
https://github.com/TissueMAPS/TmClient/blob/6fb40622af19142cb5169a64b8c2965993a25ab1/src/python/tmclient/api.py#L381-L401
def rename_experiment(self, new_name): '''Renames the experiment. Parameters ---------- See also -------- :func:`tmserver.api.experiment.update_experiment` :class:`tmlib.models.experiment.ExperimentReference` ''' logger.info('rename experiment "%s"', self.experiment_name) content = {'name': new_name} url = self._build_api_url( '/experiments/{experiment_id}'.format( experiment_id=self._experiment_id ) ) res = self._session.put(url, json=content) res.raise_for_status() self.experiment_name = new_name
[ "def", "rename_experiment", "(", "self", ",", "new_name", ")", ":", "logger", ".", "info", "(", "'rename experiment \"%s\"'", ",", "self", ".", "experiment_name", ")", "content", "=", "{", "'name'", ":", "new_name", "}", "url", "=", "self", ".", "_build_api_...
Renames the experiment. Parameters ---------- See also -------- :func:`tmserver.api.experiment.update_experiment` :class:`tmlib.models.experiment.ExperimentReference`
[ "Renames", "the", "experiment", "." ]
python
train
CivicSpleen/ambry
ambry/library/search_backends/base.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search_backends/base.py#L157-L161
def reset(self): """ Resets (deletes) all indexes. """ self.dataset_index.reset() self.partition_index.reset() self.identifier_index.reset()
[ "def", "reset", "(", "self", ")", ":", "self", ".", "dataset_index", ".", "reset", "(", ")", "self", ".", "partition_index", ".", "reset", "(", ")", "self", ".", "identifier_index", ".", "reset", "(", ")" ]
Resets (deletes) all indexes.
[ "Resets", "(", "deletes", ")", "all", "indexes", "." ]
python
train
sernst/cauldron
cauldron/invoke/__init__.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/invoke/__init__.py#L22-L43
def initialize(): """ Initializes the cauldron library by confirming that it can be imported by the importlib library. If the attempt to import it fails, the system path will be modified and the attempt retried. If both attempts fail, an import error will be raised. """ cauldron_module = get_cauldron_module() if cauldron_module is not None: return cauldron_module sys.path.append(ROOT_DIRECTORY) cauldron_module = get_cauldron_module() if cauldron_module is not None: return cauldron_module raise ImportError(' '.join(( 'Unable to import cauldron.' 'The package was not installed in a known location.' )))
[ "def", "initialize", "(", ")", ":", "cauldron_module", "=", "get_cauldron_module", "(", ")", "if", "cauldron_module", "is", "not", "None", ":", "return", "cauldron_module", "sys", ".", "path", ".", "append", "(", "ROOT_DIRECTORY", ")", "cauldron_module", "=", ...
Initializes the cauldron library by confirming that it can be imported by the importlib library. If the attempt to import it fails, the system path will be modified and the attempt retried. If both attempts fail, an import error will be raised.
[ "Initializes", "the", "cauldron", "library", "by", "confirming", "that", "it", "can", "be", "imported", "by", "the", "importlib", "library", ".", "If", "the", "attempt", "to", "import", "it", "fails", "the", "system", "path", "will", "be", "modified", "and",...
python
train
basho/riak-python-client
riak/mapreduce.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/mapreduce.py#L626-L634
def to_array(self): """ Convert the RiakLinkPhase to a format that can be output into JSON. Used internally. """ stepdef = {'bucket': self._bucket, 'tag': self._tag, 'keep': self._keep} return {'link': stepdef}
[ "def", "to_array", "(", "self", ")", ":", "stepdef", "=", "{", "'bucket'", ":", "self", ".", "_bucket", ",", "'tag'", ":", "self", ".", "_tag", ",", "'keep'", ":", "self", ".", "_keep", "}", "return", "{", "'link'", ":", "stepdef", "}" ]
Convert the RiakLinkPhase to a format that can be output into JSON. Used internally.
[ "Convert", "the", "RiakLinkPhase", "to", "a", "format", "that", "can", "be", "output", "into", "JSON", ".", "Used", "internally", "." ]
python
train
lesscpy/lesscpy
lesscpy/scripts/compiler.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/scripts/compiler.py#L29-L80
def ldirectory(inpath, outpath, args, scope): """Compile all *.less files in directory Args: inpath (str): Path to compile outpath (str): Output directory args (object): Argparse Object scope (Scope): Scope object or None """ yacctab = 'yacctab' if args.debug else None if not outpath: sys.exit("Compile directory option needs -o ...") else: if not os.path.isdir(outpath): if args.verbose: print("Creating '%s'" % outpath, file=sys.stderr) if not args.dry_run: os.mkdir(outpath) less = glob.glob(os.path.join(inpath, '*.less')) f = formatter.Formatter(args) for lf in less: outf = os.path.splitext(os.path.basename(lf)) minx = '.min' if args.min_ending else '' outf = "%s/%s%s.css" % (outpath, outf[0], minx) if not args.force and os.path.exists(outf): recompile = os.path.getmtime(outf) < os.path.getmtime(lf) else: recompile = True if recompile: print('%s -> %s' % (lf, outf)) p = parser.LessParser( yacc_debug=(args.debug), lex_optimize=True, yacc_optimize=(not args.debug), scope=scope, tabfile=yacctab, verbose=args.verbose) p.parse(filename=lf, debuglevel=0) css = f.format(p) if not args.dry_run: with open(outf, 'w') as outfile: outfile.write(css) elif args.verbose: print('skipping %s, not modified' % lf, file=sys.stderr) sys.stdout.flush() if args.recurse: [ ldirectory( os.path.join(inpath, name), os.path.join(outpath, name), args, scope) for name in os.listdir(inpath) if os.path.isdir(os.path.join(inpath, name)) and not name.startswith('.') and not name == outpath ]
[ "def", "ldirectory", "(", "inpath", ",", "outpath", ",", "args", ",", "scope", ")", ":", "yacctab", "=", "'yacctab'", "if", "args", ".", "debug", "else", "None", "if", "not", "outpath", ":", "sys", ".", "exit", "(", "\"Compile directory option needs -o ...\"...
Compile all *.less files in directory Args: inpath (str): Path to compile outpath (str): Output directory args (object): Argparse Object scope (Scope): Scope object or None
[ "Compile", "all", "*", ".", "less", "files", "in", "directory", "Args", ":", "inpath", "(", "str", ")", ":", "Path", "to", "compile", "outpath", "(", "str", ")", ":", "Output", "directory", "args", "(", "object", ")", ":", "Argparse", "Object", "scope"...
python
valid
hanguokai/youku
youku/youku_videos.py
https://github.com/hanguokai/youku/blob/b2df060c7dccfad990bcfa289fff68bb77d1e69b/youku/youku_videos.py#L21-L31
def find_video_by_id(self, video_id):
    """doc: http://open.youku.com/docs/doc?id=44

    Fetch the basic info of a single video by its id and return the
    decoded JSON payload.
    """
    endpoint = 'https://openapi.youku.com/v2/videos/show_basic.json'
    query = {
        'client_id': self.client_id,
        'video_id': video_id
    }
    response = requests.get(endpoint, params=query)
    check_error(response)
    return response.json()
[ "def", "find_video_by_id", "(", "self", ",", "video_id", ")", ":", "url", "=", "'https://openapi.youku.com/v2/videos/show_basic.json'", "params", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'video_id'", ":", "video_id", "}", "r", "=", "requests",...
doc: http://open.youku.com/docs/doc?id=44
[ "doc", ":", "http", ":", "//", "open", ".", "youku", ".", "com", "/", "docs", "/", "doc?id", "=", "44" ]
python
train
elyase/masstable
masstable/masstable.py
https://github.com/elyase/masstable/blob/3eb72b22cd3337bc5c6bb95bb7bb73fdbe6ae9e2/masstable/masstable.py#L519-L576
def chart_plot(self, ax=None, cmap='RdBu', xlabel='N', ylabel='Z',
               grid_on=True, colorbar=True):
    """Plot a nuclear chart with (N,Z) as axis and the values of the Table
    as a color scale

    Parameters
    ----------
    ax: optional matplotlib axes
        defaults to current axes
    cmap: a matplotlib colormap
        default: 'RdBu'
    xlabel: string representing the label of the x axis
        default: 'N'
    ylabel: string, default: 'Z'
        the label of the y axis
    grid_on: boolean, default: True,
        whether to draw the axes grid or not
    colorbar: boolean, default: True
        whether to draw a colorbar or not

    Returns
    -------
    ax: a matplotlib axes object

    Example
    -------
    Plot the theoretical deviation for the Möller's model::

        >>> Table('FRDM95').error().chart_plot()
    """
    # Bug fix: matplotlib.mlab.griddata was deprecated in matplotlib 2.2
    # and removed in 3.0 -- use the scipy equivalent instead.
    from scipy.interpolate import griddata
    from numpy import linspace, meshgrid
    import matplotlib.pyplot as plt

    # extract the 1D arrays to be plotted
    x = self.dropna().N
    y = self.dropna().Z
    z = self.dropna().values

    # convert to matplotlib's grid format (one grid point per (N,Z) step)
    xi = linspace(min(x), max(x), max(x) - min(x) + 1)
    yi = linspace(min(y), max(y), max(y) - min(y) + 1)
    X, Y = meshgrid(xi, yi)
    # 'linear' is the closest scipy analogue of the old mlab gridding;
    # NOTE(review): visual parity with the mlab output assumed -- confirm.
    Z = griddata((x, y), z, (X, Y), method='linear')

    # create and customize plot
    if ax is None:
        ax = plt.gca()
    chart = ax.pcolormesh(X, Y, Z, cmap=cmap)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.grid(grid_on)
    ax.set_aspect('equal')
    if colorbar:
        plt.colorbar(chart)
    return ax
[ "def", "chart_plot", "(", "self", ",", "ax", "=", "None", ",", "cmap", "=", "'RdBu'", ",", "xlabel", "=", "'N'", ",", "ylabel", "=", "'Z'", ",", "grid_on", "=", "True", ",", "colorbar", "=", "True", ")", ":", "from", "matplotlib", ".", "mlab", "imp...
Plot a nuclear chart with (N,Z) as axis and the values of the Table as a color scale Parameters ---------- ax: optional matplotlib axes defaults to current axes cmap: a matplotlib colormap default: 'RdBu' xlabel: string representing the label of the x axis default: 'N' ylabel: string, default: 'Z' the label of the x axis grid_on: boolean, default: True, whether to draw the axes grid or not colorbar: boolean, default: True whether to draw a colorbar or not Returns ------- ax: a matplotlib axes object Example ------- Plot the theoretical deviation for the Möller's model:: >>> Table('FRDM95').error().chart_plot()
[ "Plot", "a", "nuclear", "chart", "with", "(", "N", "Z", ")", "as", "axis", "and", "the", "values", "of", "the", "Table", "as", "a", "color", "scale" ]
python
test
apple/turicreate
src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_feature_engineering/_autovectorizer.py#L224-L241
def short_text__str(self, column_name, output_column_prefix):
    """
    Transforms short text into a dictionary of TFIDF-weighted 3-gram
    character counts: a character-level NGramCounter followed by a TFIDF
    reweighting step, both writing under *output_column_prefix*.
    """
    from ._ngram_counter import NGramCounter
    from ._tfidf import TFIDF

    # Count character 3-grams of the column.
    ngrams = NGramCounter(
        features=[column_name],
        n=3,
        method="character",
        output_column_prefix=output_column_prefix)
    # Reweight the counts, dropping overly rare/common grams.
    tfidf = TFIDF(
        features=[column_name],
        min_document_frequency=0.01,
        max_document_frequency=0.5,
        output_column_prefix=output_column_prefix)
    return [ngrams, tfidf]
[ "def", "short_text__str", "(", "self", ",", "column_name", ",", "output_column_prefix", ")", ":", "from", ".", "_ngram_counter", "import", "NGramCounter", "from", ".", "_tfidf", "import", "TFIDF", "return", "[", "NGramCounter", "(", "features", "=", "[", "column...
Transforms short text into a dictionary of TFIDF-weighted 3-gram character counts.
[ "Transforms", "short", "text", "into", "a", "dictionary", "of", "TFIDF", "-", "weighted", "3", "-", "gram", "character", "counts", "." ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L3849-L3861
def xpathNextNamespace(self, ctxt):
    """Traversal function for the "namespace" direction the namespace
       axis contains the namespace nodes of the context node; the
       order of nodes on this axis is implementation-defined; the
       axis will be empty unless the context node is an element We
       keep the XML namespace node at the end of the list. """
    # Unwrap the optional context object to its underlying C pointer.
    ctxt__o = None if ctxt is None else ctxt._o
    ret = libxml2mod.xmlXPathNextNamespace(ctxt__o, self._o)
    if ret is None:
        raise xpathError('xmlXPathNextNamespace() failed')
    return xmlNode(_obj=ret)
[ "def", "xpathNextNamespace", "(", "self", ",", "ctxt", ")", ":", "if", "ctxt", "is", "None", ":", "ctxt__o", "=", "None", "else", ":", "ctxt__o", "=", "ctxt", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlXPathNextNamespace", "(", "ctxt__o", ",", "self",...
Traversal function for the "namespace" direction the namespace axis contains the namespace nodes of the context node; the order of nodes on this axis is implementation-defined; the axis will be empty unless the context node is an element We keep the XML namespace node at the end of the list.
[ "Traversal", "function", "for", "the", "namespace", "direction", "the", "namespace", "axis", "contains", "the", "namespace", "nodes", "of", "the", "context", "node", ";", "the", "order", "of", "nodes", "on", "this", "axis", "is", "implementation", "-", "define...
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/eccman.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/eccman.py#L239-L247
def check(self, message, ecc, k=None):
    '''Verify whether a message+ecc pair contains any error. Usable before
    decoding (alongside hashes) to detect tampering, or after decoding to
    confirm the message was fully recovered.'''
    if not k:
        k = self.k
    # Normalize lengths: the message is left-padded, the ecc right-padded.
    message, _ = self.pad(message, k=k)
    ecc, _ = self.rpad(ecc, k=k)
    codeword = message + ecc
    if self.algo in (1, 2):
        return self.ecc_manager.check_fast(codeword, k=k)
    elif self.algo in (3, 4):
        return reedsolo.rs_check(bytearray(codeword), self.n - k,
                                 fcr=self.fcr, generator=self.gen_nb)
[ "def", "check", "(", "self", ",", "message", ",", "ecc", ",", "k", "=", "None", ")", ":", "if", "not", "k", ":", "k", "=", "self", ".", "k", "message", ",", "_", "=", "self", ".", "pad", "(", "message", ",", "k", "=", "k", ")", "ecc", ",", ...
Check if there's any error in a message+ecc. Can be used before decoding, in addition to hashes to detect if the message was tampered, or after decoding to check that the message was fully recovered.
[ "Check", "if", "there", "s", "any", "error", "in", "a", "message", "+", "ecc", ".", "Can", "be", "used", "before", "decoding", "in", "addition", "to", "hashes", "to", "detect", "if", "the", "message", "was", "tampered", "or", "after", "decoding", "to", ...
python
train
SheffieldML/GPy
GPy/kern/src/standard_periodic.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/standard_periodic.py#L137-L141
def Kdiag(self, X):
    """Compute the diagonal of the covariance matrix associated to X."""
    # Every diagonal entry of a stationary kernel equals the variance.
    return np.ones(X.shape[0]) * self.variance
[ "def", "Kdiag", "(", "self", ",", "X", ")", ":", "ret", "=", "np", ".", "empty", "(", "X", ".", "shape", "[", "0", "]", ")", "ret", "[", ":", "]", "=", "self", ".", "variance", "return", "ret" ]
Compute the diagonal of the covariance matrix associated to X.
[ "Compute", "the", "diagonal", "of", "the", "covariance", "matrix", "associated", "to", "X", "." ]
python
train
mjirik/io3d
io3d/datareaderqt.py
https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datareaderqt.py#L95-L141
def __get_datafile(self, app=False):
    """
    Show a file-selection dialog and return the selected path.

    Args:
        app (bool): when False a temporary QApplication is created (and
            exited) just for the dialog; pass True when a Qt application
            already exists.

    Returns:
        str or None: the selected file path, or None when the dialog is
        cancelled.
    """
    if self.cache is not None:
        cache_loadfiledir = self.cache.get_or_none('loadfiledir')
        # Bug fix: str(None) yields the literal string "None", which
        # defeated the `is None` fallback below. Convert only when the
        # cache actually returned a value.
        if cache_loadfiledir is not None:
            self.loadfiledir = str(cache_loadfiledir)
    if self.loadfiledir is None:
        self.loadfiledir = ''
    directory = str(self.loadfiledir)

    from PyQt4.QtGui import QFileDialog
    if not app:
        inner_app = QApplication(sys.argv)

    if self._skip_get_path_dialog_for_tests:
        dcmdir = self.datapath
    else:
        dcmdir = QFileDialog.getOpenFileName(
            caption='Select Data File',
            directory=directory
        )

    if not app:
        inner_app.exit(0)

    dcmdir = get_str(dcmdir)
    if len(dcmdir) == 0:
        # Dialog was cancelled / nothing selected.
        dcmdir = None

    # Bug fix: the original called os.path.split(dcmdir) unconditionally,
    # which raises TypeError when dcmdir is None (cancelled dialog).
    if dcmdir is not None and self.cache is not None:
        head, _tail = os.path.split(dcmdir)
        self.cache.update('loadfiledir', head)
    return dcmdir
[ "def", "__get_datafile", "(", "self", ",", "app", "=", "False", ")", ":", "if", "self", ".", "cache", "is", "not", "None", ":", "cache_loadfiledir", "=", "self", ".", "cache", ".", "get_or_none", "(", "'loadfiledir'", ")", "self", ".", "loadfiledir", "="...
Draw a dialog for directory selection.
[ "Draw", "a", "dialog", "for", "directory", "selection", "." ]
python
train
Terrance/SkPy
skpy/conn.py
https://github.com/Terrance/SkPy/blob/0f9489c94e8ec4d3effab4314497428872a80ad1/skpy/conn.py#L411-L416
def getUserId(self):
    """
    Ask Skype for the authenticated user's identifier, and store it on the connection object.
    """
    url = "{0}/users/self/profile".format(self.API_USER)
    profile = self("GET", url, auth=self.Auth.SkypeToken).json()
    self.userId = profile.get("username")
[ "def", "getUserId", "(", "self", ")", ":", "self", ".", "userId", "=", "self", "(", "\"GET\"", ",", "\"{0}/users/self/profile\"", ".", "format", "(", "self", ".", "API_USER", ")", ",", "auth", "=", "self", ".", "Auth", ".", "SkypeToken", ")", ".", "jso...
Ask Skype for the authenticated user's identifier, and store it on the connection object.
[ "Ask", "Skype", "for", "the", "authenticated", "user", "s", "identifier", "and", "store", "it", "on", "the", "connection", "object", "." ]
python
test