repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
pypa/pipenv
pipenv/vendor/requests/utils.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/utils.py#L589-L608
def requote_uri(uri):
    """Re-quote the given URI.

    Runs *uri* through an unquote/quote cycle so that the result is fully
    and consistently percent-encoded.

    :rtype: str
    """
    # Characters that must survive re-quoting; the fallback set drops '%'.
    keep_with_percent = "!#$%&'()*+,/:;=?@[]~"
    keep_without_percent = "!#$&'()*+,/:;=?@[]~"
    try:
        # First undo any encoding of unreserved characters, then encode
        # whatever is still illegal (reserved, unreserved and '%' survive).
        unquoted = unquote_unreserved(uri)
        return quote(unquoted, safe=keep_with_percent)
    except InvalidURL:
        # The URI contained an invalid percent-escape, so unquoting failed.
        # Quote it as-is, escaping stray '%' characters so they do not
        # cause issues elsewhere.
        return quote(uri, safe=keep_without_percent)
[ "def", "requote_uri", "(", "uri", ")", ":", "safe_with_percent", "=", "\"!#$%&'()*+,/:;=?@[]~\"", "safe_without_percent", "=", "\"!#$&'()*+,/:;=?@[]~\"", "try", ":", "# Unquote only the unreserved characters", "# Then quote only illegal characters (do not quote reserved,", "# unreser...
Re-quote the given URI. This function passes the given URI through an unquote/quote cycle to ensure that it is fully and consistently quoted. :rtype: str
[ "Re", "-", "quote", "the", "given", "URI", "." ]
python
train
saltstack/salt
salt/modules/lxc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L3227-L3251
def systemd_running_state(name, path=None):
    '''
    Get the operational state of a systemd based container

    path
        path to the container parent
        default: /var/lib/lxc (system default)

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt myminion lxc.systemd_running_state ubuntu
    '''
    try:
        result = run_all(name,
                         'systemctl is-system-running',
                         path=path,
                         ignore_retcode=True)
        return result['stdout']
    except CommandExecutionError:
        # Container unreachable or the command failed: report an empty state.
        return ''
[ "def", "systemd_running_state", "(", "name", ",", "path", "=", "None", ")", ":", "try", ":", "ret", "=", "run_all", "(", "name", ",", "'systemctl is-system-running'", ",", "path", "=", "path", ",", "ignore_retcode", "=", "True", ")", "[", "'stdout'", "]", ...
Get the operational state of a systemd based container path path to the container parent default: /var/lib/lxc (system default) .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion lxc.systemd_running_state ubuntu
[ "Get", "the", "operational", "state", "of", "a", "systemd", "based", "container" ]
python
train
reingart/gui2py
gui/resource.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/resource.py#L126-L155
def build_component(res, parent=None):
    "Create a gui2py control based on the python resource"
    # Copy the resource so control parameters can be popped out of it.
    spec = dict(res.items())
    comtype = spec.pop('type')
    # Child specs live under 'components', except menus, which use 'items'.
    if 'components' in res:
        children = spec.pop('components')
    elif comtype == 'Menu' and 'items' in res:
        children = spec.pop('items')
    else:
        children = []
    from gui import registry
    # Look the control class up in each registry section in turn.
    for section in (registry.CONTROLS, registry.MENU, registry.MISC):
        if comtype in section:
            comclass = section[comtype]
            break
    else:
        raise RuntimeError("%s not in registry" % comtype)
    # Instantiate the GUI object, then recursively build its children.
    com = comclass(parent=parent, **spec)
    for child in children:
        build_component(child, parent=com)
    return com
[ "def", "build_component", "(", "res", ",", "parent", "=", "None", ")", ":", "# control specs (parameters)", "kwargs", "=", "dict", "(", "res", ".", "items", "(", ")", ")", "comtype", "=", "kwargs", ".", "pop", "(", "'type'", ")", "if", "'components'", "i...
Create a gui2py control based on the python resource
[ "Create", "a", "gui2py", "control", "based", "on", "the", "python", "resource" ]
python
test
juicer/juicer
juicer/common/Repo.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/common/Repo.py#L113-L121
def to_juicer_repo(self):
    """Returns a JuicerRepo() object representing this pulp repo.

    Optional keys missing from the repo spec are filled in from
    REPO_DEF_DEFAULTS.
    """
    repo_def = {}
    defaults = juicer.common.Constants.REPO_DEF_DEFAULTS
    repo_def['name'] = self['name']
    for key in juicer.common.Constants.REPO_DEF_OPT_KEYS:
        repo_def[key] = self.spec.get(key, defaults[key])
        # Log the value actually recorded in repo_def. The previous code
        # logged self[key], which reported the wrong source and could raise
        # KeyError when the key was only present via the defaults.
        juicer.utils.Log.log_debug("Defined %s as %s" % (key, str(repo_def[key])))
    return JuicerRepo(None, repo_def=repo_def)
[ "def", "to_juicer_repo", "(", "self", ")", ":", "repo_def", "=", "{", "}", "defaults", "=", "juicer", ".", "common", ".", "Constants", ".", "REPO_DEF_DEFAULTS", "repo_def", "[", "'name'", "]", "=", "self", "[", "'name'", "]", "for", "key", "in", "juicer"...
Returns a JuicerRepo() object representing this pulp repo
[ "Returns", "a", "JuicerRepo", "()", "object", "representing", "this", "pulp", "repo" ]
python
train
nerox8664/pytorch2keras
pytorch2keras/operation_layers.py
https://github.com/nerox8664/pytorch2keras/blob/750eaf747323580e6732d0c5ba9f2f39cb096764/pytorch2keras/operation_layers.py#L60-L89
def convert_concat(params, w_name, scope_name, inputs, layers, weights, names):
    """
    Convert concatenation.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers
    """
    print('Converting concat ...')
    tensors = [layers[node] for node in inputs]

    if len(tensors) == 1:
        # Nothing to concatenate; forward the single tensor unchanged.
        layers[scope_name] = tensors[0]
        return

    if names == 'short':
        layer_name = 'CAT' + random_string(5)
    elif names == 'keep':
        layer_name = w_name
    else:
        layer_name = w_name + str(random.random())

    concat = keras.layers.Concatenate(name=layer_name, axis=params['axis'])
    layers[scope_name] = concat(tensors)
[ "def", "convert_concat", "(", "params", ",", "w_name", ",", "scope_name", ",", "inputs", ",", "layers", ",", "weights", ",", "names", ")", ":", "print", "(", "'Converting concat ...'", ")", "concat_nodes", "=", "[", "layers", "[", "i", "]", "for", "i", "...
Convert concatenation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
[ "Convert", "concatenation", "." ]
python
valid
saltstack/salt
salt/states/rabbitmq_user.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rabbitmq_user.py#L232-L272
def absent(name, runas=None):
    '''
    Ensure the named user is absent

    name
        The name of the user to remove
    runas
        User to run the command
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    try:
        user_exists = __salt__['rabbitmq.user_exists'](name, runas=runas)
    except CommandExecutionError as err:
        ret['comment'] = 'Error: {0}'.format(err)
        return ret

    if not user_exists:
        # Nothing to do: the desired state already holds.
        ret['result'] = True
        ret['comment'] = 'The user \'{0}\' is not present.'.format(name)
        return ret

    if not __opts__['test']:
        try:
            __salt__['rabbitmq.delete_user'](name, runas=runas)
        except CommandExecutionError as err:
            ret['comment'] = 'Error: {0}'.format(err)
            return ret
    # Record the pending/performed change in both test and real runs.
    ret['changes'].update({'name': {'old': name, 'new': ''}})

    if __opts__['test'] and ret['changes']:
        ret['result'] = None
        ret['comment'] = 'The user \'{0}\' will be removed.'.format(name)
        return ret

    ret['result'] = True
    ret['comment'] = 'The user \'{0}\' was removed.'.format(name)
    return ret
[ "def", "absent", "(", "name", ",", "runas", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "try", ":", "user_exists", "=", "__salt__",...
Ensure the named user is absent name The name of the user to remove runas User to run the command
[ "Ensure", "the", "named", "user", "is", "absent" ]
python
train
oasis-open/cti-stix-validator
stix2validator/v21/shoulds.py
https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/v21/shoulds.py#L733-L741
def network_traffic_ports(instance):
    """Ensure network-traffic objects contain both src_port and dst_port.
    """
    for key, obj in instance['objects'].items():
        if obj.get('type') != 'network-traffic':
            continue
        if 'src_port' not in obj or 'dst_port' not in obj:
            yield JSONError("The Network Traffic object '%s' should contain "
                            "both the 'src_port' and 'dst_port' properties."
                            % key, instance['id'], 'network-traffic-ports')
[ "def", "network_traffic_ports", "(", "instance", ")", ":", "for", "key", ",", "obj", "in", "instance", "[", "'objects'", "]", ".", "items", "(", ")", ":", "if", "(", "'type'", "in", "obj", "and", "obj", "[", "'type'", "]", "==", "'network-traffic'", "a...
Ensure network-traffic objects contain both src_port and dst_port.
[ "Ensure", "network", "-", "traffic", "objects", "contain", "both", "src_port", "and", "dst_port", "." ]
python
train
saltstack/salt
salt/key.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/key.py#L750-L768
def delete_all(self):
    '''
    Delete all keys
    '''
    for status, keys in six.iteritems(self.list_keys()):
        for key in keys:
            key_path = os.path.join(self.opts['pki_dir'], status, key)
            try:
                os.remove(key_path)
                self.event.fire_event(
                    {'result': True, 'act': 'delete', 'id': key},
                    salt.utils.event.tagify(prefix='key'))
            except (OSError, IOError):
                # Key file already gone or unreadable; skip it.
                pass
    self.check_minion_cache()
    if self.opts.get('rotate_aes_key'):
        salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
    return self.list_keys()
[ "def", "delete_all", "(", "self", ")", ":", "for", "status", ",", "keys", "in", "six", ".", "iteritems", "(", "self", ".", "list_keys", "(", ")", ")", ":", "for", "key", "in", "keys", ":", "try", ":", "os", ".", "remove", "(", "os", ".", "path", ...
Delete all keys
[ "Delete", "all", "keys" ]
python
train
stevearc/dynamo3
dynamo3/connection.py
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L378-L436
def create_table(self, tablename, hash_key, range_key=None, indexes=None, global_indexes=None, throughput=None, wait=False): """ Create a table Parameters ---------- tablename : str Name of the table hash_key : :class:`~dynamo3.fields.DynamoKey` The key to use as the Hash key range_key : :class:`~dynamo3.fields.DynamoKey`, optional The key to use as the Range key indexes : list, optional List of :class:`~dynamo3.fields.LocalIndex` global_indexes : list, optional List of :class:`~dynamo3.fields.GlobalIndex` throughput : :class:`~dynamo3.fields.Throughput`, optional The throughput of the table """ if throughput is None: throughput = Throughput() all_attrs = set([hash_key]) if range_key is not None: all_attrs.add(range_key) key_schema = [hash_key.hash_schema()] if range_key is not None: key_schema.append(range_key.range_schema()) kwargs = { 'TableName': tablename, 'KeySchema': key_schema, 'ProvisionedThroughput': throughput.schema(), } if indexes: kwargs['LocalSecondaryIndexes'] = [ idx.schema(hash_key) for idx in indexes ] for idx in indexes: all_attrs.add(idx.range_key) if global_indexes: kwargs['GlobalSecondaryIndexes'] = [ idx.schema() for idx in global_indexes ] for idx in global_indexes: all_attrs.add(idx.hash_key) if idx.range_key is not None: all_attrs.add(idx.range_key) kwargs['AttributeDefinitions'] = [attr.definition() for attr in all_attrs] result = self.call('create_table', **kwargs) if wait: self.client.get_waiter('table_exists').wait( TableName=tablename ) return result
[ "def", "create_table", "(", "self", ",", "tablename", ",", "hash_key", ",", "range_key", "=", "None", ",", "indexes", "=", "None", ",", "global_indexes", "=", "None", ",", "throughput", "=", "None", ",", "wait", "=", "False", ")", ":", "if", "throughput"...
Create a table Parameters ---------- tablename : str Name of the table hash_key : :class:`~dynamo3.fields.DynamoKey` The key to use as the Hash key range_key : :class:`~dynamo3.fields.DynamoKey`, optional The key to use as the Range key indexes : list, optional List of :class:`~dynamo3.fields.LocalIndex` global_indexes : list, optional List of :class:`~dynamo3.fields.GlobalIndex` throughput : :class:`~dynamo3.fields.Throughput`, optional The throughput of the table
[ "Create", "a", "table" ]
python
train
signetlabdei/sem
sem/database.py
https://github.com/signetlabdei/sem/blob/5077dd7a6d15644a18790bb6fde320e905f0fef0/sem/database.py#L329-L377
def get_complete_results(self, params=None, result_id=None):
    """
    Return available results, analogously to what get_results does, but
    also read the corresponding output files for each result, and
    incorporate them in the result dictionary under the output key, as a
    dictionary of filename: file_contents.

    Args:
        params (dict): parameter specification of the desired parameter
            values, as described in the get_results documentation.

    In other words, results returned by this function will be in the
    form::

        {
            'params': {
                'param1': value1,
                'param2': value2,
                ...
                'RngRun': value3
            },
            'meta': {
                'elapsed_time': value4,
                'id': value5
            }
            'output': {
                'stdout': stdout_as_string,
                'stderr': stderr_as_string,
                'file1': file1_as_string,
                ...
            }
        }

    Note that the stdout and stderr entries are always included, even if
    they are empty.
    """
    # Deep-copy so callers can mutate results without touching the db.
    if result_id is None:
        matching = self.get_results(params)
    else:
        matching = self.get_results(result_id=result_id)
    results = deepcopy(matching)

    for result in results:
        output = {}
        files = self.get_result_files(result['meta']['id'])
        for name, filepath in files.items():
            with open(filepath, 'r') as contents:
                output[name] = contents.read()
        result['output'] = output

    return results
[ "def", "get_complete_results", "(", "self", ",", "params", "=", "None", ",", "result_id", "=", "None", ")", ":", "if", "result_id", "is", "not", "None", ":", "results", "=", "deepcopy", "(", "self", ".", "get_results", "(", "result_id", "=", "result_id", ...
Return available results, analogously to what get_results does, but also read the corresponding output files for each result, and incorporate them in the result dictionary under the output key, as a dictionary of filename: file_contents. Args: params (dict): parameter specification of the desired parameter values, as described in the get_results documentation. In other words, results returned by this function will be in the form:: { 'params': { 'param1': value1, 'param2': value2, ... 'RngRun': value3 }, 'meta': { 'elapsed_time': value4, 'id': value5 } 'output': { 'stdout': stdout_as_string, 'stderr': stderr_as_string, 'file1': file1_as_string, ... } } Note that the stdout and stderr entries are always included, even if they are empty.
[ "Return", "available", "results", "analogously", "to", "what", "get_results", "does", "but", "also", "read", "the", "corresponding", "output", "files", "for", "each", "result", "and", "incorporate", "them", "in", "the", "result", "dictionary", "under", "the", "o...
python
train
pybel/pybel-tools
src/pybel_tools/summary/edge_summary.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/summary/edge_summary.py#L41-L46
def group_dict_set(iterator: Iterable[Tuple[A, B]]) -> Mapping[A, Set[B]]:
    """Make a dict that accumulates the values for each key in an iterator of doubles."""
    grouped = {}
    for key, value in iterator:
        # Create the set lazily on first sight of each key.
        grouped.setdefault(key, set()).add(value)
    return grouped
[ "def", "group_dict_set", "(", "iterator", ":", "Iterable", "[", "Tuple", "[", "A", ",", "B", "]", "]", ")", "->", "Mapping", "[", "A", ",", "Set", "[", "B", "]", "]", ":", "d", "=", "defaultdict", "(", "set", ")", "for", "key", ",", "value", "i...
Make a dict that accumulates the values for each key in an iterator of doubles.
[ "Make", "a", "dict", "that", "accumulates", "the", "values", "for", "each", "key", "in", "an", "iterator", "of", "doubles", "." ]
python
valid
GNS3/gns3-server
gns3server/compute/dynamips/nodes/c7200.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/c7200.py#L171-L196
def set_sensors(self, sensors):
    """
    Sets the 4 sensors with temperature in degree Celcius.

    :param sensors: list of 4 sensor temperatures corresponding to
    sensor 1 = I/0 controller inlet
    sensor 2 = I/0 controller outlet
    sensor 3 = NPE inlet
    sensor 4 = NPE outlet
    Example: [22, 22, 22, 22]
    """
    for sensor_id, temp in enumerate(sensors):
        yield from self._hypervisor.send(
            'c7200 set_temp_sensor "{name}" {sensor_id} {temp}'.format(name=self._name,
                                                                       sensor_id=sensor_id,
                                                                       temp=temp))
        log.info('Router "{name}" [{id}]: sensor {sensor_id} temperature updated from {old_temp}C to {new_temp}C'.format(name=self._name,
                                                                                                                         id=self._id,
                                                                                                                         sensor_id=sensor_id,
                                                                                                                         old_temp=self._sensors[sensor_id],
                                                                                                                         new_temp=sensors[sensor_id]))
    self._sensors = sensors
[ "def", "set_sensors", "(", "self", ",", "sensors", ")", ":", "sensor_id", "=", "0", "for", "sensor", "in", "sensors", ":", "yield", "from", "self", ".", "_hypervisor", ".", "send", "(", "'c7200 set_temp_sensor \"{name}\" {sensor_id} {temp}'", ".", "format", "(",...
Sets the 4 sensors with temperature in degree Celcius. :param sensors: list of 4 sensor temperatures corresponding to sensor 1 = I/0 controller inlet sensor 2 = I/0 controller outlet sensor 3 = NPE inlet sensor 4 = NPE outlet Example: [22, 22, 22, 22]
[ "Sets", "the", "4", "sensors", "with", "temperature", "in", "degree", "Celcius", "." ]
python
train
numenta/nupic
src/nupic/algorithms/backtracking_tm.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/backtracking_tm.py#L2190-L2298
def _learnBacktrack(self): """ This "backtracks" our learning state, trying to see if we can lock onto the current set of inputs by assuming the sequence started up to N steps ago on start cells. This will adjust @ref lrnActiveState['t'] if it does manage to lock on to a sequence that started earlier. :returns: >0 if we managed to lock on to a sequence that started earlier. The value returned is how many steps in the past we locked on. If 0 is returned, the caller needs to change active state to start on start cells. How it works: ------------------------------------------------------------------- This method gets called from updateLearningState when we detect either of the following two conditions: #. Our PAM counter (@ref pamCounter) expired #. We reached the max allowed learned sequence length Either of these two conditions indicate that we want to start over on start cells. Rather than start over on start cells on the current input, we can accelerate learning by backtracking a few steps ago and seeing if perhaps a sequence we already at least partially know already started. This updates/modifies: - @ref lrnActiveState['t'] This trashes: - @ref lrnActiveState['t-1'] - @ref lrnPredictedState['t'] - @ref lrnPredictedState['t-1'] """ # How much input history have we accumulated? # The current input is always at the end of self._prevInfPatterns (at # index -1), and is not a valid startingOffset to evaluate. numPrevPatterns = len(self._prevLrnPatterns) - 1 if numPrevPatterns <= 0: if self.verbosity >= 3: print "lrnBacktrack: No available history to backtrack from" return False # We will record which previous input patterns did not generate predictions # up to the current time step and remove all the ones at the head of the # input history queue so that we don't waste time evaluating them again at # a later time step. 
badPatterns = [] # Let's go back in time and replay the recent inputs from start cells and # see if we can lock onto this current set of inputs that way. # # Start the farthest back and work our way forward. For each starting point, # See if firing on start cells at that point would predict the current # input. # # We want to pick the point farthest in the past that has continuity # up to the current time step inSequence = False for startOffset in range(0, numPrevPatterns): # Can we backtrack from startOffset? inSequence = self._learnBacktrackFrom(startOffset, readOnly=True) # Done playing through the sequence from starting point startOffset # Break out as soon as we find a good path if inSequence: break # Take this bad starting point out of our input history so we don't # try it again later. badPatterns.append(startOffset) # If we failed to lock on at any starting point, return failure. The caller # will start over again on start cells if not inSequence: if self.verbosity >= 3: print ("Failed to lock on. Falling back to start cells on current " "time step.") # Nothing in our input history was a valid starting point, so get rid # of it so we don't try any of them again at a later iteration self._prevLrnPatterns = [] return False # We did find a valid starting point in the past. Now, we need to # re-enforce all segments that became active when following this path. if self.verbosity >= 3: print ("Discovered path to current input by using start cells from %d " "steps ago:" % (numPrevPatterns - startOffset), self._prevLrnPatterns[startOffset]) self._learnBacktrackFrom(startOffset, readOnly=False) # Remove any useless patterns at the head of the input pattern history # queue. for i in range(numPrevPatterns): if i in badPatterns or i <= startOffset: if self.verbosity >= 3: print ("Removing useless pattern from history:", self._prevLrnPatterns[0]) self._prevLrnPatterns.pop(0) else: break return numPrevPatterns - startOffset
[ "def", "_learnBacktrack", "(", "self", ")", ":", "# How much input history have we accumulated?", "# The current input is always at the end of self._prevInfPatterns (at", "# index -1), and is not a valid startingOffset to evaluate.", "numPrevPatterns", "=", "len", "(", "self", ".", "_p...
This "backtracks" our learning state, trying to see if we can lock onto the current set of inputs by assuming the sequence started up to N steps ago on start cells. This will adjust @ref lrnActiveState['t'] if it does manage to lock on to a sequence that started earlier. :returns: >0 if we managed to lock on to a sequence that started earlier. The value returned is how many steps in the past we locked on. If 0 is returned, the caller needs to change active state to start on start cells. How it works: ------------------------------------------------------------------- This method gets called from updateLearningState when we detect either of the following two conditions: #. Our PAM counter (@ref pamCounter) expired #. We reached the max allowed learned sequence length Either of these two conditions indicate that we want to start over on start cells. Rather than start over on start cells on the current input, we can accelerate learning by backtracking a few steps ago and seeing if perhaps a sequence we already at least partially know already started. This updates/modifies: - @ref lrnActiveState['t'] This trashes: - @ref lrnActiveState['t-1'] - @ref lrnPredictedState['t'] - @ref lrnPredictedState['t-1']
[ "This", "backtracks", "our", "learning", "state", "trying", "to", "see", "if", "we", "can", "lock", "onto", "the", "current", "set", "of", "inputs", "by", "assuming", "the", "sequence", "started", "up", "to", "N", "steps", "ago", "on", "start", "cells", ...
python
valid
tomnor/channelpack
channelpack/datautils.py
https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/datautils.py#L174-L192
def slicelist(b):
    """Produce a list of slices given the boolean array b.

    Start and stop in each slice describe the True sections in b.
    An empty or all-False input yields an empty list.
    """
    slicelst = []
    started = False
    start = 0
    for i, e in enumerate(b):
        if e and not started:
            start = i
            started = True
        elif not e and started:
            slicelst.append(slice(start, i))
            started = False
    # Close a True section that runs to the end of b. (The previous code
    # tested the loop variable here, which raised NameError for empty b.)
    if started:
        slicelst.append(slice(start, len(b)))
    return slicelst
[ "def", "slicelist", "(", "b", ")", ":", "slicelst", "=", "[", "]", "started", "=", "False", "for", "i", ",", "e", "in", "enumerate", "(", "b", ")", ":", "if", "e", "and", "not", "started", ":", "start", "=", "i", "started", "=", "True", "elif", ...
Produce a list of slices given the boolean array b. Start and stop in each slice describe the True sections in b.
[ "Produce", "a", "list", "of", "slices", "given", "the", "boolean", "array", "b", "." ]
python
train
manrajgrover/halo
halo/halo.py
https://github.com/manrajgrover/halo/blob/0ac5149dea965b27b09f0776df9095ebf013fb4d/halo/halo.py#L359-L379
def frame(self):
    """Builds and returns the frame to be rendered

    Returns
    -------
    str
        The spinner frame, joined with the text, ready to be written to
        the stream. (The previous docstring incorrectly said ``self``.)
    """
    frames = self._spinner['frames']
    frame = frames[self._frame_index]

    if self._color:
        frame = colored_frame(frame, self._color)

    # Advance and wrap the animation index for the next call.
    self._frame_index += 1
    self._frame_index = self._frame_index % len(frames)

    text_frame = self.text_frame()
    # Place the spinner before or after the text depending on placement.
    if self._placement == 'right':
        first, second = text_frame, frame
    else:
        first, second = frame, text_frame
    return u'{0} {1}'.format(first, second)
[ "def", "frame", "(", "self", ")", ":", "frames", "=", "self", ".", "_spinner", "[", "'frames'", "]", "frame", "=", "frames", "[", "self", ".", "_frame_index", "]", "if", "self", ".", "_color", ":", "frame", "=", "colored_frame", "(", "frame", ",", "s...
Builds and returns the frame to be rendered Returns ------- self
[ "Builds", "and", "returns", "the", "frame", "to", "be", "rendered", "Returns", "-------", "self" ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L6391-L6416
def create_namespaced_pod_eviction(self, name, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_pod_eviction  # noqa: E501

    create eviction of a Pod  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_pod_eviction(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Eviction (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1beta1Eviction body: (required)
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param bool include_uninitialized: If IncludeUninitialized is specified, the object may be returned without completing initialization.
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1beta1Eviction
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync paths delegate to the *_with_http_info
    # variant: it returns a request thread when async_req is set and the
    # response data otherwise, so a single call covers both cases.
    return self.create_namespaced_pod_eviction_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
[ "def", "create_namespaced_pod_eviction", "(", "self", ",", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ...
create_namespaced_pod_eviction # noqa: E501 create eviction of a Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_pod_eviction(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Eviction (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1Eviction body: (required) :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param bool include_uninitialized: If IncludeUninitialized is specified, the object may be returned without completing initialization. :param str pretty: If 'true', then the output is pretty printed. :return: V1beta1Eviction If the method is called asynchronously, returns the request thread.
[ "create_namespaced_pod_eviction", "#", "noqa", ":", "E501" ]
python
train
mitsei/dlkit
dlkit/json_/resource/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/sessions.py#L3012-L3031
def is_child_of_bin(self, id_, bin_id):
    """Tests if a bin is a direct child of another.

    arg:    id (osid.id.Id): an ``Id``
    arg:    bin_id (osid.id.Id): the ``Id`` of a bin
    return: (boolean) - ``true`` if the ``id`` is a child of
            ``bin_id,`` ``false`` otherwise
    raise:  NotFound - ``bin_id`` is not found
    raise:  NullArgument - ``id`` or ``bin_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` not found return ``false``.

    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_child_of_bin
    if self._catalog_session is None:
        return self._hierarchy_session.is_child(id_=bin_id, child_id=id_)
    # Delegate to the catalog session when one is configured.
    return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=bin_id)
[ "def", "is_child_of_bin", "(", "self", ",", "id_", ",", "bin_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.is_child_of_bin", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_sessio...
Tests if a bin is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: bin_id (osid.id.Id): the ``Id`` of a bin return: (boolean) - ``true`` if the ``id`` is a child of ``bin_id,`` ``false`` otherwise raise: NotFound - ``bin_id`` is not found raise: NullArgument - ``id`` or ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
[ "Tests", "if", "a", "bin", "is", "a", "direct", "child", "of", "another", "." ]
python
train
anjianshi/flask-restful-extend
flask_restful_extend/model_reqparse.py
https://github.com/anjianshi/flask-restful-extend/blob/cc168729bf341d4f9c0f6938be30463acbf770f1/flask_restful_extend/model_reqparse.py#L18-L61
def make_request_parser(model_or_inst, excludes=None, only=None, for_populate=False): """Pass a `model class` or `model instance` to this function, then, it will generate a `RequestParser` that extract user request data from `request.json` according to the model class's definition. Parameter `excludes` and `only` can be `str` or list of `str`, then are used to specify which columns should be handled. If you passed `excludes` and `only` at same time, only `excludes` will be used. And, the primary key of the model will not be added to `RequestParser`'s argument list, unless you explicitly specify it use `only` parameter. If you pass in a model class, but not a model instance, the function will doing `required` checking, for columns that nullable=False. (If you pass in a model instance, the `required` checking will not proceed. Because in this situation, we should allow the user to ignore the assignment to a field) """ is_inst = _is_inst(model_or_inst) if isinstance(excludes, six.string_types): excludes = [excludes] if excludes and only: only = None elif isinstance(only, six.string_types): only = [only] parser = RequestPopulator() if for_populate else reqparse.RequestParser() for col in model_or_inst.__table__.columns: if only: if col.name not in only: continue elif (excludes and col.name in excludes) or col.primary_key: continue col_type = col.type.python_type kwargs = { "type": _type_dict.get(col_type.__name__, col_type) if hasattr(col_type, '__name__') else col_type } # When the context was to creating a new model instance, if a field has no default value, and is not nullable, # mark it's corresponding argument as `required`. # 创建新数据库实例时,若一个字段既没有默认值,又不允许 NULL,则把它对应 arg 设为 required if not is_inst and col.default is None and col.server_default is None and not col.nullable: kwargs["required"] = True parser.add_argument(col.name, **kwargs) return parser
[ "def", "make_request_parser", "(", "model_or_inst", ",", "excludes", "=", "None", ",", "only", "=", "None", ",", "for_populate", "=", "False", ")", ":", "is_inst", "=", "_is_inst", "(", "model_or_inst", ")", "if", "isinstance", "(", "excludes", ",", "six", ...
Pass a `model class` or `model instance` to this function, then, it will generate a `RequestParser` that extract user request data from `request.json` according to the model class's definition. Parameter `excludes` and `only` can be `str` or list of `str`, then are used to specify which columns should be handled. If you passed `excludes` and `only` at same time, only `excludes` will be used. And, the primary key of the model will not be added to `RequestParser`'s argument list, unless you explicitly specify it use `only` parameter. If you pass in a model class, but not a model instance, the function will doing `required` checking, for columns that nullable=False. (If you pass in a model instance, the `required` checking will not proceed. Because in this situation, we should allow the user to ignore the assignment to a field)
[ "Pass", "a", "model", "class", "or", "model", "instance", "to", "this", "function", "then", "it", "will", "generate", "a", "RequestParser", "that", "extract", "user", "request", "data", "from", "request", ".", "json", "according", "to", "the", "model", "clas...
python
train
Shapeways/coyote_framework
coyote_framework/webdriver/webdriverwrapper/WebDriverWrapper.py
https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/webdriver/webdriverwrapper/WebDriverWrapper.py#L418-L446
def find_by_dynamic_locator(self, template_locator, variables, find_all=False, search_object=None): ''' Find with dynamic locator @type template_locator: webdriverwrapper.support.locator.Locator @param template_locator: Template locator w/ formatting bits to insert @type variables: dict @param variables: Dictionary of variable substitutions @type find_all: bool @param find_all: True to find all elements immediately, False for find single element only @type search_object: webdriverwrapper.WebElementWrapper @param search_object: Optional WebElement to start search with. If null, search will be on self.driver @rtype: webdriverwrapper.WebElementWrapper or list() @return: Single WebElemetnWrapper if find_all is False, list of WebElementWrappers if find_all is True ''' template_variable_character = '%' # raise an exception if user passed non-dictionary variables if not isinstance(variables, dict): raise TypeError('You must use a dictionary to populate locator variables') # replace all variables that match the keys in 'variables' dict locator = "" for key in variables.keys(): locator = template_locator.replace(template_variable_character + key, variables[key]) return self.find(locator, find_all, search_object)
[ "def", "find_by_dynamic_locator", "(", "self", ",", "template_locator", ",", "variables", ",", "find_all", "=", "False", ",", "search_object", "=", "None", ")", ":", "template_variable_character", "=", "'%'", "# raise an exception if user passed non-dictionary variables", ...
Find with dynamic locator @type template_locator: webdriverwrapper.support.locator.Locator @param template_locator: Template locator w/ formatting bits to insert @type variables: dict @param variables: Dictionary of variable substitutions @type find_all: bool @param find_all: True to find all elements immediately, False for find single element only @type search_object: webdriverwrapper.WebElementWrapper @param search_object: Optional WebElement to start search with. If null, search will be on self.driver @rtype: webdriverwrapper.WebElementWrapper or list() @return: Single WebElemetnWrapper if find_all is False, list of WebElementWrappers if find_all is True
[ "Find", "with", "dynamic", "locator" ]
python
train
aodag/WebDispatch
webdispatch/urldispatcher.py
https://github.com/aodag/WebDispatch/blob/55f8658a2b4100498e098a80303a346c3940f1bc/webdispatch/urldispatcher.py#L65-L67
def make_full_qualified_url(self, path: str) -> str: """ append application url to path""" return self.application_uri.rstrip('/') + '/' + path.lstrip('/')
[ "def", "make_full_qualified_url", "(", "self", ",", "path", ":", "str", ")", "->", "str", ":", "return", "self", ".", "application_uri", ".", "rstrip", "(", "'/'", ")", "+", "'/'", "+", "path", ".", "lstrip", "(", "'/'", ")" ]
append application url to path
[ "append", "application", "url", "to", "path" ]
python
train
casebeer/python-hkdf
hkdf.py
https://github.com/casebeer/python-hkdf/blob/cc3c9dbf0a271b27a7ac5cd04cc1485bbc3b4307/hkdf.py#L10-L25
def hkdf_extract(salt, input_key_material, hash=hashlib.sha512): ''' Extract a pseudorandom key suitable for use with hkdf_expand from the input_key_material and a salt using HMAC with the provided hash (default SHA-512). salt should be a random, application-specific byte string. If salt is None or the empty string, an all-zeros string of the same length as the hash's block size will be used instead per the RFC. See the HKDF draft RFC and paper for usage notes. ''' hash_len = hash().digest_size if salt == None or len(salt) == 0: salt = bytearray((0,) * hash_len) return hmac.new(bytes(salt), buffer(input_key_material), hash).digest()
[ "def", "hkdf_extract", "(", "salt", ",", "input_key_material", ",", "hash", "=", "hashlib", ".", "sha512", ")", ":", "hash_len", "=", "hash", "(", ")", ".", "digest_size", "if", "salt", "==", "None", "or", "len", "(", "salt", ")", "==", "0", ":", "sa...
Extract a pseudorandom key suitable for use with hkdf_expand from the input_key_material and a salt using HMAC with the provided hash (default SHA-512). salt should be a random, application-specific byte string. If salt is None or the empty string, an all-zeros string of the same length as the hash's block size will be used instead per the RFC. See the HKDF draft RFC and paper for usage notes.
[ "Extract", "a", "pseudorandom", "key", "suitable", "for", "use", "with", "hkdf_expand", "from", "the", "input_key_material", "and", "a", "salt", "using", "HMAC", "with", "the", "provided", "hash", "(", "default", "SHA", "-", "512", ")", "." ]
python
train
sorgerlab/indra
indra/sources/rlimsp/api.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/rlimsp/api.py#L62-L88
def process_from_json_file(filename, doc_id_type=None): """Process RLIMSP extractions from a bulk-download JSON file. Parameters ---------- filename : str Path to the JSON file. doc_id_type : Optional[str] In some cases the RLIMS-P paragraph info doesn't contain 'pmid' or 'pmcid' explicitly, instead if contains a 'docId' key. This parameter allows defining what ID type 'docId' sould be interpreted as. Its values should be 'pmid' or 'pmcid' or None if not used. Returns ------- :py:class:`indra.sources.rlimsp.processor.RlimspProcessor` An RlimspProcessor which contains a list of extracted INDRA Statements in its statements attribute. """ with open(filename, 'rt') as f: lines = f.readlines() json_list = [] for line in lines: json_list.append(json.loads(line)) rp = RlimspProcessor(json_list, doc_id_type=doc_id_type) rp.extract_statements() return rp
[ "def", "process_from_json_file", "(", "filename", ",", "doc_id_type", "=", "None", ")", ":", "with", "open", "(", "filename", ",", "'rt'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "json_list", "=", "[", "]", "for", "line", ...
Process RLIMSP extractions from a bulk-download JSON file. Parameters ---------- filename : str Path to the JSON file. doc_id_type : Optional[str] In some cases the RLIMS-P paragraph info doesn't contain 'pmid' or 'pmcid' explicitly, instead if contains a 'docId' key. This parameter allows defining what ID type 'docId' sould be interpreted as. Its values should be 'pmid' or 'pmcid' or None if not used. Returns ------- :py:class:`indra.sources.rlimsp.processor.RlimspProcessor` An RlimspProcessor which contains a list of extracted INDRA Statements in its statements attribute.
[ "Process", "RLIMSP", "extractions", "from", "a", "bulk", "-", "download", "JSON", "file", "." ]
python
train
tanghaibao/jcvi
jcvi/variation/phase.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/phase.py#L53-L74
def counts(args): """ %prog counts vcffile Collect allele counts from RO and AO fields. """ p = OptionParser(counts.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) vcffile, = args vcf_reader = vcf.Reader(open(vcffile)) for r in vcf_reader: v = CPRA(r) if not v.is_valid: continue for sample in r.samples: ro = sample["RO"] ao = sample["AO"] print("\t".join(str(x) for x in (v, ro, ao)))
[ "def", "counts", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "counts", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "n...
%prog counts vcffile Collect allele counts from RO and AO fields.
[ "%prog", "counts", "vcffile" ]
python
train
google/grr
grr/server/grr_response_server/aff4_objects/filestore.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/filestore.py#L84-L114
def AddFile(self, fd, external=True): """Create a new file in the file store. We delegate the actual file addition to our contained implementations. Implementations can either implement the AddFile() method, returning a file like object which will be written on, or directly support the AddBlobToStore() method which can copy the VFSBlobImage efficiently. Args: fd: An AFF4 object open for read/write. external: If true, attempt to add files to stores defined as EXTERNAL. """ files_for_write = [] for sub_store in self.GetChildrenByPriority(allow_external=external): new_file = sub_store.AddFile(fd) if new_file: files_for_write.append(new_file) fd.Seek(0) while files_for_write: # If we got filehandles back, send them the data. data = fd.Read(self.CHUNK_SIZE) if not data: break for child in files_for_write: child.Write(data) for child in files_for_write: child.Close()
[ "def", "AddFile", "(", "self", ",", "fd", ",", "external", "=", "True", ")", ":", "files_for_write", "=", "[", "]", "for", "sub_store", "in", "self", ".", "GetChildrenByPriority", "(", "allow_external", "=", "external", ")", ":", "new_file", "=", "sub_stor...
Create a new file in the file store. We delegate the actual file addition to our contained implementations. Implementations can either implement the AddFile() method, returning a file like object which will be written on, or directly support the AddBlobToStore() method which can copy the VFSBlobImage efficiently. Args: fd: An AFF4 object open for read/write. external: If true, attempt to add files to stores defined as EXTERNAL.
[ "Create", "a", "new", "file", "in", "the", "file", "store", "." ]
python
train
cdgriffith/puremagic
puremagic/main.py
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L171-L184
def from_string(string, mime=False, filename=None): """ Reads in string, attempts to identify content based off magic number and will return the file extension. If mime is True it will return the mime type instead. If filename is provided it will be used in the computation. :param string: string representation to check :param mime: Return mime, not extension :param filename: original filename :return: guessed extension or mime """ head, foot = _string_details(string) ext = ext_from_filename(filename) if filename else None return _magic(head, foot, mime, ext)
[ "def", "from_string", "(", "string", ",", "mime", "=", "False", ",", "filename", "=", "None", ")", ":", "head", ",", "foot", "=", "_string_details", "(", "string", ")", "ext", "=", "ext_from_filename", "(", "filename", ")", "if", "filename", "else", "Non...
Reads in string, attempts to identify content based off magic number and will return the file extension. If mime is True it will return the mime type instead. If filename is provided it will be used in the computation. :param string: string representation to check :param mime: Return mime, not extension :param filename: original filename :return: guessed extension or mime
[ "Reads", "in", "string", "attempts", "to", "identify", "content", "based", "off", "magic", "number", "and", "will", "return", "the", "file", "extension", ".", "If", "mime", "is", "True", "it", "will", "return", "the", "mime", "type", "instead", ".", "If", ...
python
train
metacloud/gilt
gilt/config.py
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L145-L156
def _get_dst_dir(dst_dir): """ Prefix the provided string with working directory and return a str. :param dst_dir: A string to be prefixed with the working dir. :return: str """ wd = os.getcwd() _makedirs(dst_dir) return os.path.join(wd, dst_dir)
[ "def", "_get_dst_dir", "(", "dst_dir", ")", ":", "wd", "=", "os", ".", "getcwd", "(", ")", "_makedirs", "(", "dst_dir", ")", "return", "os", ".", "path", ".", "join", "(", "wd", ",", "dst_dir", ")" ]
Prefix the provided string with working directory and return a str. :param dst_dir: A string to be prefixed with the working dir. :return: str
[ "Prefix", "the", "provided", "string", "with", "working", "directory", "and", "return", "a", "str", "." ]
python
train
orbingol/NURBS-Python
geomdl/fitting.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/fitting.py#L212-L355
def approximate_surface(points, size_u, size_v, degree_u, degree_v, **kwargs): """ Surface approximation using least squares method with fixed number of control points. This algorithm interpolates the corner control points and approximates the remaining control points. Please refer to Algorithm A9.7 of The NURBS Book (2nd Edition), pp.422-423 for details. Keyword Arguments: * ``centripetal``: activates centripetal parametrization method. *Default: False* * ``ctrlpts_size_u``: number of control points on the u-direction. *Default: size_u - 1* * ``ctrlpts_size_v``: number of control points on the v-direction. *Default: size_v - 1* :param points: data points :type points: list, tuple :param size_u: number of data points on the u-direction, :math:`r` :type size_u: int :param size_v: number of data points on the v-direction, :math:`s` :type size_v: int :param degree_u: degree of the output surface for the u-direction :type degree_u: int :param degree_v: degree of the output surface for the v-direction :type degree_v: int :return: approximated B-Spline surface :rtype: BSpline.Surface """ # Keyword arguments use_centripetal = kwargs.get('centripetal', False) num_cpts_u = kwargs.get('ctrlpts_size_u', size_u - 1) # number of datapts, r + 1 > number of ctrlpts, n + 1 num_cpts_v = kwargs.get('ctrlpts_size_v', size_v - 1) # number of datapts, s + 1 > number of ctrlpts, m + 1 # Dimension dim = len(points[0]) # Get uk and vl uk, vl = compute_params_surface(points, size_u, size_v, use_centripetal) # Compute knot vectors kv_u = compute_knot_vector2(degree_u, size_u, num_cpts_u, uk) kv_v = compute_knot_vector2(degree_v, size_v, num_cpts_v, vl) # Construct matrix Nu matrix_nu = [] for i in range(1, size_u - 1): m_temp = [] for j in range(1, num_cpts_u - 1): m_temp.append(helpers.basis_function_one(degree_u, kv_u, j, uk[i])) matrix_nu.append(m_temp) # Compute Nu transpose matrix_ntu = linalg.matrix_transpose(matrix_nu) # Compute NTNu matrix matrix_ntnu = 
linalg.matrix_multiply(matrix_ntu, matrix_nu) # Compute LU-decomposition of NTNu matrix matrix_ntnul, matrix_ntnuu = linalg.lu_decomposition(matrix_ntnu) # Fit u-direction ctrlpts_tmp = [[0.0 for _ in range(dim)] for _ in range(num_cpts_u * size_v)] for j in range(size_v): ctrlpts_tmp[j + (size_v * 0)] = list(points[j + (size_v * 0)]) ctrlpts_tmp[j + (size_v * (num_cpts_u - 1))] = list(points[j + (size_v * (size_u - 1))]) # Compute Rku - Eqn. 9.63 pt0 = points[j + (size_v * 0)] # Qzero ptm = points[j + (size_v * (size_u - 1))] # Qm rku = [] for i in range(1, size_u - 1): ptk = points[j + (size_v * i)] n0p = helpers.basis_function_one(degree_u, kv_u, 0, uk[i]) nnp = helpers.basis_function_one(degree_u, kv_u, num_cpts_u - 1, uk[i]) elem2 = [c * n0p for c in pt0] elem3 = [c * nnp for c in ptm] rku.append([a - b - c for a, b, c in zip(ptk, elem2, elem3)]) # Compute Ru - Eqn. 9.67 ru = [[0.0 for _ in range(dim)] for _ in range(num_cpts_u - 2)] for i in range(1, num_cpts_u - 1): ru_tmp = [] for idx, pt in enumerate(rku): ru_tmp.append([p * helpers.basis_function_one(degree_u, kv_u, i, uk[idx + 1]) for p in pt]) for d in range(dim): for idx in range(len(ru_tmp)): ru[i - 1][d] += ru_tmp[idx][d] # Get intermediate control points for d in range(dim): b = [pt[d] for pt in ru] y = linalg.forward_substitution(matrix_ntnul, b) x = linalg.backward_substitution(matrix_ntnuu, y) for i in range(1, num_cpts_u - 1): ctrlpts_tmp[j + (size_v * i)][d] = x[i - 1] # Construct matrix Nv matrix_nv = [] for i in range(1, size_v - 1): m_temp = [] for j in range(1, num_cpts_v - 1): m_temp.append(helpers.basis_function_one(degree_v, kv_v, j, vl[i])) matrix_nv.append(m_temp) # Compute Nv transpose matrix_ntv = linalg.matrix_transpose(matrix_nv) # Compute NTNv matrix matrix_ntnv = linalg.matrix_multiply(matrix_ntv, matrix_nv) # Compute LU-decomposition of NTNv matrix matrix_ntnvl, matrix_ntnvu = linalg.lu_decomposition(matrix_ntnv) # Fit v-direction ctrlpts = [[0.0 for _ in range(dim)] for _ in 
range(num_cpts_u * num_cpts_v)] for i in range(num_cpts_u): ctrlpts[0 + (num_cpts_v * i)] = list(ctrlpts_tmp[0 + (size_v * i)]) ctrlpts[num_cpts_v - 1 + (num_cpts_v * i)] = list(ctrlpts_tmp[size_v - 1 + (size_v * i)]) # Compute Rkv - Eqs. 9.63 pt0 = ctrlpts_tmp[0 + (size_v * i)] # Qzero ptm = ctrlpts_tmp[size_v - 1 + (size_v * i)] # Qm rkv = [] for j in range(1, size_v - 1): ptk = ctrlpts_tmp[j + (size_v * i)] n0p = helpers.basis_function_one(degree_v, kv_v, 0, vl[j]) nnp = helpers.basis_function_one(degree_v, kv_v, num_cpts_v - 1, vl[j]) elem2 = [c * n0p for c in pt0] elem3 = [c * nnp for c in ptm] rkv.append([a - b - c for a, b, c in zip(ptk, elem2, elem3)]) # Compute Rv - Eqn. 9.67 rv = [[0.0 for _ in range(dim)] for _ in range(num_cpts_v - 2)] for j in range(1, num_cpts_v - 1): rv_tmp = [] for idx, pt in enumerate(rkv): rv_tmp.append([p * helpers.basis_function_one(degree_v, kv_v, j, vl[idx + 1]) for p in pt]) for d in range(dim): for idx in range(len(rv_tmp)): rv[j - 1][d] += rv_tmp[idx][d] # Get intermediate control points for d in range(dim): b = [pt[d] for pt in rv] y = linalg.forward_substitution(matrix_ntnvl, b) x = linalg.backward_substitution(matrix_ntnvu, y) for j in range(1, num_cpts_v - 1): ctrlpts[j + (num_cpts_v * i)][d] = x[j - 1] # Generate B-spline surface surf = BSpline.Surface() surf.degree_u = degree_u surf.degree_v = degree_v surf.ctrlpts_size_u = num_cpts_u surf.ctrlpts_size_v = num_cpts_v surf.ctrlpts = ctrlpts surf.knotvector_u = kv_u surf.knotvector_v = kv_v return surf
[ "def", "approximate_surface", "(", "points", ",", "size_u", ",", "size_v", ",", "degree_u", ",", "degree_v", ",", "*", "*", "kwargs", ")", ":", "# Keyword arguments", "use_centripetal", "=", "kwargs", ".", "get", "(", "'centripetal'", ",", "False", ")", "num...
Surface approximation using least squares method with fixed number of control points. This algorithm interpolates the corner control points and approximates the remaining control points. Please refer to Algorithm A9.7 of The NURBS Book (2nd Edition), pp.422-423 for details. Keyword Arguments: * ``centripetal``: activates centripetal parametrization method. *Default: False* * ``ctrlpts_size_u``: number of control points on the u-direction. *Default: size_u - 1* * ``ctrlpts_size_v``: number of control points on the v-direction. *Default: size_v - 1* :param points: data points :type points: list, tuple :param size_u: number of data points on the u-direction, :math:`r` :type size_u: int :param size_v: number of data points on the v-direction, :math:`s` :type size_v: int :param degree_u: degree of the output surface for the u-direction :type degree_u: int :param degree_v: degree of the output surface for the v-direction :type degree_v: int :return: approximated B-Spline surface :rtype: BSpline.Surface
[ "Surface", "approximation", "using", "least", "squares", "method", "with", "fixed", "number", "of", "control", "points", "." ]
python
train
iotile/coretools
iotilecore/iotile/core/hw/update/record.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/update/record.py#L82-L92
def LoadPlugins(cls): """Load all registered iotile.update_record plugins.""" if cls.PLUGINS_LOADED: return reg = ComponentRegistry() for _, record in reg.load_extensions('iotile.update_record'): cls.RegisterRecordType(record) cls.PLUGINS_LOADED = True
[ "def", "LoadPlugins", "(", "cls", ")", ":", "if", "cls", ".", "PLUGINS_LOADED", ":", "return", "reg", "=", "ComponentRegistry", "(", ")", "for", "_", ",", "record", "in", "reg", ".", "load_extensions", "(", "'iotile.update_record'", ")", ":", "cls", ".", ...
Load all registered iotile.update_record plugins.
[ "Load", "all", "registered", "iotile", ".", "update_record", "plugins", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/module.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/module.py#L388-L409
def open_handle(self): """ Opens a new handle to the module. The new handle is stored in the L{hFile} property. """ if not self.get_filename(): msg = "Cannot retrieve filename for module at %s" msg = msg % HexDump.address( self.get_base() ) raise Exception(msg) hFile = win32.CreateFile(self.get_filename(), dwShareMode = win32.FILE_SHARE_READ, dwCreationDisposition = win32.OPEN_EXISTING) # In case hFile was set to an actual handle value instead of a Handle # object. This shouldn't happen unless the user tinkered with hFile. if not hasattr(self.hFile, '__del__'): self.close_handle() self.hFile = hFile
[ "def", "open_handle", "(", "self", ")", ":", "if", "not", "self", ".", "get_filename", "(", ")", ":", "msg", "=", "\"Cannot retrieve filename for module at %s\"", "msg", "=", "msg", "%", "HexDump", ".", "address", "(", "self", ".", "get_base", "(", ")", ")...
Opens a new handle to the module. The new handle is stored in the L{hFile} property.
[ "Opens", "a", "new", "handle", "to", "the", "module", "." ]
python
train
rflamary/POT
ot/stochastic.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/stochastic.py#L266-L338
def c_transform_entropic(b, M, reg, beta): ''' The goal is to recover u from the c-transform. The function computes the c_transform of a dual variable from the other dual variable: .. math:: u = v^{c,reg} = -reg \sum_j exp((v - M)/reg) b_j Where : - M is the (ns,nt) metric cost matrix - u, v are dual variables in R^IxR^J - reg is the regularization term It is used to recover an optimal u from optimal v solving the semi dual problem, see Proposition 2.1 of [18]_ Parameters ---------- b : np.ndarray(nt,) target measure M : np.ndarray(ns, nt) cost matrix reg : float regularization term > 0 v : np.ndarray(nt,) dual variable Returns ------- u : np.ndarray(ns,) dual variable Examples -------- >>> n_source = 7 >>> n_target = 4 >>> reg = 1 >>> numItermax = 300000 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> rng = np.random.RandomState(0) >>> X_source = rng.randn(n_source, 2) >>> Y_target = rng.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> method = "ASGD" >>> asgd_pi = stochastic.solve_semi_dual_entropic(a, b, M, reg, method, numItermax) >>> print(asgd_pi) References ---------- [Genevay et al., 2016] : Stochastic Optimization for Large-scale Optimal Transport, Advances in Neural Information Processing Systems (2016), arXiv preprint arxiv:1605.08527. ''' n_source = np.shape(M)[0] alpha = np.zeros(n_source) for i in range(n_source): r = M[i, :] - beta min_r = np.min(r) exp_beta = np.exp(-(r - min_r) / reg) * b alpha[i] = min_r - reg * np.log(np.sum(exp_beta)) return alpha
[ "def", "c_transform_entropic", "(", "b", ",", "M", ",", "reg", ",", "beta", ")", ":", "n_source", "=", "np", ".", "shape", "(", "M", ")", "[", "0", "]", "alpha", "=", "np", ".", "zeros", "(", "n_source", ")", "for", "i", "in", "range", "(", "n_...
The goal is to recover u from the c-transform. The function computes the c_transform of a dual variable from the other dual variable: .. math:: u = v^{c,reg} = -reg \sum_j exp((v - M)/reg) b_j Where : - M is the (ns,nt) metric cost matrix - u, v are dual variables in R^IxR^J - reg is the regularization term It is used to recover an optimal u from optimal v solving the semi dual problem, see Proposition 2.1 of [18]_ Parameters ---------- b : np.ndarray(nt,) target measure M : np.ndarray(ns, nt) cost matrix reg : float regularization term > 0 v : np.ndarray(nt,) dual variable Returns ------- u : np.ndarray(ns,) dual variable Examples -------- >>> n_source = 7 >>> n_target = 4 >>> reg = 1 >>> numItermax = 300000 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> rng = np.random.RandomState(0) >>> X_source = rng.randn(n_source, 2) >>> Y_target = rng.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> method = "ASGD" >>> asgd_pi = stochastic.solve_semi_dual_entropic(a, b, M, reg, method, numItermax) >>> print(asgd_pi) References ---------- [Genevay et al., 2016] : Stochastic Optimization for Large-scale Optimal Transport, Advances in Neural Information Processing Systems (2016), arXiv preprint arxiv:1605.08527.
[ "The", "goal", "is", "to", "recover", "u", "from", "the", "c", "-", "transform", "." ]
python
train
wglass/lighthouse
lighthouse/haproxy/stanzas/stanza.py
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/stanzas/stanza.py#L36-L48
def add_line(self, line): """ Adds a given line string to the list of lines, validating the line first. """ if not self.is_valid_line(line): logger.warn( "Invalid line for %s section: '%s'", self.section_name, line ) return self.lines.append(line)
[ "def", "add_line", "(", "self", ",", "line", ")", ":", "if", "not", "self", ".", "is_valid_line", "(", "line", ")", ":", "logger", ".", "warn", "(", "\"Invalid line for %s section: '%s'\"", ",", "self", ".", "section_name", ",", "line", ")", "return", "sel...
Adds a given line string to the list of lines, validating the line first.
[ "Adds", "a", "given", "line", "string", "to", "the", "list", "of", "lines", "validating", "the", "line", "first", "." ]
python
train
Neurita/boyle
boyle/nifti/sets.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/sets.py#L167-L197
def _load_images_and_labels(self, images, labels=None): """Read the images, load them into self.items and set the labels.""" if not isinstance(images, (list, tuple)): raise ValueError('Expected an iterable (list or tuple) of strings or img-like objects. ' 'Got a {}.'.format(type(images))) if not len(images) > 0: raise ValueError('Expected an iterable (list or tuple) of strings or img-like objects ' 'of size higher than 0. Got {} items.'.format(len(images))) if labels is not None and len(labels) != len(images): raise ValueError('Expected the same length for image set ({}) and ' 'labels list ({}).'.format(len(images), len(labels))) first_file = images[0] if first_file: first_img = NeuroImage(first_file) else: raise('Error reading image {}.'.format(repr_imgs(first_file))) for idx, image in enumerate(images): try: img = NeuroImage(image) self.check_compatibility(img, first_img) except: log.exception('Error reading image {}.'.format(repr_imgs(image))) raise else: self.items.append(img) self.set_labels(labels)
[ "def", "_load_images_and_labels", "(", "self", ",", "images", ",", "labels", "=", "None", ")", ":", "if", "not", "isinstance", "(", "images", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "ValueError", "(", "'Expected an iterable (list or tuple) of s...
Read the images, load them into self.items and set the labels.
[ "Read", "the", "images", "load", "them", "into", "self", ".", "items", "and", "set", "the", "labels", "." ]
python
valid
pymc-devs/pymc
pymc/Container.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Container.py#L168-L248
def file_items(container, iterable): """ Files away objects into the appropriate attributes of the container. """ # container._value = copy(iterable) container.nodes = set() container.variables = set() container.deterministics = set() container.stochastics = set() container.potentials = set() container.observed_stochastics = set() # containers needs to be a list to hold unhashable items. container.containers = [] i = -1 for item in iterable: # If this is a dictionary, switch from key to item. if isinstance(iterable, (dict, dict_proxy_type)): key = item item = iterable[key] # Item counter else: i += 1 # If the item isn't iterable, file it away. if isinstance(item, Variable): container.variables.add(item) if isinstance(item, StochasticBase): if item.observed or not getattr(item, 'mask', None) is None: container.observed_stochastics.add(item) if not item.observed: container.stochastics.add(item) elif isinstance(item, DeterministicBase): container.deterministics.add(item) elif isinstance(item, PotentialBase): container.potentials.add(item) elif isinstance(item, ContainerBase): container.assimilate(item) container.containers.append(item) # Wrap internal containers elif hasattr(item, '__iter__'): # If this is a non-object-valued ndarray, don't container-ize it. if isinstance(item, ndarray): if item.dtype != dtype('object'): continue # If the item is iterable, wrap it in a container. Replace the item # with the wrapped version. try: new_container = Container(item) except: continue # Update all of container's variables, potentials, etc. with the new wrapped # iterable's. This process recursively unpacks nested iterables. 
container.assimilate(new_container) if isinstance(container, dict): container.replace(key, new_container) elif isinstance(container, tuple): return container[:i] + (new_container,) + container[i + 1:] else: container.replace(item, new_container, i) container.nodes = container.potentials | container.variables # 'Freeze' markov blanket, moral neighbors, coparents of all constituent stochastics # for future use for attr in ['moral_neighbors', 'markov_blanket', 'coparents']: setattr(container, attr, {}) for s in container.stochastics: for attr in ['moral_neighbors', 'markov_blanket', 'coparents']: getattr(container, attr)[s] = getattr(s, attr)
[ "def", "file_items", "(", "container", ",", "iterable", ")", ":", "# container._value = copy(iterable)", "container", ".", "nodes", "=", "set", "(", ")", "container", ".", "variables", "=", "set", "(", ")", "container", ".", "deterministics", "=", "set", "(", ...
Files away objects into the appropriate attributes of the container.
[ "Files", "away", "objects", "into", "the", "appropriate", "attributes", "of", "the", "container", "." ]
python
train
idlesign/django-sitetree
sitetree/sitetreeapp.py
https://github.com/idlesign/django-sitetree/blob/61de4608e6e415247c75fe8691027d7c4ed0d1e7/sitetree/sitetreeapp.py#L472-L493
def current_app_is_admin(self): """Returns boolean whether current application is Admin contrib. :rtype: bool """ is_admin = self._current_app_is_admin if is_admin is None: context = self.current_page_context current_app = getattr( # Try from request.resolver_match.app_name getattr(context.get('request', None), 'resolver_match', None), 'app_name', # Try from global context obj. getattr(context, 'current_app', None)) if current_app is None: # Try from global context dict. current_app = context.get('current_app', '') is_admin = current_app == ADMIN_APP_NAME self._current_app_is_admin = is_admin return is_admin
[ "def", "current_app_is_admin", "(", "self", ")", ":", "is_admin", "=", "self", ".", "_current_app_is_admin", "if", "is_admin", "is", "None", ":", "context", "=", "self", ".", "current_page_context", "current_app", "=", "getattr", "(", "# Try from request.resolver_ma...
Returns boolean whether current application is Admin contrib. :rtype: bool
[ "Returns", "boolean", "whether", "current", "application", "is", "Admin", "contrib", "." ]
python
test
davidmogar/cucco
cucco/logging.py
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/logging.py#L5-L23
def initialize_logger(debug): """Set up logger to be used by the library. Args: debug: Wheter to use debug level or not. Returns: A logger ready to be used. """ level = logging.DEBUG if debug else logging.INFO logger = logging.getLogger('cucco') logger.setLevel(level) formatter = logging.Formatter('%(asctime)s %(levelname).1s %(message)s') console_handler = logging.StreamHandler() console_handler.setLevel(level) console_handler.setFormatter(formatter) logger.addHandler(console_handler) return logger
[ "def", "initialize_logger", "(", "debug", ")", ":", "level", "=", "logging", ".", "DEBUG", "if", "debug", "else", "logging", ".", "INFO", "logger", "=", "logging", ".", "getLogger", "(", "'cucco'", ")", "logger", ".", "setLevel", "(", "level", ")", "form...
Set up logger to be used by the library. Args: debug: Wheter to use debug level or not. Returns: A logger ready to be used.
[ "Set", "up", "logger", "to", "be", "used", "by", "the", "library", "." ]
python
train
HPENetworking/PYHPEIMC
pyhpeimc/plat/termaccess.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/plat/termaccess.py#L233-L295
def add_ip_scope(name, description, auth, url, startip=None, endip=None, network_address=None): """ Function takes input of four strings Start Ip, endIp, name, and description to add new Ip Scope to terminal access in the HPE IMC base platform :param name: str Name of the owner of this IP scope ex. 'admin' :param description: str description of the Ip scope :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :param startip: str Start of IP address scope ex. '10.101.0.1' :param endip: str End of IP address scope ex. '10.101.0.254' :param network_address: ipv4 network address + subnet bits of target scope :return: 200 if successfull :rtype: >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.termaccess import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> delete_ip_scope('10.50.0.0/24', auth.creds, auth.url) <Response [204]> >>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url) >>> assert type(new_scope) is int >>> assert new_scope == 200 >>> existing_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url) >>> assert type(existing_scope) is int >>> assert existing_scope == 409 """ if network_address is not None: nw_address = ipaddress.IPv4Network(network_address) startip = nw_address[1] endip = nw_address[-2] f_url = url + "/imcrs/res/access/assignedIpScope" payload = ('''{ "startIp": "%s", "endIp": "%s","name": "%s","description": "%s" }''' % (str(startip), str(endip), str(name), str(description))) response = requests.post(f_url, auth=auth, headers=HEADERS, data=payload) try: if response.status_code == 200: # print("IP Scope Successfully Created") return response.status_code elif response.status_code == 409: # print ("IP Scope Already Exists") return response.status_code except requests.exceptions.RequestException as error: 
return "Error:\n" + str(error) + " add_ip_scope: An Error has occured"
[ "def", "add_ip_scope", "(", "name", ",", "description", ",", "auth", ",", "url", ",", "startip", "=", "None", ",", "endip", "=", "None", ",", "network_address", "=", "None", ")", ":", "if", "network_address", "is", "not", "None", ":", "nw_address", "=", ...
Function takes input of four strings Start Ip, endIp, name, and description to add new Ip Scope to terminal access in the HPE IMC base platform :param name: str Name of the owner of this IP scope ex. 'admin' :param description: str description of the Ip scope :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :param startip: str Start of IP address scope ex. '10.101.0.1' :param endip: str End of IP address scope ex. '10.101.0.254' :param network_address: ipv4 network address + subnet bits of target scope :return: 200 if successfull :rtype: >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.termaccess import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> delete_ip_scope('10.50.0.0/24', auth.creds, auth.url) <Response [204]> >>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url) >>> assert type(new_scope) is int >>> assert new_scope == 200 >>> existing_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url) >>> assert type(existing_scope) is int >>> assert existing_scope == 409
[ "Function", "takes", "input", "of", "four", "strings", "Start", "Ip", "endIp", "name", "and", "description", "to", "add", "new", "Ip", "Scope", "to", "terminal", "access", "in", "the", "HPE", "IMC", "base", "platform" ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_ras_ext_rpc/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_ras_ext_rpc/__init__.py#L139-L162
def _set_show_support_save_status(self, v, load=False): """ Setter method for show_support_save_status, mapped from YANG variable /brocade_ras_ext_rpc/show_support_save_status (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_support_save_status is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_support_save_status() directly. YANG Description: Information on the status of recent support save request """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=show_support_save_status.show_support_save_status, is_leaf=True, yang_name="show-support-save-status", rest_name="show-support-save-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showSupportSaveStatus'}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """show_support_save_status must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=show_support_save_status.show_support_save_status, is_leaf=True, yang_name="show-support-save-status", rest_name="show-support-save-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showSupportSaveStatus'}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='rpc', is_config=True)""", }) self.__show_support_save_status = t if hasattr(self, '_set'): self._set()
[ "def", "_set_show_support_save_status", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v",...
Setter method for show_support_save_status, mapped from YANG variable /brocade_ras_ext_rpc/show_support_save_status (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_support_save_status is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_support_save_status() directly. YANG Description: Information on the status of recent support save request
[ "Setter", "method", "for", "show_support_save_status", "mapped", "from", "YANG", "variable", "/", "brocade_ras_ext_rpc", "/", "show_support_save_status", "(", "rpc", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in"...
python
train
openid/JWTConnect-Python-CryptoJWT
src/cryptojwt/key_bundle.py
https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/key_bundle.py#L786-L842
def key_diff(key_bundle, key_defs): """ Creates a difference dictionary with keys that should added and keys that should be deleted from a Key Bundle to get it updated to a state that mirrors What is in the key_defs specification. :param key_bundle: The original KeyBundle :param key_defs: A set of key definitions :return: A dictionary with possible keys 'add' and 'del'. The values for the keys are lists of :py:class:`cryptojwt.jwk.JWK` instances """ keys = key_bundle.get() diff = {} # My own sorted copy key_defs = order_key_defs(key_defs)[:] used = [] for key in keys: match = False for kd in key_defs: if key.use not in kd['use']: continue if key.kty != kd['type']: continue if key.kty == 'EC': # special test only for EC keys if key.crv != kd['crv']: continue try: _kid = kd['kid'] except KeyError: pass else: if key.kid != _kid: continue match = True used.append(kd) key_defs.remove(kd) break if not match: try: diff['del'].append(key) except KeyError: diff['del'] = [key] if key_defs: _kb = build_key_bundle(key_defs) diff['add'] = _kb.keys() return diff
[ "def", "key_diff", "(", "key_bundle", ",", "key_defs", ")", ":", "keys", "=", "key_bundle", ".", "get", "(", ")", "diff", "=", "{", "}", "# My own sorted copy", "key_defs", "=", "order_key_defs", "(", "key_defs", ")", "[", ":", "]", "used", "=", "[", "...
Creates a difference dictionary with keys that should added and keys that should be deleted from a Key Bundle to get it updated to a state that mirrors What is in the key_defs specification. :param key_bundle: The original KeyBundle :param key_defs: A set of key definitions :return: A dictionary with possible keys 'add' and 'del'. The values for the keys are lists of :py:class:`cryptojwt.jwk.JWK` instances
[ "Creates", "a", "difference", "dictionary", "with", "keys", "that", "should", "added", "and", "keys", "that", "should", "be", "deleted", "from", "a", "Key", "Bundle", "to", "get", "it", "updated", "to", "a", "state", "that", "mirrors", "What", "is", "in", ...
python
train
ManiacalLabs/BiblioPixel
bibliopixel/util/util.py
https://github.com/ManiacalLabs/BiblioPixel/blob/fd97e6c651a4bbcade64733847f4eec8f7704b7c/bibliopixel/util/util.py#L36-L55
def pointOnCircle(cx, cy, radius, angle): """ Calculates the coordinates of a point on a circle given the center point, radius, and angle. """ angle = math.radians(angle) - (math.pi / 2) x = cx + radius * math.cos(angle) if x < cx: x = math.ceil(x) else: x = math.floor(x) y = cy + radius * math.sin(angle) if y < cy: y = math.ceil(y) else: y = math.floor(y) return (int(x), int(y))
[ "def", "pointOnCircle", "(", "cx", ",", "cy", ",", "radius", ",", "angle", ")", ":", "angle", "=", "math", ".", "radians", "(", "angle", ")", "-", "(", "math", ".", "pi", "/", "2", ")", "x", "=", "cx", "+", "radius", "*", "math", ".", "cos", ...
Calculates the coordinates of a point on a circle given the center point, radius, and angle.
[ "Calculates", "the", "coordinates", "of", "a", "point", "on", "a", "circle", "given", "the", "center", "point", "radius", "and", "angle", "." ]
python
valid
mottosso/be
be/vendor/requests/packages/urllib3/__init__.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/packages/urllib3/__init__.py#L37-L52
def add_stderr_logger(level=logging.DEBUG): """ Helper for quickly adding a StreamHandler to the logger. Useful for debugging. Returns the handler after adding it. """ # This method needs to be in this __init__.py to get the __name__ correct # even if urllib3 is vendored within another package. logger = logging.getLogger(__name__) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) logger.addHandler(handler) logger.setLevel(level) logger.debug('Added a stderr logging handler to logger: %s' % __name__) return handler
[ "def", "add_stderr_logger", "(", "level", "=", "logging", ".", "DEBUG", ")", ":", "# This method needs to be in this __init__.py to get the __name__ correct", "# even if urllib3 is vendored within another package.", "logger", "=", "logging", ".", "getLogger", "(", "__name__", "...
Helper for quickly adding a StreamHandler to the logger. Useful for debugging. Returns the handler after adding it.
[ "Helper", "for", "quickly", "adding", "a", "StreamHandler", "to", "the", "logger", ".", "Useful", "for", "debugging", "." ]
python
train
tilde-lab/tilde
tilde/berlinium/cubicspline.py
https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/tilde/berlinium/cubicspline.py#L23-L77
def uFuncConverter(variableIndex): '''A decorator to convert python functions to numpy universal functions A standard function of 1 variable is extended by a decorator to handle all values in a list, tuple or numpy array :param variableIndex: Specifies index for args to use as variable. This way the function can be used in classes as well as functions :type variableIndex: An positive integer **How to use:** In the example below uFuncConverter is used on the first parameter x: >>> @uFuncConverter(0) ... def test(x, y = 2): ... return x+y ... >>> x0 = 4 >>> x1 = (1, 2, 3) >>> x2 = [2, 3, 4] >>> x3 = asarray(x1) + 2 >>> print test(x0) 6 >>> print test(x1) [3 4 5] >>> print test(x2) [4 5 6] >>> print test(x3) [5 6 7] ''' def wrap(func): '''Function to wrap around methods and functions ''' def npWrapFunc(*args): '''Function specifying what the wrapping should do ''' if len(args) >= variableIndex: before = list(args[:variableIndex]) arguments = args[variableIndex] after = list(args[variableIndex + 1:]) if isinstance(arguments, (int, float, Decimal)): if variableIndex: return func(*args) else: return func(args[0]) elif isinstance(arguments, (list, tuple, ndarray)): if variableIndex: return asarray([func(*(before + [x] + after)) for x in arguments]) else: return asarray([func(x) for x in arguments]) raise Exception('Error! Arguments (%s) not of proper format' % str(arguments)) return npWrapFunc return wrap
[ "def", "uFuncConverter", "(", "variableIndex", ")", ":", "def", "wrap", "(", "func", ")", ":", "'''Function to wrap around methods and functions\n '''", "def", "npWrapFunc", "(", "*", "args", ")", ":", "'''Function specifying what the wrapping should do\n ''...
A decorator to convert python functions to numpy universal functions A standard function of 1 variable is extended by a decorator to handle all values in a list, tuple or numpy array :param variableIndex: Specifies index for args to use as variable. This way the function can be used in classes as well as functions :type variableIndex: An positive integer **How to use:** In the example below uFuncConverter is used on the first parameter x: >>> @uFuncConverter(0) ... def test(x, y = 2): ... return x+y ... >>> x0 = 4 >>> x1 = (1, 2, 3) >>> x2 = [2, 3, 4] >>> x3 = asarray(x1) + 2 >>> print test(x0) 6 >>> print test(x1) [3 4 5] >>> print test(x2) [4 5 6] >>> print test(x3) [5 6 7]
[ "A", "decorator", "to", "convert", "python", "functions", "to", "numpy", "universal", "functions" ]
python
train
yt-project/unyt
unyt/unit_object.py
https://github.com/yt-project/unyt/blob/7a4eafc229f83784f4c63d639aee554f9a6b1ca0/unyt/unit_object.py#L653-L679
def as_coeff_unit(self): """Factor the coefficient multiplying a unit For units that are multiplied by a constant dimensionless coefficient, returns a tuple containing the coefficient and a new unit object for the unmultiplied unit. Example ------- >>> import unyt as u >>> unit = (u.m**2/u.cm).simplify() >>> unit 100*m >>> unit.as_coeff_unit() (100.0, m) """ coeff, mul = self.expr.as_coeff_Mul() coeff = float(coeff) ret = Unit( mul, self.base_value / coeff, self.base_offset, self.dimensions, self.registry, ) return coeff, ret
[ "def", "as_coeff_unit", "(", "self", ")", ":", "coeff", ",", "mul", "=", "self", ".", "expr", ".", "as_coeff_Mul", "(", ")", "coeff", "=", "float", "(", "coeff", ")", "ret", "=", "Unit", "(", "mul", ",", "self", ".", "base_value", "/", "coeff", ","...
Factor the coefficient multiplying a unit For units that are multiplied by a constant dimensionless coefficient, returns a tuple containing the coefficient and a new unit object for the unmultiplied unit. Example ------- >>> import unyt as u >>> unit = (u.m**2/u.cm).simplify() >>> unit 100*m >>> unit.as_coeff_unit() (100.0, m)
[ "Factor", "the", "coefficient", "multiplying", "a", "unit" ]
python
train
BD2KGenomics/protect
src/protect/mutation_calling/somaticsniper.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L168-L243
def filter_somaticsniper(job, tumor_bam, somaticsniper_output, tumor_pileup, univ_options, somaticsniper_options): """ Filter SomaticSniper calls. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param toil.fileStore.FileID somaticsniper_output: SomaticSniper output vcf :param toil.fileStore.FileID tumor_pileup: Pileup generated for the tumor bam :param dict univ_options: Dict of universal options used by almost all tools :param dict somaticsniper_options: Options specific to SomaticSniper :returns: fsID for the filtered genome-level vcf :rtype: toil.fileStore.FileID """ work_dir = os.getcwd() input_files = { 'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'], 'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'], 'input.vcf': somaticsniper_output, 'pileup.txt': tumor_pileup, 'genome.fa.tar.gz': somaticsniper_options['genome_fasta'], 'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']} input_files = get_files_from_filestore(job, input_files, work_dir, docker=False) for key in ('genome.fa', 'genome.fa.fai'): input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir) input_files = {key: docker_path(path) for key, path in input_files.items()} # Run snpfilter.pl parameters = ['snpfilter.pl', '--snp-file', input_files['input.vcf'], '--indel-file', input_files['pileup.txt']] # Creates /data/input.vcf.SNPfilter docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version']) # Run prepare_for_readcount.pl parameters = ['prepare_for_readcount.pl', '--snp-file', input_files['input.vcf'] + '.SNPfilter'] # Creates /data/input.vcf.SNPfilter.pos docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version']) # Run bam-readcount parameters = ['-b', '15', '-f', input_files['genome.fa'], '-l', input_files['input.vcf'] + 
'.SNPfilter.pos', '-w', '1', input_files['tumor.bam']] # Creates the read counts file with open(os.path.join(work_dir, 'readcounts.txt'), 'w') as readcounts_file: docker_call(tool='bam-readcount', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], outfile=readcounts_file, tool_version=somaticsniper_options['bam_readcount']['version']) # Run fpfilter.pl parameters = ['fpfilter.pl', '--snp-file', input_files['input.vcf'] + '.SNPfilter', '--readcount-file', docker_path(readcounts_file.name)] # Creates input.vcf.SNPfilter.fp_pass and input.vcf.SNPfilter.fp_fail docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version']) # Run highconfidence.pl parameters = ['highconfidence.pl', '--snp-file', input_files['input.vcf'] + '.SNPfilter.fp_pass'] # Creates input.vcf.SNPfilter.fp_pass.hc docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version']) outfile = job.fileStore.writeGlobalFile(os.path.join(os.getcwd(), 'input.vcf.SNPfilter.fp_pass.hc')) job.fileStore.logToMaster('Filtered SomaticSniper for %s successfully' % univ_options['patient']) return outfile
[ "def", "filter_somaticsniper", "(", "job", ",", "tumor_bam", ",", "somaticsniper_output", ",", "tumor_pileup", ",", "univ_options", ",", "somaticsniper_options", ")", ":", "work_dir", "=", "os", ".", "getcwd", "(", ")", "input_files", "=", "{", "'tumor.bam'", ":...
Filter SomaticSniper calls. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param toil.fileStore.FileID somaticsniper_output: SomaticSniper output vcf :param toil.fileStore.FileID tumor_pileup: Pileup generated for the tumor bam :param dict univ_options: Dict of universal options used by almost all tools :param dict somaticsniper_options: Options specific to SomaticSniper :returns: fsID for the filtered genome-level vcf :rtype: toil.fileStore.FileID
[ "Filter", "SomaticSniper", "calls", "." ]
python
train
rbit/pydtls
dtls/patch.py
https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/patch.py#L100-L123
def _get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None): """Retrieve a server certificate Retrieve the certificate from the server at the specified address, and return it as a PEM-encoded string. If 'ca_certs' is specified, validate the server cert against it. If 'ssl_version' is specified, use it in the connection attempt. """ if ssl_version not in (PROTOCOL_DTLS, PROTOCOL_DTLSv1, PROTOCOL_DTLSv1_2): return _orig_get_server_certificate(addr, ssl_version, ca_certs) if ca_certs is not None: cert_reqs = ssl.CERT_REQUIRED else: cert_reqs = ssl.CERT_NONE af = getaddrinfo(addr[0], addr[1])[0][0] s = ssl.wrap_socket(socket(af, SOCK_DGRAM), ssl_version=ssl_version, cert_reqs=cert_reqs, ca_certs=ca_certs) s.connect(addr) dercert = s.getpeercert(True) s.close() return ssl.DER_cert_to_PEM_cert(dercert)
[ "def", "_get_server_certificate", "(", "addr", ",", "ssl_version", "=", "PROTOCOL_SSLv23", ",", "ca_certs", "=", "None", ")", ":", "if", "ssl_version", "not", "in", "(", "PROTOCOL_DTLS", ",", "PROTOCOL_DTLSv1", ",", "PROTOCOL_DTLSv1_2", ")", ":", "return", "_ori...
Retrieve a server certificate Retrieve the certificate from the server at the specified address, and return it as a PEM-encoded string. If 'ca_certs' is specified, validate the server cert against it. If 'ssl_version' is specified, use it in the connection attempt.
[ "Retrieve", "a", "server", "certificate" ]
python
train
nilp0inter/cpe
cpe/comp/cpecomp1_1.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp1_1.py#L309-L331
def as_wfn(self): r""" Returns the value of compoment encoded as Well-Formed Name (WFN) string. :returns: WFN string :rtype: string TEST: >>> val = 'xp!vista' >>> comp1 = CPEComponent1_1(val, CPEComponentSimple.ATT_VERSION) >>> comp1.as_wfn() 'xp\\!vista' """ result = [] for s in self._standard_value: result.append(s) result.append(CPEComponent1_1._ESCAPE_SEPARATOR) return "".join(result[0:-1])
[ "def", "as_wfn", "(", "self", ")", ":", "result", "=", "[", "]", "for", "s", "in", "self", ".", "_standard_value", ":", "result", ".", "append", "(", "s", ")", "result", ".", "append", "(", "CPEComponent1_1", ".", "_ESCAPE_SEPARATOR", ")", "return", "\...
r""" Returns the value of compoment encoded as Well-Formed Name (WFN) string. :returns: WFN string :rtype: string TEST: >>> val = 'xp!vista' >>> comp1 = CPEComponent1_1(val, CPEComponentSimple.ATT_VERSION) >>> comp1.as_wfn() 'xp\\!vista'
[ "r", "Returns", "the", "value", "of", "compoment", "encoded", "as", "Well", "-", "Formed", "Name", "(", "WFN", ")", "string", "." ]
python
train
rwl/pylon
pylon/solver.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/solver.py#L260-L277
def _pwl_costs(self, ny, nxyz, ipwl): """ Returns the piece-wise linear components of the objective function. """ any_pwl = int(ny > 0) if any_pwl: y = self.om.get_var("y") # Sum of y vars. Npwl = csr_matrix((ones(ny), (zeros(ny), array(ipwl) + y.i1))) Hpwl = csr_matrix((1, 1)) Cpwl = array([1]) fparm_pwl = array([[1., 0., 0., 1.]]) else: Npwl = None#zeros((0, nxyz)) Hpwl = None#array([]) Cpwl = array([]) fparm_pwl = zeros((0, 4)) return Npwl, Hpwl, Cpwl, fparm_pwl, any_pwl
[ "def", "_pwl_costs", "(", "self", ",", "ny", ",", "nxyz", ",", "ipwl", ")", ":", "any_pwl", "=", "int", "(", "ny", ">", "0", ")", "if", "any_pwl", ":", "y", "=", "self", ".", "om", ".", "get_var", "(", "\"y\"", ")", "# Sum of y vars.", "Npwl", "=...
Returns the piece-wise linear components of the objective function.
[ "Returns", "the", "piece", "-", "wise", "linear", "components", "of", "the", "objective", "function", "." ]
python
train
brainiak/brainiak
brainiak/funcalign/rsrm.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L538-L561
def _shrink(v, gamma): """Soft-shrinkage of an array with parameter gamma. Parameters ---------- v : array Array containing the values to be applied to the shrinkage operator gamma : float Shrinkage parameter. Returns ------- v : array The same input array after the shrinkage operator was applied. """ pos = v > gamma neg = v < -gamma v[pos] -= gamma v[neg] += gamma v[np.logical_and(~pos, ~neg)] = .0 return v
[ "def", "_shrink", "(", "v", ",", "gamma", ")", ":", "pos", "=", "v", ">", "gamma", "neg", "=", "v", "<", "-", "gamma", "v", "[", "pos", "]", "-=", "gamma", "v", "[", "neg", "]", "+=", "gamma", "v", "[", "np", ".", "logical_and", "(", "~", "...
Soft-shrinkage of an array with parameter gamma. Parameters ---------- v : array Array containing the values to be applied to the shrinkage operator gamma : float Shrinkage parameter. Returns ------- v : array The same input array after the shrinkage operator was applied.
[ "Soft", "-", "shrinkage", "of", "an", "array", "with", "parameter", "gamma", "." ]
python
train
portantier/habu
habu/cli/cmd_arp_ping.py
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_arp_ping.py#L18-L43
def cmd_arp_ping(ip, iface, verbose): """ Send ARP packets to check if a host it's alive in the local network. Example: \b # habu.arp.ping 192.168.0.1 Ether / ARP is at a4:08:f5:19:17:a4 says 192.168.0.1 / Padding """ if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') conf.verb = False if iface: conf.iface = iface res, unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ip), timeout=2) for _, pkt in res: if verbose: print(pkt.show()) else: print(pkt.summary())
[ "def", "cmd_arp_ping", "(", "ip", ",", "iface", ",", "verbose", ")", ":", "if", "verbose", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "'%(message)s'", ")", "conf", ".", "verb", "=", "False", "if",...
Send ARP packets to check if a host it's alive in the local network. Example: \b # habu.arp.ping 192.168.0.1 Ether / ARP is at a4:08:f5:19:17:a4 says 192.168.0.1 / Padding
[ "Send", "ARP", "packets", "to", "check", "if", "a", "host", "it", "s", "alive", "in", "the", "local", "network", "." ]
python
train
fulfilio/python-magento
magento/catalog.py
https://github.com/fulfilio/python-magento/blob/720ec136a6e438a9ee4ee92848a9820b91732750/magento/catalog.py#L702-L719
def assign(self, link_type, product, linked_product, data=None, identifierType=None): """ Assign a product link :param link_type: type of link, one of 'cross_sell', 'up_sell', 'related' or 'grouped' :param product: ID or SKU of product :param linked_product: ID or SKU of linked product :param data: dictionary of link data, (position, qty, etc.) Example: { 'position': '0', 'qty': 1} :param identifierType: Defines whether the product or SKU value is passed in the "product" parameter. :return: boolean """ return bool(self.call('catalog_product_link.assign', [link_type, product, linked_product, data, identifierType]))
[ "def", "assign", "(", "self", ",", "link_type", ",", "product", ",", "linked_product", ",", "data", "=", "None", ",", "identifierType", "=", "None", ")", ":", "return", "bool", "(", "self", ".", "call", "(", "'catalog_product_link.assign'", ",", "[", "link...
Assign a product link :param link_type: type of link, one of 'cross_sell', 'up_sell', 'related' or 'grouped' :param product: ID or SKU of product :param linked_product: ID or SKU of linked product :param data: dictionary of link data, (position, qty, etc.) Example: { 'position': '0', 'qty': 1} :param identifierType: Defines whether the product or SKU value is passed in the "product" parameter. :return: boolean
[ "Assign", "a", "product", "link" ]
python
train
IdentityPython/pyop
src/pyop/provider.py
https://github.com/IdentityPython/pyop/blob/7b1385964f079c39752fce5f2dbcf458b8a92e56/src/pyop/provider.py#L428-L455
def handle_userinfo_request(self, request=None, http_headers=None): # type: (Optional[str], Optional[Mapping[str, str]]) -> oic.oic.message.OpenIDSchema """ Handles a userinfo request. :param request: urlencoded request (either query string or POST body) :param http_headers: http headers """ if http_headers is None: http_headers = {} userinfo_request = dict(parse_qsl(request)) bearer_token = extract_bearer_token_from_http_request(userinfo_request, http_headers.get('Authorization')) introspection = self.authz_state.introspect_access_token(bearer_token) if not introspection['active']: raise InvalidAccessToken('The access token has expired') scopes = introspection['scope'].split() user_id = self.authz_state.get_user_id_for_subject_identifier(introspection['sub']) requested_claims = scope2claims(scopes, extra_scope_dict=self.extra_scopes) authentication_request = self.authz_state.get_authorization_request_for_access_token(bearer_token) requested_claims.update(self._get_requested_claims_in(authentication_request, 'userinfo')) user_claims = self.userinfo.get_claims_for(user_id, requested_claims) user_claims.setdefault('sub', introspection['sub']) response = OpenIDSchema(**user_claims) logger.debug('userinfo=%s from requested_claims=%s userinfo=%s', response, requested_claims, user_claims) return response
[ "def", "handle_userinfo_request", "(", "self", ",", "request", "=", "None", ",", "http_headers", "=", "None", ")", ":", "# type: (Optional[str], Optional[Mapping[str, str]]) -> oic.oic.message.OpenIDSchema", "if", "http_headers", "is", "None", ":", "http_headers", "=", "{...
Handles a userinfo request. :param request: urlencoded request (either query string or POST body) :param http_headers: http headers
[ "Handles", "a", "userinfo", "request", ".", ":", "param", "request", ":", "urlencoded", "request", "(", "either", "query", "string", "or", "POST", "body", ")", ":", "param", "http_headers", ":", "http", "headers" ]
python
train
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L210-L212
def p_statement_switch(p): 'statement : SWITCH LPAREN expr RPAREN switch_case_list' p[0] = ast.Switch(p[3], p[5], lineno=p.lineno(1))
[ "def", "p_statement_switch", "(", "p", ")", ":", "p", "[", "0", "]", "=", "ast", ".", "Switch", "(", "p", "[", "3", "]", ",", "p", "[", "5", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")" ]
statement : SWITCH LPAREN expr RPAREN switch_case_list
[ "statement", ":", "SWITCH", "LPAREN", "expr", "RPAREN", "switch_case_list" ]
python
train
tehmaze/diagram
diagram.py
https://github.com/tehmaze/diagram/blob/1701526a91c14dc8ebc6452c45c8ec9a563a56db/diagram.py#L147-L155
def csi_wrap(self, value, capname, *args): """Return a value wrapped in the selected CSI and does a reset.""" if isinstance(value, str): value = value.encode('utf-8') return b''.join([ self.csi(capname, *args), value, self.csi('sgr0'), ])
[ "def", "csi_wrap", "(", "self", ",", "value", ",", "capname", ",", "*", "args", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "value", ".", "encode", "(", "'utf-8'", ")", "return", "b''", ".", "join", "(", "[", "...
Return a value wrapped in the selected CSI and does a reset.
[ "Return", "a", "value", "wrapped", "in", "the", "selected", "CSI", "and", "does", "a", "reset", "." ]
python
valid
Guake/guake
guake/guake_app.py
https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/guake_app.py#L757-L764
def accel_reset_terminal(self, *args): # TODO KEYBINDINGS ONLY """Callback to reset and clean the terminal""" HidePrevention(self.window).prevent() current_term = self.get_notebook().get_current_terminal() current_term.reset(True, True) HidePrevention(self.window).allow() return True
[ "def", "accel_reset_terminal", "(", "self", ",", "*", "args", ")", ":", "# TODO KEYBINDINGS ONLY", "HidePrevention", "(", "self", ".", "window", ")", ".", "prevent", "(", ")", "current_term", "=", "self", ".", "get_notebook", "(", ")", ".", "get_current_termin...
Callback to reset and clean the terminal
[ "Callback", "to", "reset", "and", "clean", "the", "terminal" ]
python
train
twisted/twistedchecker
twistedchecker/checkers/comment.py
https://github.com/twisted/twistedchecker/blob/80060e1c07cf5d67d747dbec8ec0e5ee913e8929/twistedchecker/checkers/comment.py#L26-L66
def visit_module(self, node): """ A interface will be called when visiting a module. @param node: node of current module """ if not node.file_stream: # Failed to open the module return isFirstLineOfComment = True isDocString = False lines = node.stream().readlines() for linenum, line in enumerate(lines): if line.strip().startswith(b'"""'): # This is a simple assumption than docstring are delimited # with triple double quotes on a single line. # Should do the job for Twisted code. isDocString = not isDocString if isDocString: # We ignore comments in docstrings. continue matchedComment = COMMENT_RGX.search(line) if matchedComment: if isFirstLineOfComment: # Check for W9401 comment = matchedComment.group() if (comment.startswith(b"# ") or not comment.startswith(b"# ")): self.add_message('W9401', line=linenum + 1, node=node) # Check for W9402 strippedComment = comment.lstrip(b"#").lstrip() if strippedComment: firstLetter = strippedComment[0:1] if (firstLetter.isalpha() and not firstLetter.isupper()): self.add_message('W9402', line=linenum + 1, node=node) isFirstLineOfComment = False else: isFirstLineOfComment = True
[ "def", "visit_module", "(", "self", ",", "node", ")", ":", "if", "not", "node", ".", "file_stream", ":", "# Failed to open the module", "return", "isFirstLineOfComment", "=", "True", "isDocString", "=", "False", "lines", "=", "node", ".", "stream", "(", ")", ...
A interface will be called when visiting a module. @param node: node of current module
[ "A", "interface", "will", "be", "called", "when", "visiting", "a", "module", "." ]
python
train
Clinical-Genomics/scout
scout/server/blueprints/cases/views.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/cases/views.py#L34-L39
def index(): """Display a list of all user institutes.""" institute_objs = user_institutes(store, current_user) institutes_count = ((institute_obj, store.cases(collaborator=institute_obj['_id']).count()) for institute_obj in institute_objs if institute_obj) return dict(institutes=institutes_count)
[ "def", "index", "(", ")", ":", "institute_objs", "=", "user_institutes", "(", "store", ",", "current_user", ")", "institutes_count", "=", "(", "(", "institute_obj", ",", "store", ".", "cases", "(", "collaborator", "=", "institute_obj", "[", "'_id'", "]", ")"...
Display a list of all user institutes.
[ "Display", "a", "list", "of", "all", "user", "institutes", "." ]
python
test
lsst-sqre/lander
lander/ltdclient.py
https://github.com/lsst-sqre/lander/blob/5e4f6123e48b451ba21963724ace0dc59798618e/lander/ltdclient.py#L8-L32
def upload(config): """Upload the build documentation site to LSST the Docs. Parameters ---------- config : `lander.config.Configuration` Site configuration, which includes upload information and credentials. """ token = get_keeper_token(config['keeper_url'], config['keeper_user'], config['keeper_password']) build_resource = register_build(config, token) ltdconveyor.upload_dir( build_resource['bucket_name'], build_resource['bucket_root_dir'], config['build_dir'], aws_access_key_id=config['aws_id'], aws_secret_access_key=config['aws_secret'], surrogate_key=build_resource['surrogate_key'], cache_control='max-age=31536000', surrogate_control=None, upload_dir_redirect_objects=True) confirm_build(config, token, build_resource)
[ "def", "upload", "(", "config", ")", ":", "token", "=", "get_keeper_token", "(", "config", "[", "'keeper_url'", "]", ",", "config", "[", "'keeper_user'", "]", ",", "config", "[", "'keeper_password'", "]", ")", "build_resource", "=", "register_build", "(", "c...
Upload the build documentation site to LSST the Docs. Parameters ---------- config : `lander.config.Configuration` Site configuration, which includes upload information and credentials.
[ "Upload", "the", "build", "documentation", "site", "to", "LSST", "the", "Docs", "." ]
python
train
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L2174-L2185
def mark_offer_as_win(self, offer_id): """ Mark offer as win :param offer_id: the offer id :return Response """ return self._create_put_request( resource=OFFERS, billomat_id=offer_id, command=WIN, )
[ "def", "mark_offer_as_win", "(", "self", ",", "offer_id", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "OFFERS", ",", "billomat_id", "=", "offer_id", ",", "command", "=", "WIN", ",", ")" ]
Mark offer as win :param offer_id: the offer id :return Response
[ "Mark", "offer", "as", "win" ]
python
train
ActiveState/simplealchemy
simplealchemy.py
https://github.com/ActiveState/simplealchemy/blob/f745847793f57701776a804ec74791a1f6a66947/simplealchemy.py#L154-L165
def create_from(cls, another, **kwargs): """Create from another object of different type. Another object must be from a derived class of SimpleObject (which contains FIELDS) """ reused_fields = {} for field, value in another.get_fields(): if field in cls.FIELDS: reused_fields[field] = value reused_fields.update(kwargs) return cls(**reused_fields)
[ "def", "create_from", "(", "cls", ",", "another", ",", "*", "*", "kwargs", ")", ":", "reused_fields", "=", "{", "}", "for", "field", ",", "value", "in", "another", ".", "get_fields", "(", ")", ":", "if", "field", "in", "cls", ".", "FIELDS", ":", "r...
Create from another object of different type. Another object must be from a derived class of SimpleObject (which contains FIELDS)
[ "Create", "from", "another", "object", "of", "different", "type", "." ]
python
train
joowani/binarytree
binarytree/__init__.py
https://github.com/joowani/binarytree/blob/23cb6f1e60e66b96133259031e97ec03e932ba13/binarytree/__init__.py#L71-L81
def _generate_perfect_bst(height): """Generate a perfect BST (binary search tree) and return its root. :param height: Height of the BST. :type height: int :return: Root node of the BST. :rtype: binarytree.Node """ max_node_count = 2 ** (height + 1) - 1 node_values = list(range(max_node_count)) return _build_bst_from_sorted_values(node_values)
[ "def", "_generate_perfect_bst", "(", "height", ")", ":", "max_node_count", "=", "2", "**", "(", "height", "+", "1", ")", "-", "1", "node_values", "=", "list", "(", "range", "(", "max_node_count", ")", ")", "return", "_build_bst_from_sorted_values", "(", "nod...
Generate a perfect BST (binary search tree) and return its root. :param height: Height of the BST. :type height: int :return: Root node of the BST. :rtype: binarytree.Node
[ "Generate", "a", "perfect", "BST", "(", "binary", "search", "tree", ")", "and", "return", "its", "root", "." ]
python
train
crackinglandia/pype32
pype32/pype32.py
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L449-L456
def _getPaddingToSectionOffset(self): """ Returns the offset to last section header present in the PE file. @rtype: int @return: The offset where the end of the last section header resides in the PE file. """ return len(str(self.dosHeader) + str(self.dosStub) + str(self.ntHeaders) + str(self.sectionHeaders))
[ "def", "_getPaddingToSectionOffset", "(", "self", ")", ":", "return", "len", "(", "str", "(", "self", ".", "dosHeader", ")", "+", "str", "(", "self", ".", "dosStub", ")", "+", "str", "(", "self", ".", "ntHeaders", ")", "+", "str", "(", "self", ".", ...
Returns the offset to last section header present in the PE file. @rtype: int @return: The offset where the end of the last section header resides in the PE file.
[ "Returns", "the", "offset", "to", "last", "section", "header", "present", "in", "the", "PE", "file", "." ]
python
train
kytos/python-openflow
pyof/foundation/basic_types.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/foundation/basic_types.py#L161-L181
def unpack(self, buff, offset=0): """Unpack a binary message into this object's attributes. Unpack the binary value *buff* and update this object attributes based on the results. Args: buff (bytes): Binary data package to be unpacked. offset (int): Where to begin unpacking. Raises: Exception: If there is a struct unpacking error. """ begin = offset hexas = [] while begin < offset + 8: number = struct.unpack("!B", buff[begin:begin+1])[0] hexas.append("%.2x" % number) begin += 1 self._value = ':'.join(hexas)
[ "def", "unpack", "(", "self", ",", "buff", ",", "offset", "=", "0", ")", ":", "begin", "=", "offset", "hexas", "=", "[", "]", "while", "begin", "<", "offset", "+", "8", ":", "number", "=", "struct", ".", "unpack", "(", "\"!B\"", ",", "buff", "[",...
Unpack a binary message into this object's attributes. Unpack the binary value *buff* and update this object attributes based on the results. Args: buff (bytes): Binary data package to be unpacked. offset (int): Where to begin unpacking. Raises: Exception: If there is a struct unpacking error.
[ "Unpack", "a", "binary", "message", "into", "this", "object", "s", "attributes", "." ]
python
train
topic2k/pygcgen
pygcgen/generator.py
https://github.com/topic2k/pygcgen/blob/c41701815df2c8c3a57fd5f7b8babe702127c8a1/pygcgen/generator.py#L249-L267
def generate_sub_section(self, issues, prefix): """ Generate formated list of issues for changelog. :param list issues: Issues to put in sub-section. :param str prefix: Title of sub-section. :rtype: str :return: Generated ready-to-add sub-section. """ log = "" if issues: if not self.options.simple_list: log += u"{0}\n\n".format(prefix) for issue in issues: merge_string = self.get_string_for_issue(issue) log += u"- {0}\n".format(merge_string) log += "\n" return log
[ "def", "generate_sub_section", "(", "self", ",", "issues", ",", "prefix", ")", ":", "log", "=", "\"\"", "if", "issues", ":", "if", "not", "self", ".", "options", ".", "simple_list", ":", "log", "+=", "u\"{0}\\n\\n\"", ".", "format", "(", "prefix", ")", ...
Generate formated list of issues for changelog. :param list issues: Issues to put in sub-section. :param str prefix: Title of sub-section. :rtype: str :return: Generated ready-to-add sub-section.
[ "Generate", "formated", "list", "of", "issues", "for", "changelog", "." ]
python
valid
Alignak-monitoring/alignak
alignak/objects/schedulingitem.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L3437-L3470
def del_act_dependency(self, son_id, parent_id): # pragma: no cover, not yet tested """Remove act_dependency between two hosts or services. TODO: do we really intend to remove dynamically ? :param son_id: uuid of son host/service :type son_id: str :param parent_id: uuid of parent host/service :type parent_id: str :return: None """ son = self[son_id] parent = self[parent_id] to_del = [] # First we remove in my list for (host, status, timeperiod, inherits_parent) in son.act_depend_of: if host == parent_id: to_del.append((host, status, timeperiod, inherits_parent)) for tup in to_del: son.act_depend_of.remove(tup) # And now in the father part to_del = [] for (host, status, timeperiod, inherits_parent) in parent.act_depend_of_me: if host == son_id: to_del.append((host, status, timeperiod, inherits_parent)) for tup in to_del: parent.act_depend_of_me.remove(tup) # Remove in child/parents dependencies too # Me in father list parent.child_dependencies.remove(son_id) # and father list in mine son.parent_dependencies.remove(parent_id)
[ "def", "del_act_dependency", "(", "self", ",", "son_id", ",", "parent_id", ")", ":", "# pragma: no cover, not yet tested", "son", "=", "self", "[", "son_id", "]", "parent", "=", "self", "[", "parent_id", "]", "to_del", "=", "[", "]", "# First we remove in my lis...
Remove act_dependency between two hosts or services. TODO: do we really intend to remove dynamically ? :param son_id: uuid of son host/service :type son_id: str :param parent_id: uuid of parent host/service :type parent_id: str :return: None
[ "Remove", "act_dependency", "between", "two", "hosts", "or", "services", "." ]
python
train
dmlc/xgboost
python-package/xgboost/sklearn.py
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/sklearn.py#L217-L226
def get_params(self, deep=False): """Get parameters.""" params = super(XGBModel, self).get_params(deep=deep) if isinstance(self.kwargs, dict): # if kwargs is a dict, update params accordingly params.update(self.kwargs) if params['missing'] is np.nan: params['missing'] = None # sklearn doesn't handle nan. see #4725 if not params.get('eval_metric', True): del params['eval_metric'] # don't give as None param to Booster return params
[ "def", "get_params", "(", "self", ",", "deep", "=", "False", ")", ":", "params", "=", "super", "(", "XGBModel", ",", "self", ")", ".", "get_params", "(", "deep", "=", "deep", ")", "if", "isinstance", "(", "self", ".", "kwargs", ",", "dict", ")", ":...
Get parameters.
[ "Get", "parameters", "." ]
python
train
clalancette/pycdlib
pycdlib/udf.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L877-L892
def parse(self, data): # type: (bytes) -> None ''' Parse the passed in data into a UDF Entity ID. Parameters: data - The data to parse. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Entity ID already initialized') (self.flags, self.identifier, self.suffix) = struct.unpack_from(self.FMT, data, 0) self._initialized = True
[ "def", "parse", "(", "self", ",", "data", ")", ":", "# type: (bytes) -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'UDF Entity ID already initialized'", ")", "(", "self", ".", "flags", ",", "self"...
Parse the passed in data into a UDF Entity ID. Parameters: data - The data to parse. Returns: Nothing.
[ "Parse", "the", "passed", "in", "data", "into", "a", "UDF", "Entity", "ID", "." ]
python
train
panzarino/mlbgame
mlbgame/data.py
https://github.com/panzarino/mlbgame/blob/0a2d10540de793fdc3b8476aa18f5cf3b53d0b54/mlbgame/data.py#L143-L151
def get_standings(date): """Return the standings file for current standings (given current date).""" try: return urlopen(STANDINGS_URL.format(date.year, date.strftime('%Y/%m/%d'))) except HTTPError: ValueError('Could not find the standings file. ' 'mlb.com does not provide the file that ' 'mlbgame needs to perform this operation.')
[ "def", "get_standings", "(", "date", ")", ":", "try", ":", "return", "urlopen", "(", "STANDINGS_URL", ".", "format", "(", "date", ".", "year", ",", "date", ".", "strftime", "(", "'%Y/%m/%d'", ")", ")", ")", "except", "HTTPError", ":", "ValueError", "(", ...
Return the standings file for current standings (given current date).
[ "Return", "the", "standings", "file", "for", "current", "standings", "(", "given", "current", "date", ")", "." ]
python
train
lowandrew/OLCTools
spadespipeline/GeneSeekr.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/GeneSeekr.py#L173-L190
def blastnthreads(self): """Setup and create threads for blastn and xml path""" # Create the threads for the BLAST analysis for i in range(self.cpus): threads = Thread(target=self.runblast, args=()) threads.setDaemon(True) threads.start() # Populate threads for each gene, genome combination for sample in self.metadata: make_path(sample[self.analysistype].reportdir) sample[self.analysistype].report = os.path.join( sample[self.analysistype].reportdir, '{}.csv'.format(sample.name)) if sample[self.analysistype].combinedtargets != 'NA': # Add each fasta file combination to the threads self.blastqueue.put((sample.general.bestassemblyfile, sample[self.analysistype].combinedtargets, sample)) # Join the threads self.blastqueue.join()
[ "def", "blastnthreads", "(", "self", ")", ":", "# Create the threads for the BLAST analysis", "for", "i", "in", "range", "(", "self", ".", "cpus", ")", ":", "threads", "=", "Thread", "(", "target", "=", "self", ".", "runblast", ",", "args", "=", "(", ")", ...
Setup and create threads for blastn and xml path
[ "Setup", "and", "create", "threads", "for", "blastn", "and", "xml", "path" ]
python
train
iamsteadman/bambu-mail
bambu_mail/views.py
https://github.com/iamsteadman/bambu-mail/blob/5298e6ab861cabc8859c8356ccb2354b1b902cd1/bambu_mail/views.py#L7-L36
def subscribe(request): """ Takes POST data (``email`` and optional ``next`` fields), submitting the ``email`` field to the newsletter provider for subscription to a mailing list, and redirecting the user to the value of ``next`` (this can also be provided in the querystring), or the homepage if no follow-on URL is supplied, with a message in the ``django.contrib.messages`` queue to let them know it was successful. If the email address is invalid or the subscription process was unsuccessful, the user is redirected to the follow-on URL and a message placed in the ``django.contrib.messages`` queue letting them know what the issue was. """ email = request.POST.get('email') next = request.POST.get('next', request.GET.get('next', '/')) valid = False if not email: messages.error(request, u'Please enter your email address') else: try: validate_email(email) valid = True except ValidationError: messages.error(request, u'Please enter a valid email address') if valid: shortcuts.subscribe(email, list_id = 'newsletter') messages.success(request, u'Thanks for subscribing to our newsletter.') return HttpResponseRedirect(next)
[ "def", "subscribe", "(", "request", ")", ":", "email", "=", "request", ".", "POST", ".", "get", "(", "'email'", ")", "next", "=", "request", ".", "POST", ".", "get", "(", "'next'", ",", "request", ".", "GET", ".", "get", "(", "'next'", ",", "'/'", ...
Takes POST data (``email`` and optional ``next`` fields), submitting the ``email`` field to the newsletter provider for subscription to a mailing list, and redirecting the user to the value of ``next`` (this can also be provided in the querystring), or the homepage if no follow-on URL is supplied, with a message in the ``django.contrib.messages`` queue to let them know it was successful. If the email address is invalid or the subscription process was unsuccessful, the user is redirected to the follow-on URL and a message placed in the ``django.contrib.messages`` queue letting them know what the issue was.
[ "Takes", "POST", "data", "(", "email", "and", "optional", "next", "fields", ")", "submitting", "the", "email", "field", "to", "the", "newsletter", "provider", "for", "subscription", "to", "a", "mailing", "list", "and", "redirecting", "the", "user", "to", "th...
python
train
bolt-project/bolt
bolt/spark/array.py
https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/spark/array.py#L861-L877
def _reshapebasic(self, shape): """ Check if the requested reshape can be broken into independant reshapes on the keys and values. If it can, returns the index in the new shape separating keys from values, otherwise returns -1 """ new = tupleize(shape) old_key_size = prod(self.keys.shape) old_value_size = prod(self.values.shape) for i in range(len(new)): new_key_size = prod(new[:i]) new_value_size = prod(new[i:]) if new_key_size == old_key_size and new_value_size == old_value_size: return i return -1
[ "def", "_reshapebasic", "(", "self", ",", "shape", ")", ":", "new", "=", "tupleize", "(", "shape", ")", "old_key_size", "=", "prod", "(", "self", ".", "keys", ".", "shape", ")", "old_value_size", "=", "prod", "(", "self", ".", "values", ".", "shape", ...
Check if the requested reshape can be broken into independant reshapes on the keys and values. If it can, returns the index in the new shape separating keys from values, otherwise returns -1
[ "Check", "if", "the", "requested", "reshape", "can", "be", "broken", "into", "independant", "reshapes", "on", "the", "keys", "and", "values", ".", "If", "it", "can", "returns", "the", "index", "in", "the", "new", "shape", "separating", "keys", "from", "val...
python
test
CygnusNetworks/pypureomapi
pypureomapi.py
https://github.com/CygnusNetworks/pypureomapi/blob/ff4459678ec023fd56e64ce518a86860efec26bf/pypureomapi.py#L1332-L1352
def del_host(self, mac): """Delete a host object with with given mac address. @type mac: str @raises ValueError: @raises OmapiError: @raises OmapiErrorNotFound: if no lease object with the given mac address could be found @raises socket.error: """ msg = OmapiMessage.open(b"host") msg.obj.append((b"hardware-address", pack_mac(mac))) msg.obj.append((b"hardware-type", struct.pack("!I", 1))) response = self.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: raise OmapiErrorNotFound() if response.handle == 0: raise OmapiError("received invalid handle from server") response = self.query_server(OmapiMessage.delete(response.handle)) if response.opcode != OMAPI_OP_STATUS: raise OmapiError("delete failed")
[ "def", "del_host", "(", "self", ",", "mac", ")", ":", "msg", "=", "OmapiMessage", ".", "open", "(", "b\"host\"", ")", "msg", ".", "obj", ".", "append", "(", "(", "b\"hardware-address\"", ",", "pack_mac", "(", "mac", ")", ")", ")", "msg", ".", "obj", ...
Delete a host object with with given mac address. @type mac: str @raises ValueError: @raises OmapiError: @raises OmapiErrorNotFound: if no lease object with the given mac address could be found @raises socket.error:
[ "Delete", "a", "host", "object", "with", "with", "given", "mac", "address", "." ]
python
train
pmacosta/peng
peng/functions.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/functions.py#L764-L801
def round_mantissa(arg, decimals=0): """ Round floating point number(s) mantissa to given number of digits. Integers are not altered. The mantissa used is that of the floating point number(s) when expressed in `normalized scientific notation <https://en.wikipedia.org/wiki/Scientific_notation#Normalized_notation>`_ :param arg: Input data :type arg: integer, float, Numpy vector of integers or floats, or None :param decimals: Number of digits to round the fractional part of the mantissa to. :type decimals: integer :rtype: same as **arg** For example:: >>> import peng >>> peng.round_mantissa(012345678E-6, 3) 12.35 >>> peng.round_mantissa(5, 3) 5 """ if arg is None: return arg if isinstance(arg, np.ndarray): foi = [isinstance(item, int) for item in arg] return np.array( [ item if isint else float(to_scientific_string(item, decimals)) for isint, item in zip(foi, arg) ] ) if isinstance(arg, int): return arg return float(to_scientific_string(arg, decimals))
[ "def", "round_mantissa", "(", "arg", ",", "decimals", "=", "0", ")", ":", "if", "arg", "is", "None", ":", "return", "arg", "if", "isinstance", "(", "arg", ",", "np", ".", "ndarray", ")", ":", "foi", "=", "[", "isinstance", "(", "item", ",", "int", ...
Round floating point number(s) mantissa to given number of digits. Integers are not altered. The mantissa used is that of the floating point number(s) when expressed in `normalized scientific notation <https://en.wikipedia.org/wiki/Scientific_notation#Normalized_notation>`_ :param arg: Input data :type arg: integer, float, Numpy vector of integers or floats, or None :param decimals: Number of digits to round the fractional part of the mantissa to. :type decimals: integer :rtype: same as **arg** For example:: >>> import peng >>> peng.round_mantissa(012345678E-6, 3) 12.35 >>> peng.round_mantissa(5, 3) 5
[ "Round", "floating", "point", "number", "(", "s", ")", "mantissa", "to", "given", "number", "of", "digits", "." ]
python
test
wmayner/pyphi
pyphi/examples.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/examples.py#L499-L519
def macro_network(): """A network of micro elements which has greater integrated information after coarse graining to a macro scale. """ tpm = np.array([[0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 1.0, 1.0], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 1.0, 1.0], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 1.0, 1.0], [1.0, 1.0, 0.3, 0.3], [1.0, 1.0, 0.3, 0.3], [1.0, 1.0, 0.3, 0.3], [1.0, 1.0, 1.0, 1.0]]) return Network(tpm, node_labels=LABELS[:tpm.shape[1]])
[ "def", "macro_network", "(", ")", ":", "tpm", "=", "np", ".", "array", "(", "[", "[", "0.3", ",", "0.3", ",", "0.3", ",", "0.3", "]", ",", "[", "0.3", ",", "0.3", ",", "0.3", ",", "0.3", "]", ",", "[", "0.3", ",", "0.3", ",", "0.3", ",", ...
A network of micro elements which has greater integrated information after coarse graining to a macro scale.
[ "A", "network", "of", "micro", "elements", "which", "has", "greater", "integrated", "information", "after", "coarse", "graining", "to", "a", "macro", "scale", "." ]
python
train
IceflowRE/unidown
unidown/plugin/link_item.py
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/plugin/link_item.py#L37-L49
def from_protobuf(cls, proto: LinkItemProto) -> LinkItem: """ Constructor from protobuf. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.link_item_pb2.LinkItemProto :return: the LinkItem :rtype: ~unidown.plugin.link_item.LinkItem :raises ValueError: name of LinkItem does not exist inside the protobuf or is empty """ if proto.name == '': raise ValueError("name of LinkItem does not exist or is empty inside the protobuf.") return cls(proto.name, Timestamp.ToDatetime(proto.time))
[ "def", "from_protobuf", "(", "cls", ",", "proto", ":", "LinkItemProto", ")", "->", "LinkItem", ":", "if", "proto", ".", "name", "==", "''", ":", "raise", "ValueError", "(", "\"name of LinkItem does not exist or is empty inside the protobuf.\"", ")", "return", "cls",...
Constructor from protobuf. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.link_item_pb2.LinkItemProto :return: the LinkItem :rtype: ~unidown.plugin.link_item.LinkItem :raises ValueError: name of LinkItem does not exist inside the protobuf or is empty
[ "Constructor", "from", "protobuf", "." ]
python
train
rigetti/rpcq
rpcq/_spec.py
https://github.com/rigetti/rpcq/blob/9091e3541c4419d7ab882bb32a8b86aa85cedb6f/rpcq/_spec.py#L86-L100
def get_handler(self, request): """ Get callable from JSON RPC request :param RPCRequest request: JSON RPC request :return: Method :rtype: callable """ try: f = self._json_rpc_methods[request.method] except (AttributeError, KeyError): # pragma no coverage raise RPCMethodError("Received invalid method '{}'".format(request.method)) return f
[ "def", "get_handler", "(", "self", ",", "request", ")", ":", "try", ":", "f", "=", "self", ".", "_json_rpc_methods", "[", "request", ".", "method", "]", "except", "(", "AttributeError", ",", "KeyError", ")", ":", "# pragma no coverage", "raise", "RPCMethodEr...
Get callable from JSON RPC request :param RPCRequest request: JSON RPC request :return: Method :rtype: callable
[ "Get", "callable", "from", "JSON", "RPC", "request" ]
python
train
neherlab/treetime
treetime/treeanc.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeanc.py#L1641-L1724
def optimize_branch_length(self, mode='joint', **kwargs): """ Perform optimization for the branch lengths of the entire tree. This method only does a single path and needs to be iterated. **Note** this method assumes that each node stores information about its sequence as numpy.array object (node.sequence attribute). Therefore, before calling this method, sequence reconstruction with either of the available models must be performed. Parameters ---------- mode : str Optimize branch length assuming the joint ML sequence assignment of both ends of the branch (:code:`joint`), or trace over all possible sequence assignments on both ends of the branch (:code:`marginal`) (slower, experimental). **kwargs : Keyword arguments Keyword Args ------------ verbose : int Output level store_old : bool If True, the old lengths will be saved in :code:`node._old_dist` attribute. Useful for testing, and special post-processing. """ self.logger("TreeAnc.optimize_branch_length: running branch length optimization in mode %s..."%mode,1) if (self.tree is None) or (self.aln is None): self.logger("TreeAnc.optimize_branch_length: ERROR, alignment or tree are missing", 0) return ttconf.ERROR store_old_dist = False if 'store_old' in kwargs: store_old_dist = kwargs['store_old'] if mode=='marginal': # a marginal ancestral reconstruction is required for # marginal branch length inference if not hasattr(self.tree.root, "marginal_profile"): self.infer_ancestral_sequences(marginal=True) max_bl = 0 for node in self.tree.find_clades(order='postorder'): if node.up is None: continue # this is the root if store_old_dist: node._old_length = node.branch_length if mode=='marginal': new_len = self.optimal_marginal_branch_length(node) elif mode=='joint': new_len = self.optimal_branch_length(node) else: self.logger("treeanc.optimize_branch_length: unsupported optimization mode",4, warn=True) new_len = node.branch_length if new_len < 0: continue self.logger("Optimization results: old_len=%.4e, new_len=%.4e, 
naive=%.4e" " Updating branch length..."%(node.branch_length, new_len, len(node.mutations)*self.one_mutation), 5) node.branch_length = new_len node.mutation_length=new_len max_bl = max(max_bl, new_len) # as branch lengths changed, the params must be fixed self.tree.root.up = None self.tree.root.dist2root = 0.0 if max_bl>0.15 and mode=='joint': self.logger("TreeAnc.optimize_branch_length: THIS TREE HAS LONG BRANCHES." " \n\t ****TreeTime IS NOT DESIGNED TO OPTIMIZE LONG BRANCHES." " \n\t ****PLEASE OPTIMIZE BRANCHES WITH ANOTHER TOOL AND RERUN WITH" " \n\t ****branch_length_mode='input'", 0, warn=True) self._prepare_nodes() return ttconf.SUCCESS
[ "def", "optimize_branch_length", "(", "self", ",", "mode", "=", "'joint'", ",", "*", "*", "kwargs", ")", ":", "self", ".", "logger", "(", "\"TreeAnc.optimize_branch_length: running branch length optimization in mode %s...\"", "%", "mode", ",", "1", ")", "if", "(", ...
Perform optimization for the branch lengths of the entire tree. This method only does a single path and needs to be iterated. **Note** this method assumes that each node stores information about its sequence as numpy.array object (node.sequence attribute). Therefore, before calling this method, sequence reconstruction with either of the available models must be performed. Parameters ---------- mode : str Optimize branch length assuming the joint ML sequence assignment of both ends of the branch (:code:`joint`), or trace over all possible sequence assignments on both ends of the branch (:code:`marginal`) (slower, experimental). **kwargs : Keyword arguments Keyword Args ------------ verbose : int Output level store_old : bool If True, the old lengths will be saved in :code:`node._old_dist` attribute. Useful for testing, and special post-processing.
[ "Perform", "optimization", "for", "the", "branch", "lengths", "of", "the", "entire", "tree", ".", "This", "method", "only", "does", "a", "single", "path", "and", "needs", "to", "be", "iterated", "." ]
python
test
TadLeonard/tfatool
tfatool/util.py
https://github.com/TadLeonard/tfatool/blob/12da2807b5fb538c5317ef255d846b32ceb174d0/tfatool/util.py#L7-L17
def parse_datetime(datetime_input): """The arrow library is sadly not good enough to parse certain date strings. It even gives unexpected results for partial date strings such as '2015-01' or just '2015', which I think should be seen as 'the first moment of 2014'. This function should overcome those limitations.""" date_els, time_els = _split_datetime(datetime_input) date_vals = _parse_date(date_els) time_vals = _parse_time(time_els) vals = tuple(date_vals) + tuple(time_vals) return arrow.get(*vals)
[ "def", "parse_datetime", "(", "datetime_input", ")", ":", "date_els", ",", "time_els", "=", "_split_datetime", "(", "datetime_input", ")", "date_vals", "=", "_parse_date", "(", "date_els", ")", "time_vals", "=", "_parse_time", "(", "time_els", ")", "vals", "=", ...
The arrow library is sadly not good enough to parse certain date strings. It even gives unexpected results for partial date strings such as '2015-01' or just '2015', which I think should be seen as 'the first moment of 2014'. This function should overcome those limitations.
[ "The", "arrow", "library", "is", "sadly", "not", "good", "enough", "to", "parse", "certain", "date", "strings", ".", "It", "even", "gives", "unexpected", "results", "for", "partial", "date", "strings", "such", "as", "2015", "-", "01", "or", "just", "2015",...
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py#L673-L676
def fw_rule_delete(self, data, fw_name=None): """Top level rule delete function. """ LOG.debug("FW Rule delete %s", data) self._fw_rule_delete(fw_name, data)
[ "def", "fw_rule_delete", "(", "self", ",", "data", ",", "fw_name", "=", "None", ")", ":", "LOG", ".", "debug", "(", "\"FW Rule delete %s\"", ",", "data", ")", "self", ".", "_fw_rule_delete", "(", "fw_name", ",", "data", ")" ]
Top level rule delete function.
[ "Top", "level", "rule", "delete", "function", "." ]
python
train
rhayes777/PyAutoFit
autofit/tools/fit.py
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/tools/fit.py#L59-L63
def signal_to_noise_map(self): """The signal-to-noise_map of the data and noise-map which are fitted.""" signal_to_noise_map = np.divide(self.data, self.noise_map) signal_to_noise_map[signal_to_noise_map < 0] = 0 return signal_to_noise_map
[ "def", "signal_to_noise_map", "(", "self", ")", ":", "signal_to_noise_map", "=", "np", ".", "divide", "(", "self", ".", "data", ",", "self", ".", "noise_map", ")", "signal_to_noise_map", "[", "signal_to_noise_map", "<", "0", "]", "=", "0", "return", "signal_...
The signal-to-noise_map of the data and noise-map which are fitted.
[ "The", "signal", "-", "to", "-", "noise_map", "of", "the", "data", "and", "noise", "-", "map", "which", "are", "fitted", "." ]
python
train
Tinche/django-bower-cache
registry/gitwrapper.py
https://github.com/Tinche/django-bower-cache/blob/5245b2ee80c33c09d85ce0bf8f047825d9df2118/registry/gitwrapper.py#L45-L54
def clone_from(repo_url, repo_dir): """Clone a remote git repo into a local directory.""" repo_url = _fix_repo_url(repo_url) LOG.info("Cloning %s into %s." % (repo_url, repo_dir)) cmd = GIT_CLONE_CMD.format(repo_url, repo_dir) resp = envoy.run(cmd) if resp.status_code != 0: LOG.error("Cloned failed: %s" % resp.std_err) raise GitException(resp.std_err) LOG.info("Clone successful.")
[ "def", "clone_from", "(", "repo_url", ",", "repo_dir", ")", ":", "repo_url", "=", "_fix_repo_url", "(", "repo_url", ")", "LOG", ".", "info", "(", "\"Cloning %s into %s.\"", "%", "(", "repo_url", ",", "repo_dir", ")", ")", "cmd", "=", "GIT_CLONE_CMD", ".", ...
Clone a remote git repo into a local directory.
[ "Clone", "a", "remote", "git", "repo", "into", "a", "local", "directory", "." ]
python
train
grabbles/grabbit
grabbit/core.py
https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L699-L791
def get(self, return_type='tuple', target=None, extensions=None, domains=None, regex_search=None, **kwargs): """ Retrieve files and/or metadata from the current Layout. Args: return_type (str): Type of result to return. Valid values: 'tuple': returns a list of namedtuples containing file name as well as attribute/value pairs for all named entities. 'file': returns a list of matching filenames. 'dir': returns a list of directories. 'id': returns a list of unique IDs. Must be used together with a valid target. 'obj': returns a list of matching File objects. target (str): The name of the target entity to get results for (if return_type is 'dir' or 'id'). extensions (str, list): One or more file extensions to filter on. Files with any other extensions will be excluded. domains (list): Optional list of domain names to scan for files. If None, all available domains are scanned. regex_search (bool or None): Whether to require exact matching (False) or regex search (True) when comparing the query string to each entity. If None (default), uses the value found in self. kwargs (dict): Any optional key/values to filter the entities on. Keys are entity names, values are regexes to filter on. For example, passing filter={ 'subject': 'sub-[12]'} would return only files that match the first two subjects. Returns: A named tuple (default) or a list (see return_type for details). 
""" if regex_search is None: regex_search = self.regex_search result = [] filters = {} filters.update(kwargs) for filename, file in self.files.items(): if not file._matches(filters, extensions, domains, regex_search): continue result.append(file) # Convert to relative paths if needed if not self.absolute_paths: for i, f in enumerate(result): f = copy(f) f.path = relpath(f.path, self.root) result[i] = f if return_type == 'file': return natural_sort([f.path for f in result]) if return_type == 'tuple': result = [r.as_named_tuple() for r in result] return natural_sort(result, field='filename') if return_type.startswith('obj'): return result else: valid_entities = self.get_domain_entities(domains) if target is None: raise ValueError('If return_type is "id" or "dir", a valid ' 'target entity must also be specified.') result = [x for x in result if target in x.entities] if return_type == 'id': result = list(set([x.entities[target] for x in result])) return natural_sort(result) elif return_type == 'dir': template = valid_entities[target].directory if template is None: raise ValueError('Return type set to directory, but no ' 'directory template is defined for the ' 'target entity (\"%s\").' % target) # Construct regex search pattern from target directory template to_rep = re.findall('\{(.*?)\}', template) for ent in to_rep: patt = valid_entities[ent].pattern template = template.replace('{%s}' % ent, patt) template += '[^\%s]*$' % os.path.sep matches = [f.dirname for f in result if re.search(template, f.dirname)] return natural_sort(list(set(matches))) else: raise ValueError("Invalid return_type specified (must be one " "of 'tuple', 'file', 'id', or 'dir'.")
[ "def", "get", "(", "self", ",", "return_type", "=", "'tuple'", ",", "target", "=", "None", ",", "extensions", "=", "None", ",", "domains", "=", "None", ",", "regex_search", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "regex_search", "is", "...
Retrieve files and/or metadata from the current Layout. Args: return_type (str): Type of result to return. Valid values: 'tuple': returns a list of namedtuples containing file name as well as attribute/value pairs for all named entities. 'file': returns a list of matching filenames. 'dir': returns a list of directories. 'id': returns a list of unique IDs. Must be used together with a valid target. 'obj': returns a list of matching File objects. target (str): The name of the target entity to get results for (if return_type is 'dir' or 'id'). extensions (str, list): One or more file extensions to filter on. Files with any other extensions will be excluded. domains (list): Optional list of domain names to scan for files. If None, all available domains are scanned. regex_search (bool or None): Whether to require exact matching (False) or regex search (True) when comparing the query string to each entity. If None (default), uses the value found in self. kwargs (dict): Any optional key/values to filter the entities on. Keys are entity names, values are regexes to filter on. For example, passing filter={ 'subject': 'sub-[12]'} would return only files that match the first two subjects. Returns: A named tuple (default) or a list (see return_type for details).
[ "Retrieve", "files", "and", "/", "or", "metadata", "from", "the", "current", "Layout", "." ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L175-L186
def get_vnetwork_hosts_output_instance_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_hosts = ET.Element("get_vnetwork_hosts") config = get_vnetwork_hosts output = ET.SubElement(get_vnetwork_hosts, "output") instance_id = ET.SubElement(output, "instance-id") instance_id.text = kwargs.pop('instance_id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_vnetwork_hosts_output_instance_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_vnetwork_hosts", "=", "ET", ".", "Element", "(", "\"get_vnetwork_hosts\"", ")", "config", "=", "ge...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
wandb/client
wandb/__init__.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/__init__.py#L498-L507
def reset_env(exclude=[]): """Remove environment variables, used in Jupyter notebooks""" if os.getenv(env.INITED): wandb_keys = [key for key in os.environ.keys() if key.startswith( 'WANDB_') and key not in exclude] for key in wandb_keys: del os.environ[key] return True else: return False
[ "def", "reset_env", "(", "exclude", "=", "[", "]", ")", ":", "if", "os", ".", "getenv", "(", "env", ".", "INITED", ")", ":", "wandb_keys", "=", "[", "key", "for", "key", "in", "os", ".", "environ", ".", "keys", "(", ")", "if", "key", ".", "star...
Remove environment variables, used in Jupyter notebooks
[ "Remove", "environment", "variables", "used", "in", "Jupyter", "notebooks" ]
python
train
pybel/pybel
src/pybel/manager/cache_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/cache_manager.py#L519-L526
def get_graphs_by_ids(self, network_ids: Iterable[int]) -> List[BELGraph]: """Get a list of networks with the given identifiers and converts to BEL graphs.""" rv = [ self.get_graph_by_id(network_id) for network_id in network_ids ] log.debug('returning graphs for network identifiers: %s', network_ids) return rv
[ "def", "get_graphs_by_ids", "(", "self", ",", "network_ids", ":", "Iterable", "[", "int", "]", ")", "->", "List", "[", "BELGraph", "]", ":", "rv", "=", "[", "self", ".", "get_graph_by_id", "(", "network_id", ")", "for", "network_id", "in", "network_ids", ...
Get a list of networks with the given identifiers and converts to BEL graphs.
[ "Get", "a", "list", "of", "networks", "with", "the", "given", "identifiers", "and", "converts", "to", "BEL", "graphs", "." ]
python
train
litl/rauth
rauth/service.py
https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/service.py#L521-L543
def get_access_token(self, method='POST', decoder=parse_utf8_qsl, key='access_token', **kwargs): ''' Returns an access token. :param method: A string representation of the HTTP method to be used, defaults to `POST`. :type method: str :param decoder: A function used to parse the Response content. Should return a dictionary. :type decoder: func :param key: The key the access token will be decoded by, defaults to 'access_token'. :type string: :param \*\*kwargs: Optional arguments. Same as Requests. :type \*\*kwargs: dict ''' r = self.get_raw_access_token(method, **kwargs) access_token, = process_token_request(r, decoder, key) return access_token
[ "def", "get_access_token", "(", "self", ",", "method", "=", "'POST'", ",", "decoder", "=", "parse_utf8_qsl", ",", "key", "=", "'access_token'", ",", "*", "*", "kwargs", ")", ":", "r", "=", "self", ".", "get_raw_access_token", "(", "method", ",", "*", "*"...
Returns an access token. :param method: A string representation of the HTTP method to be used, defaults to `POST`. :type method: str :param decoder: A function used to parse the Response content. Should return a dictionary. :type decoder: func :param key: The key the access token will be decoded by, defaults to 'access_token'. :type string: :param \*\*kwargs: Optional arguments. Same as Requests. :type \*\*kwargs: dict
[ "Returns", "an", "access", "token", "." ]
python
train
pneff/wsgiservice
wsgiservice/resource.py
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L157-L175
def data(self): """Returns the request data as a dictionary. Merges the path parameters, GET parameters and POST parameters (form-encoded or JSON dictionary). If a key is present in multiple of these, the first one defined is used. """ if self._data: return self._data retval = {} data = self.get_request_data() for subdata in data: for key, value in subdata.iteritems(): if not key in retval: retval[key] = value self._data = retval return retval
[ "def", "data", "(", "self", ")", ":", "if", "self", ".", "_data", ":", "return", "self", ".", "_data", "retval", "=", "{", "}", "data", "=", "self", ".", "get_request_data", "(", ")", "for", "subdata", "in", "data", ":", "for", "key", ",", "value",...
Returns the request data as a dictionary. Merges the path parameters, GET parameters and POST parameters (form-encoded or JSON dictionary). If a key is present in multiple of these, the first one defined is used.
[ "Returns", "the", "request", "data", "as", "a", "dictionary", "." ]
python
train
swift-nav/libsbp
generator/sbpg/targets/latex.py
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/latex.py#L168-L253
def handle_fields(definitions, fields, prefix, offset, multiplier): """ Helper for handling naming and sizing of fields. It's terrible. """ items = [] for f in fields: if f.type_id == "array" and f.options['fill'].value in CONSTRUCT_CODE: prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier n_with_values = f.options['n_with_values'].value bitfields = f.options['fields'].value if n_with_values > 0 else None if 'size' in f.options: name = "%s[%s]" % (f.options['fill'].value, str(f.options['size'].value)) size = field_sizes[f.options['fill'].value] * f.options['size'].value item = FieldItem(prefix_name, name, offset, size, str(f.units), f.desc, n_with_values, bitfields) items.append(item) offset += size else: name = "%s[%s]" % (f.options['fill'].value, "N") multiplier = field_sizes[f.options['fill'].value] size = field_sizes[f.options['fill'].value] * 1 item = FieldItem(prefix_name, name, offset, "N", str(f.units), f.desc, n_with_values, bitfields) items.append(item) offset += size elif f.type_id == "string": prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier n_with_values = f.options['n_with_values'].value bitfields = f.options['fields'].value if n_with_values > 0 else None if 'size' in f.options: name = "string" size = field_sizes['u8'] * f.options['size'].value item = FieldItem(prefix_name, name, offset, size, str(f.units), f.desc, n_with_values, bitfields) items.append(item) offset += size else: name = "string" size = field_sizes['u8'] multiplier = 1 item = FieldItem(prefix_name, name, offset, "N", str(f.units), f.desc, n_with_values, bitfields) items.append(item) offset += size elif f.type_id == "array": name = f.options['fill'].value definition = next(d for d in definitions if name == d.identifier) prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier (new_items, new_offset, new_multiplier) \ = handle_fields(definitions, definition.fields, prefix_name + "[N]", offset, multiplier) multiplier = 
new_offset - offset (newer_items, newer_offset, newer_multiplier) \ = handle_fields(definitions, definition.fields, prefix_name + "[N]", offset, multiplier) items += newer_items offset = newer_offset elif f.type_id not in CONSTRUCT_CODE: name = f.type_id definition = next(d for d in definitions if name == d.identifier) prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier (new_items, new_offset, new_multiplier) \ = handle_fields(definitions, definition.fields, prefix_name, offset, multiplier) items += new_items offset = new_offset multiplier = new_multiplier else: size = field_sizes[f.type_id] name = f.type_id adj_offset = "%dN+%d" % (multiplier, offset) if multiplier else offset prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier n_with_values = f.options['n_with_values'].value bitfields = f.options['fields'].value if n_with_values > 0 else None item = FieldItem(prefix_name, name, adj_offset, size, str(f.units), f.desc, n_with_values, bitfields) items.append(item) offset += size return (items, offset, multiplier)
[ "def", "handle_fields", "(", "definitions", ",", "fields", ",", "prefix", ",", "offset", ",", "multiplier", ")", ":", "items", "=", "[", "]", "for", "f", "in", "fields", ":", "if", "f", ".", "type_id", "==", "\"array\"", "and", "f", ".", "options", "...
Helper for handling naming and sizing of fields. It's terrible.
[ "Helper", "for", "handling", "naming", "and", "sizing", "of", "fields", ".", "It", "s", "terrible", "." ]
python
train
biolink/ontobio
bin/qbiogolr.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/bin/qbiogolr.py#L175-L186
def show_graph(g, nodes, query_ids, args): """ Writes graph """ if args.graph.find('m') > -1: logging.info("SLIMMING") g = get_minimal_subgraph(g, query_ids) w = GraphRenderer.create(args.to) if args.outfile is not None: w.outfile = args.outfile logging.info("Writing subg from "+str(g)) w.write(g, query_ids=query_ids, container_predicates=args.container_properties)
[ "def", "show_graph", "(", "g", ",", "nodes", ",", "query_ids", ",", "args", ")", ":", "if", "args", ".", "graph", ".", "find", "(", "'m'", ")", ">", "-", "1", ":", "logging", ".", "info", "(", "\"SLIMMING\"", ")", "g", "=", "get_minimal_subgraph", ...
Writes graph
[ "Writes", "graph" ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1799-L1837
def _prepare_fabric_fw_internal(self, tenant_id, fw_dict, is_fw_virt, result): """Internal routine to prepare the fabric. This creates an entry in FW DB and runs the SM. """ if not self.auto_nwk_create: LOG.info("Auto network creation disabled") return False try: tenant_name = fw_dict.get('tenant_name') fw_id = fw_dict.get('fw_id') fw_name = fw_dict.get('fw_name') # TODO(padkrish) More than 1 FW per tenant not supported. if tenant_id in self.service_attr and ( result == fw_const.RESULT_FW_CREATE_DONE): LOG.error("Fabric already prepared for tenant %(tenant)s," " %(name)s", {'tenant': tenant_id, 'name': tenant_name}) return True if tenant_id not in self.service_attr: self.create_serv_obj(tenant_id) self.service_attr[tenant_id].create_fw_db(fw_id, fw_name, tenant_id) ret = self.run_create_sm(tenant_id, fw_dict, is_fw_virt) if ret: LOG.info("SM create returned True for Tenant Name " "%(tenant)s FW %(fw)s", {'tenant': tenant_name, 'fw': fw_name}) self.service_attr[tenant_id].set_fabric_create(True) else: LOG.error("SM create returned False for Tenant Name " "%(tenant)s FW %(fw)s", {'tenant': tenant_name, 'fw': fw_name}) except Exception as exc: LOG.error("Exception raised in create fabric int %s", str(exc)) return False return ret
[ "def", "_prepare_fabric_fw_internal", "(", "self", ",", "tenant_id", ",", "fw_dict", ",", "is_fw_virt", ",", "result", ")", ":", "if", "not", "self", ".", "auto_nwk_create", ":", "LOG", ".", "info", "(", "\"Auto network creation disabled\"", ")", "return", "Fals...
Internal routine to prepare the fabric. This creates an entry in FW DB and runs the SM.
[ "Internal", "routine", "to", "prepare", "the", "fabric", "." ]
python
train
hermanschaaf/mafan
mafan/text.py
https://github.com/hermanschaaf/mafan/blob/373ddf299aeb2bd8413bf921c71768af7a8170ea/mafan/text.py#L209-L246
def split_text(text, include_part_of_speech=False, strip_english=False, strip_numbers=False): u""" Split Chinese text at word boundaries. include_pos: also returns the Part Of Speech for each of the words. Some of the different parts of speech are: r: pronoun v: verb ns: proper noun etc... This all gets returned as a tuple: index 0: the split word index 1: the word's part of speech strip_english: remove all entries that have English or numbers in them (useful sometimes) """ if not include_part_of_speech: seg_list = pseg.cut(text) if strip_english: seg_list = filter(lambda x: not contains_english(x), seg_list) if strip_numbers: seg_list = filter(lambda x: not _is_number(x), seg_list) return list(map(lambda i: i.word, seg_list)) else: seg_list = pseg.cut(text) objs = map(lambda w: (w.word, w.flag), seg_list) if strip_english: objs = filter(lambda x: not contains_english(x[0]), objs) if strip_english: objs = filter(lambda x: not _is_number(x[0]), objs) return objs # if was_traditional: # seg_list = map(tradify, seg_list) return list(seg_list)
[ "def", "split_text", "(", "text", ",", "include_part_of_speech", "=", "False", ",", "strip_english", "=", "False", ",", "strip_numbers", "=", "False", ")", ":", "if", "not", "include_part_of_speech", ":", "seg_list", "=", "pseg", ".", "cut", "(", "text", ")"...
u""" Split Chinese text at word boundaries. include_pos: also returns the Part Of Speech for each of the words. Some of the different parts of speech are: r: pronoun v: verb ns: proper noun etc... This all gets returned as a tuple: index 0: the split word index 1: the word's part of speech strip_english: remove all entries that have English or numbers in them (useful sometimes)
[ "u", "Split", "Chinese", "text", "at", "word", "boundaries", "." ]
python
train
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L3046-L3054
def SetPixelColor(self, x: int, y: int, argb: int) -> bool: """ Set color value of a pixel. x: int. y: int. argb: int, color value. Return bool, True if succeed otherwise False. """ return _DllClient.instance().dll.BitmapSetPixel(self._bitmap, x, y, argb)
[ "def", "SetPixelColor", "(", "self", ",", "x", ":", "int", ",", "y", ":", "int", ",", "argb", ":", "int", ")", "->", "bool", ":", "return", "_DllClient", ".", "instance", "(", ")", ".", "dll", ".", "BitmapSetPixel", "(", "self", ".", "_bitmap", ","...
Set color value of a pixel. x: int. y: int. argb: int, color value. Return bool, True if succeed otherwise False.
[ "Set", "color", "value", "of", "a", "pixel", ".", "x", ":", "int", ".", "y", ":", "int", ".", "argb", ":", "int", "color", "value", ".", "Return", "bool", "True", "if", "succeed", "otherwise", "False", "." ]
python
valid
jleclanche/fireplace
fireplace/card.py
https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/card.py#L103-L117
def buff(self, target, buff, **kwargs): """ Summon \a buff and apply it to \a target If keyword arguments are given, attempt to set the given values to the buff. Example: player.buff(target, health=random.randint(1, 5)) NOTE: Any Card can buff any other Card. The controller of the Card that buffs the target becomes the controller of the buff. """ ret = self.controller.card(buff, self) ret.source = self ret.apply(target) for k, v in kwargs.items(): setattr(ret, k, v) return ret
[ "def", "buff", "(", "self", ",", "target", ",", "buff", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "self", ".", "controller", ".", "card", "(", "buff", ",", "self", ")", "ret", ".", "source", "=", "self", "ret", ".", "apply", "(", "target", ...
Summon \a buff and apply it to \a target If keyword arguments are given, attempt to set the given values to the buff. Example: player.buff(target, health=random.randint(1, 5)) NOTE: Any Card can buff any other Card. The controller of the Card that buffs the target becomes the controller of the buff.
[ "Summon", "\\", "a", "buff", "and", "apply", "it", "to", "\\", "a", "target", "If", "keyword", "arguments", "are", "given", "attempt", "to", "set", "the", "given", "values", "to", "the", "buff", ".", "Example", ":", "player", ".", "buff", "(", "target"...
python
train
rigetti/pyquil
pyquil/api/_base_connection.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_base_connection.py#L152-L180
def prepare_register_list(register_dict: Dict[str, Union[bool, Sequence[int]]]): """ Canonicalize classical addresses for the payload and ready MemoryReference instances for serialization. This function will cast keys that are iterables of int-likes to a list of Python ints. This is to support specifying the register offsets as ``range()`` or numpy arrays. This mutates ``register_dict``. :param register_dict: The classical memory to retrieve. Specified as a dictionary: the keys are the names of memory regions, and the values are either (1) a list of integers for reading out specific entries in that memory region, or (2) True, for reading out the entire memory region. """ if not isinstance(register_dict, dict): raise TypeError("register_dict must be a dict but got " + repr(register_dict)) for k, v in register_dict.items(): if isinstance(v, bool): assert v # If boolean v must be True continue indices = [int(x) for x in v] # support ranges, numpy, ... if not all(x >= 0 for x in indices): raise TypeError("Negative indices into classical arrays are not allowed.") register_dict[k] = indices return register_dict
[ "def", "prepare_register_list", "(", "register_dict", ":", "Dict", "[", "str", ",", "Union", "[", "bool", ",", "Sequence", "[", "int", "]", "]", "]", ")", ":", "if", "not", "isinstance", "(", "register_dict", ",", "dict", ")", ":", "raise", "TypeError", ...
Canonicalize classical addresses for the payload and ready MemoryReference instances for serialization. This function will cast keys that are iterables of int-likes to a list of Python ints. This is to support specifying the register offsets as ``range()`` or numpy arrays. This mutates ``register_dict``. :param register_dict: The classical memory to retrieve. Specified as a dictionary: the keys are the names of memory regions, and the values are either (1) a list of integers for reading out specific entries in that memory region, or (2) True, for reading out the entire memory region.
[ "Canonicalize", "classical", "addresses", "for", "the", "payload", "and", "ready", "MemoryReference", "instances", "for", "serialization", "." ]
python
train
peterbrittain/asciimatics
samples/maps.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/samples/maps.py#L375-L397
def _draw_tiles(self, x_offset, y_offset, bg): """Render all visible tiles a layer at a time.""" count = 0 for layer_name, c_filters, t_filters in self._get_features(): colour = (self._256_PALETTE[layer_name] if self._screen.colours >= 256 else self._16_PALETTE[layer_name]) for x, y, z, tile, satellite in sorted(self._tiles.values(), key=lambda k: k[0]): # Don't draw the wrong type or zoom of tile. if satellite != self._satellite or z != self._zoom: continue # Convert tile location into pixels and draw the tile. x *= self._size y *= self._size if satellite: count += self._draw_satellite_tile( tile, int((x-x_offset + self._screen.width // 4) * 2), int(y-y_offset + self._screen.height // 2)) else: count += self._draw_tile_layer(tile, layer_name, c_filters, colour, t_filters, x - x_offset, y - y_offset, bg) return count
[ "def", "_draw_tiles", "(", "self", ",", "x_offset", ",", "y_offset", ",", "bg", ")", ":", "count", "=", "0", "for", "layer_name", ",", "c_filters", ",", "t_filters", "in", "self", ".", "_get_features", "(", ")", ":", "colour", "=", "(", "self", ".", ...
Render all visible tiles a layer at a time.
[ "Render", "all", "visible", "tiles", "a", "layer", "at", "a", "time", "." ]
python
train
CI-WATER/gsshapy
gsshapy/orm/prj.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1068-L1134
def getModelSummaryAsGeoJson(self, session, withStreamNetwork=True, withNodes=False): """ Retrieve a GeoJSON representation of the model. Includes vectorized mask map and stream network. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database withStreamNetwork (bool, optional): Include stream network. Defaults to True. withNodes (bool, optional): Include nodes. Defaults to False. Returns: str: GeoJSON string """ # Get mask map watershedMaskCard = self.getCard('WATERSHED_MASK') maskFilename = watershedMaskCard.value maskExtension = maskFilename.strip('"').split('.')[1] maskMap = session.query(RasterMapFile).\ filter(RasterMapFile.projectFile == self).\ filter(RasterMapFile.fileExtension == maskExtension).\ one() # Get mask map as a KML polygon statement = """ SELECT val, ST_AsGeoJSON(geom) As polygon FROM ( SELECT (ST_DumpAsPolygons({0})).* FROM {1} WHERE id={2} ) As foo ORDER BY val; """.format('raster', maskMap.tableName, maskMap.id) result = session.execute(statement) maskMapJsonPolygon = '' for row in result: maskMapJsonPolygon = row.polygon jsonString = maskMapJsonPolygon if withStreamNetwork: # Get the channel input file for the stream network channelInputFile = self.channelInputFile if channelInputFile is not None: # Use the existing method on the channel input file to generate the stream network GeoJson jsonStreamNetwork = channelInputFile.getStreamNetworkAsGeoJson(session=session, withNodes=withNodes) # Convert to json Python objects featureCollection = json.loads(jsonStreamNetwork) jsonMaskMapObjects = json.loads(maskMapJsonPolygon) # Create a mask feature maskFeature = {"type": "Feature", "geometry": jsonMaskMapObjects, "properties": {}, "id": maskMap.id} # Add mask map to feature collection tempFeatures = featureCollection['features'] tempFeatures.append(maskFeature) featureCollection['features'] = tempFeatures # Dump to string jsonString = json.dumps(featureCollection) return jsonString
[ "def", "getModelSummaryAsGeoJson", "(", "self", ",", "session", ",", "withStreamNetwork", "=", "True", ",", "withNodes", "=", "False", ")", ":", "# Get mask map", "watershedMaskCard", "=", "self", ".", "getCard", "(", "'WATERSHED_MASK'", ")", "maskFilename", "=", ...
Retrieve a GeoJSON representation of the model. Includes vectorized mask map and stream network. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database withStreamNetwork (bool, optional): Include stream network. Defaults to True. withNodes (bool, optional): Include nodes. Defaults to False. Returns: str: GeoJSON string
[ "Retrieve", "a", "GeoJSON", "representation", "of", "the", "model", ".", "Includes", "vectorized", "mask", "map", "and", "stream", "network", "." ]
python
train
calmjs/calmjs.parse
src/calmjs/parse/utils.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/utils.py#L37-L52
def generate_tab_names(name): """ Return the names to lextab and yacctab modules for the given module name. Typical usage should be like so:: >>> lextab, yacctab = generate_tab_names(__name__) """ package_name, module_name = name.rsplit('.', 1) version = ply_dist.version.replace( '.', '_') if ply_dist is not None else 'unknown' data = (package_name, module_name, py_major, version) lextab = '%s.lextab_%s_py%d_ply%s' % data yacctab = '%s.yacctab_%s_py%d_ply%s' % data return lextab, yacctab
[ "def", "generate_tab_names", "(", "name", ")", ":", "package_name", ",", "module_name", "=", "name", ".", "rsplit", "(", "'.'", ",", "1", ")", "version", "=", "ply_dist", ".", "version", ".", "replace", "(", "'.'", ",", "'_'", ")", "if", "ply_dist", "i...
Return the names to lextab and yacctab modules for the given module name. Typical usage should be like so:: >>> lextab, yacctab = generate_tab_names(__name__)
[ "Return", "the", "names", "to", "lextab", "and", "yacctab", "modules", "for", "the", "given", "module", "name", ".", "Typical", "usage", "should", "be", "like", "so", "::" ]
python
train
ArchiveTeam/wpull
wpull/document/htmlparse/lxml_.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/document/htmlparse/lxml_.py#L124-L186
def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget, parser_type='html'): '''Return an iterator of elements found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. target_class: A class to be used for target parsing. parser_type (str): The type of parser to use. Accepted values: ``html``, ``xhtml``, ``xml``. Returns: iterator: Each item is an element from :mod:`.document.htmlparse.element` ''' if encoding: lxml_encoding = to_lxml_encoding(encoding) or 'latin1' else: lxml_encoding = encoding elements = [] callback_func = elements.append target = target_class(callback_func) if parser_type == 'html': parser = lxml.html.HTMLParser( encoding=lxml_encoding, target=target ) elif parser_type == 'xhtml': parser = lxml.html.XHTMLParser( encoding=lxml_encoding, target=target, recover=True ) else: parser = lxml.etree.XMLParser( encoding=lxml_encoding, target=target, recover=True ) if parser_type == 'html': # XXX: Force libxml2 to do full read in case of early "</html>" # See https://github.com/chfoo/wpull/issues/104 # See https://bugzilla.gnome.org/show_bug.cgi?id=727935 for dummy in range(3): parser.feed('<html>'.encode(encoding)) while True: data = file.read(self.BUFFER_SIZE) if not data: break parser.feed(data) for element in elements: yield element del elements[:] parser.close() for element in elements: yield element
[ "def", "parse_lxml", "(", "self", ",", "file", ",", "encoding", "=", "None", ",", "target_class", "=", "HTMLParserTarget", ",", "parser_type", "=", "'html'", ")", ":", "if", "encoding", ":", "lxml_encoding", "=", "to_lxml_encoding", "(", "encoding", ")", "or...
Return an iterator of elements found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. target_class: A class to be used for target parsing. parser_type (str): The type of parser to use. Accepted values: ``html``, ``xhtml``, ``xml``. Returns: iterator: Each item is an element from :mod:`.document.htmlparse.element`
[ "Return", "an", "iterator", "of", "elements", "found", "in", "the", "document", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidgetitem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidgetitem.py#L242-L290
def initGroupStyle(self, useIcons=True, columnCount=None): """ Initialzes this item with a grouping style option. """ flags = self.flags() if flags & QtCore.Qt.ItemIsSelectable: flags ^= QtCore.Qt.ItemIsSelectable self.setFlags(flags) if useIcons: ico = QtGui.QIcon(resources.find('img/treeview/triangle_right.png')) expand_ico = QtGui.QIcon(resources.find('img/treeview/triangle_down.png')) self.setIcon(0, ico) self.setExpandedIcon(0, expand_ico) palette = QtGui.QApplication.palette() line_clr = palette.color(palette.Mid) base_clr = palette.color(palette.Button) text_clr = palette.color(palette.ButtonText) gradient = QtGui.QLinearGradient() gradient.setColorAt(0.00, line_clr) gradient.setColorAt(0.03, line_clr) gradient.setColorAt(0.04, base_clr.lighter(105)) gradient.setColorAt(0.25, base_clr) gradient.setColorAt(0.96, base_clr.darker(105)) gradient.setColorAt(0.97, line_clr) gradient.setColorAt(1.00, line_clr) h = self._fixedHeight if not h: h = self.sizeHint(0).height() if not h: h = 18 gradient.setStart(0.0, 0.0) gradient.setFinalStop(0.0, h) brush = QtGui.QBrush(gradient) tree = self.treeWidget() columnCount = columnCount or (tree.columnCount() if tree else self.columnCount()) for i in range(columnCount): self.setForeground(i, text_clr) self.setBackground(i, brush)
[ "def", "initGroupStyle", "(", "self", ",", "useIcons", "=", "True", ",", "columnCount", "=", "None", ")", ":", "flags", "=", "self", ".", "flags", "(", ")", "if", "flags", "&", "QtCore", ".", "Qt", ".", "ItemIsSelectable", ":", "flags", "^=", "QtCore",...
Initialzes this item with a grouping style option.
[ "Initialzes", "this", "item", "with", "a", "grouping", "style", "option", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/wiki/wiki_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/wiki/wiki_client.py#L338-L370
def get_page_by_id_text(self, project, wiki_identifier, id, recursion_level=None, include_content=None, **kwargs): """GetPageByIdText. [Preview API] Gets metadata or content of the wiki page for the provided page id. Content negotiation is done based on the `Accept` header sent in the request. :param str project: Project ID or project name :param str wiki_identifier: Wiki Id or name. :param int id: Wiki page id. :param str recursion_level: Recursion level for subpages retrieval. Defaults to `None` (Optional). :param bool include_content: True to include the content of the page in the response for Json content type. Defaults to false (Optional) :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if wiki_identifier is not None: route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str') if id is not None: route_values['id'] = self._serialize.url('id', id, 'int') query_parameters = {} if recursion_level is not None: query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str') if include_content is not None: query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool') response = self._send(http_method='GET', location_id='ceddcf75-1068-452d-8b13-2d4d76e1f970', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters, accept_media_type='text/plain') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback)
[ "def", "get_page_by_id_text", "(", "self", ",", "project", ",", "wiki_identifier", ",", "id", ",", "recursion_level", "=", "None", ",", "include_content", "=", "None", ",", "*", "*", "kwargs", ")", ":", "route_values", "=", "{", "}", "if", "project", "is",...
GetPageByIdText. [Preview API] Gets metadata or content of the wiki page for the provided page id. Content negotiation is done based on the `Accept` header sent in the request. :param str project: Project ID or project name :param str wiki_identifier: Wiki Id or name. :param int id: Wiki page id. :param str recursion_level: Recursion level for subpages retrieval. Defaults to `None` (Optional). :param bool include_content: True to include the content of the page in the response for Json content type. Defaults to false (Optional) :rtype: object
[ "GetPageByIdText", ".", "[", "Preview", "API", "]", "Gets", "metadata", "or", "content", "of", "the", "wiki", "page", "for", "the", "provided", "page", "id", ".", "Content", "negotiation", "is", "done", "based", "on", "the", "Accept", "header", "sent", "in...
python
train
DarkEnergySurvey/ugali
ugali/analysis/results.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/results.py#L57-L81
def estimate(self,param,burn=None,clip=10.0,alpha=0.32): """ Estimate parameter value and uncertainties """ # FIXME: Need to add age and metallicity to composite isochrone params (currently properties) if param not in list(self.samples.names) + list(self.source.params) + ['age','metallicity']: msg = 'Unrecognized parameter: %s'%param raise KeyError(msg) # If the parameter is in the samples if param in self.samples.names: if param.startswith('position_angle'): return self.estimate_position_angle(param,burn=burn, clip=clip,alpha=alpha) return self.samples.peak_interval(param,burn=burn,clip=clip,alpha=alpha) mle = self.get_mle() errors = [np.nan,np.nan] # Set default value to the MLE value if param in self.source.params: err = self.source.params[param].errors if err is not None: errors = err # For age and metallicity from composite isochrone return [float(mle[param]),errors]
[ "def", "estimate", "(", "self", ",", "param", ",", "burn", "=", "None", ",", "clip", "=", "10.0", ",", "alpha", "=", "0.32", ")", ":", "# FIXME: Need to add age and metallicity to composite isochrone params (currently properties)", "if", "param", "not", "in", "list"...
Estimate parameter value and uncertainties
[ "Estimate", "parameter", "value", "and", "uncertainties" ]
python
train
tempodb/tempodb-python
tempodb/client.py
https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/client.py#L433-L489
def aggregate_data(self, start, end, aggregation, keys=[], tags=[], attrs={}, rollup=None, period=None, interpolationf=None, interpolation_period=None, tz=None, limit=1000): """Read data from multiple series according to a filter and apply a function across all the returned series to put the datapoints together into one aggregrate series. See the :meth:`list_series` method for a description of how the filter criteria are applied, and the :meth:`read_data` method for how to work with the start, end, and tz parameters. Valid aggregation functions are the same as valid rollup functions. :param string aggregation: the aggregation to perform :param keys: (optional) filter by one or more series keys :type keys: list or string :param tags: (optional) filter by one or more tags :type tags: list or string :param dict attrs: (optional) filter by one or more key-value attributes :param start: the start time for the data points :type start: string or Datetime :param end: the end time for the data points :type end: string or Datetime :param string rollup: (optional) the name of a rollup function to use :param string period: (optional) downsampling rate for the data :param string interpolationf: (optional) an interpolation function to run over the series :param string interpolation_period: (optional) the period to interpolate data into :param string tz: (optional) the timezone to place the data into :rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an iterator over :class:`tempodb.protocol.objects.DataPoint` objects""" url = 'segment' vstart = check_time_param(start) vend = check_time_param(end) params = { 'start': vstart, 'end': vend, 'key': keys, 'tag': tags, 'attr': attrs, 'aggregation.fold': aggregation, 'rollup.fold': rollup, 'rollup.period': period, 'interpolation.function': interpolationf, 'interpolation.period': interpolation_period, 'tz': tz, 'limit': limit } url_args = endpoint.make_url_args(params) url = '?'.join([url, url_args]) resp = 
self.session.get(url) return resp
[ "def", "aggregate_data", "(", "self", ",", "start", ",", "end", ",", "aggregation", ",", "keys", "=", "[", "]", ",", "tags", "=", "[", "]", ",", "attrs", "=", "{", "}", ",", "rollup", "=", "None", ",", "period", "=", "None", ",", "interpolationf", ...
Read data from multiple series according to a filter and apply a function across all the returned series to put the datapoints together into one aggregrate series. See the :meth:`list_series` method for a description of how the filter criteria are applied, and the :meth:`read_data` method for how to work with the start, end, and tz parameters. Valid aggregation functions are the same as valid rollup functions. :param string aggregation: the aggregation to perform :param keys: (optional) filter by one or more series keys :type keys: list or string :param tags: (optional) filter by one or more tags :type tags: list or string :param dict attrs: (optional) filter by one or more key-value attributes :param start: the start time for the data points :type start: string or Datetime :param end: the end time for the data points :type end: string or Datetime :param string rollup: (optional) the name of a rollup function to use :param string period: (optional) downsampling rate for the data :param string interpolationf: (optional) an interpolation function to run over the series :param string interpolation_period: (optional) the period to interpolate data into :param string tz: (optional) the timezone to place the data into :rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an iterator over :class:`tempodb.protocol.objects.DataPoint` objects
[ "Read", "data", "from", "multiple", "series", "according", "to", "a", "filter", "and", "apply", "a", "function", "across", "all", "the", "returned", "series", "to", "put", "the", "datapoints", "together", "into", "one", "aggregrate", "series", "." ]
python
train