repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
openstack/python-scciclient
scciclient/irmc/snmp.py
https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/snmp.py#L69-L86
def get_irmc_firmware_version(snmp_client):
    """Get the BMC name and iRMC firmware version of the node.

    :param snmp_client: an SNMP client object.
    :raises: SNMPIRMCFirmwareFailure if the SNMP operation failed.
    :returns: a string combining the BMC name and the iRMC firmware
        version, joined with '-' when both parts are available.
    """
    try:
        name = snmp_client.get(BMC_NAME_OID)
        version = snmp_client.get(IRMC_FW_VERSION_OID)
    except SNMPFailure as e:
        raise SNMPIRMCFirmwareFailure(
            SNMP_FAILURE_MSG % ("GET IRMC FIRMWARE VERSION", e))
    # Join the two parts with '-' only when both are present.
    parts = {'bmc': name or '',
             'firm_ver': version or '',
             'sep': '-' if name and version else ''}
    return '%(bmc)s%(sep)s%(firm_ver)s' % parts
[ "def", "get_irmc_firmware_version", "(", "snmp_client", ")", ":", "try", ":", "bmc_name", "=", "snmp_client", ".", "get", "(", "BMC_NAME_OID", ")", "irmc_firm_ver", "=", "snmp_client", ".", "get", "(", "IRMC_FW_VERSION_OID", ")", "return", "(", "'%(bmc)s%(sep)s%(f...
Get irmc firmware version of the node. :param snmp_client: an SNMP client object. :raises: SNMPFailure if SNMP operation failed. :returns: a string of bmc name and irmc firmware version.
[ "Get", "irmc", "firmware", "version", "of", "the", "node", "." ]
python
train
tundish/turberfield-dialogue
turberfield/dialogue/matcher.py
https://github.com/tundish/turberfield-dialogue/blob/e7ccf7c19ae162e2f315ddf2642394e858529b4a/turberfield/dialogue/matcher.py#L70-L87
def options(self, data):
    """Generate folders to best match metadata.

    The result is a single, perfectly matched folder, or the two
    nearest neighbours of an imperfect match.

    :param dict data: metadata matching criteria.

    This method is a generator. It yields
    :py:class:`turberfield.dialogue.model.SceneScript.Folder` objects.
    """
    key = self.mapping_key(data)
    if key in self.keys:
        # Perfect match: yield the first folder with identical metadata.
        yield next(f for f in self.folders if f.metadata == data)
    else:
        # Imperfect match: yield the neighbours around the insertion point.
        pos = bisect.bisect_left(self.keys, key)
        for i in sorted({max(0, pos - 1), pos}):
            yield self.folders[i]
[ "def", "options", "(", "self", ",", "data", ")", ":", "if", "self", ".", "mapping_key", "(", "data", ")", "in", "self", ".", "keys", ":", "yield", "next", "(", "i", "for", "i", "in", "self", ".", "folders", "if", "i", ".", "metadata", "==", "data...
Generate folders to best match metadata. The results will be a single, perfectly matched folder, or the two nearest neighbours of an imperfect match. :param dict data: metadata matching criteria. This method is a generator. It yields :py:class:`turberfield.dialogue.model.SceneScript.Folder` objects.
[ "Generate", "folders", "to", "best", "match", "metadata", "." ]
python
train
grycap/RADL
radl/radl.py
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl.py#L1205-L1221
def __getIP(self, public):
    """Return the first net_interface.%d.ip for a system attached to a
    public (or, when ``public`` is false, private) network, or None."""
    # Ids of the networks whose visibility matches the request.
    if public:
        wanted = [net.id for net in self.networks if net.isPublic()]
    else:
        wanted = [net.id for net in self.networks if not net.isPublic()]
    for system in self.systems:
        index = 0
        connection = system.getValue("net_interface.%d.connection" % index)
        # Walk the numbered interfaces until one is undefined.
        while connection:
            if connection in wanted:
                address = system.getValue("net_interface.%d.ip" % index)
                if address:
                    return address
            index += 1
            connection = system.getValue(
                "net_interface.%d.connection" % index)
    return None
[ "def", "__getIP", "(", "self", ",", "public", ")", ":", "maybeNot", "=", "(", "lambda", "x", ":", "x", ")", "if", "public", "else", "(", "lambda", "x", ":", "not", "x", ")", "nets_id", "=", "[", "net", ".", "id", "for", "net", "in", "self", "."...
Return the first net_interface.%d.ip for a system in a public/private network.
[ "Return", "the", "first", "net_interface", ".", "%d", ".", "ip", "for", "a", "system", "in", "a", "public", "/", "private", "network", "." ]
python
train
pyQode/pyqode.core
pyqode/core/backend/workers.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/backend/workers.py#L171-L199
def finditer_noregex(string, sub, whole_word):
    """
    Yield the start offsets of occurrences of ``sub`` in ``string``,
    using str.find instead of regular expressions.

    :param string: string to parse
    :param sub: search string
    :param whole_word: True to select whole words only
    """
    pos = string.find(sub)
    while pos != -1:
        if not whole_word:
            # Plain search: report every (possibly overlapping) hit.
            yield pos
            pos = string.find(sub, pos + 1)
            continue
        # Whole-word search: check the characters flanking the match;
        # out-of-range neighbours count as separators.
        before = string[pos - 1] if pos else ' '
        end = pos + len(sub)
        after = string[end] if end < len(string) else ' '
        if (after in DocumentWordsProvider.separators and
                before in DocumentWordsProvider.separators):
            yield pos
        pos = string.find(sub, end)
[ "def", "finditer_noregex", "(", "string", ",", "sub", ",", "whole_word", ")", ":", "start", "=", "0", "while", "True", ":", "start", "=", "string", ".", "find", "(", "sub", ",", "start", ")", "if", "start", "==", "-", "1", ":", "return", "if", "who...
Search occurrences using str.find instead of regular expressions. :param string: string to parse :param sub: search string :param whole_word: True to select whole words only
[ "Search", "occurrences", "using", "str", ".", "find", "instead", "of", "regular", "expressions", "." ]
python
train
openid/JWTConnect-Python-OidcService
src/oidcservice/oidc/provider_info_discovery.py
https://github.com/openid/JWTConnect-Python-OidcService/blob/759ab7adef30a7e3b9d75475e2971433b9613788/src/oidcservice/oidc/provider_info_discovery.py#L93-L178
def match_preferences(self, pcr=None, issuer=None):
    """
    Match the clients preferences against what the provider can do.

    This is to prepare for later client registration and or what
    functionality the client actually will use.
    In the client configuration the client preferences are expressed.
    These are then compared with the Provider Configuration information.
    If the Provider has left some claims out, defaults specified in the
    standard will be used.

    :param pcr: Provider configuration response if available
    :param issuer: The issuer identifier
    """
    # Fall back on the provider info already stored in the service
    # context when no explicit provider configuration is given.
    if not pcr:
        pcr = self.service_context.provider_info

    regreq = oidc.RegistrationRequest

    # Pass 1: preferences that have a provider-side counterpart claim.
    for _pref, _prov in PREFERENCE2PROVIDER.items():
        try:
            vals = self.service_context.client_preferences[_pref]
        except KeyError:
            # Client expressed no preference for this claim.
            continue

        try:
            _pvals = pcr[_prov]
        except KeyError:
            try:
                # If the provider have not specified use what the
                # standard says is mandatory if at all.
                _pvals = PROVIDER_DEFAULT[_pref]
            except KeyError:
                logger.info(
                    'No info from provider on {} and no default'.format(
                        _pref))
                # No provider info and no default: assume the client's
                # own values are acceptable.
                _pvals = vals

        if isinstance(vals, str):
            if vals in _pvals:
                self.service_context.behaviour[_pref] = vals
        else:
            try:
                vtyp = regreq.c_param[_pref]
            except KeyError:
                # Allow non standard claims
                if isinstance(vals, list):
                    self.service_context.behaviour[_pref] = [
                        v for v in vals if v in _pvals]
                elif vals in _pvals:
                    self.service_context.behaviour[_pref] = vals
            else:
                if isinstance(vtyp[0], list):
                    # Claim is list-valued: keep every preferred value
                    # the provider supports.
                    self.service_context.behaviour[_pref] = []
                    for val in vals:
                        if val in _pvals:
                            self.service_context.behaviour[_pref].append(
                                val)
                else:
                    # Claim is single-valued: keep the first preferred
                    # value the provider supports.
                    for val in vals:
                        if val in _pvals:
                            self.service_context.behaviour[_pref] = val
                            break

        # A stated preference the OP cannot satisfy is a hard error.
        if _pref not in self.service_context.behaviour:
            raise ConfigurationError(
                "OP couldn't match preference:%s" % _pref, pcr)

    # Pass 2: remaining client preferences with no provider counterpart.
    for key, val in self.service_context.client_preferences.items():
        if key in self.service_context.behaviour:
            continue

        try:
            vtyp = regreq.c_param[key]
            if isinstance(vtyp[0], list):
                pass
            elif isinstance(val, list) and not isinstance(val, str):
                # Single-valued claim given as a list: take the first.
                val = val[0]
        except KeyError:
            pass
        if key not in PREFERENCE2PROVIDER:
            self.service_context.behaviour[key] = val

    logger.debug(
        'service_context behaviour: {}'.format(
            self.service_context.behaviour))
[ "def", "match_preferences", "(", "self", ",", "pcr", "=", "None", ",", "issuer", "=", "None", ")", ":", "if", "not", "pcr", ":", "pcr", "=", "self", ".", "service_context", ".", "provider_info", "regreq", "=", "oidc", ".", "RegistrationRequest", "for", "...
Match the clients preferences against what the provider can do. This is to prepare for later client registration and or what functionality the client actually will use. In the client configuration the client preferences are expressed. These are then compared with the Provider Configuration information. If the Provider has left some claims out, defaults specified in the standard will be used. :param pcr: Provider configuration response if available :param issuer: The issuer identifier
[ "Match", "the", "clients", "preferences", "against", "what", "the", "provider", "can", "do", ".", "This", "is", "to", "prepare", "for", "later", "client", "registration", "and", "or", "what", "functionality", "the", "client", "actually", "will", "use", ".", ...
python
train
AbletonAG/abl.vpath
abl/vpath/base/fs.py
https://github.com/AbletonAG/abl.vpath/blob/a57491347f6e7567afa047216e5b6f6035226eaf/abl/vpath/base/fs.py#L652-L664
def isexec(self, mode=stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
    """
    isexec:

    :param mode: stat permission mask to test; by default any of the
        user/group/other execute bits.

    @rtype: bool
    @return: Indicates whether the path points to a file or folder
        which has the executable flag set. Note that on systems which
        do not support executable flags the result may be
        unpredictable. On Windows the value is determined by the file
        extension, a *.exe file is executable, a *.sh not.
    """
    # Delegate to the backend connection for this path.
    return self.connection.isexec(self, mode)
[ "def", "isexec", "(", "self", ",", "mode", "=", "stat", ".", "S_IXUSR", "|", "stat", ".", "S_IXGRP", "|", "stat", ".", "S_IXOTH", ")", ":", "return", "self", ".", "connection", ".", "isexec", "(", "self", ",", "mode", ")" ]
isexec: @rtype: bool @return: Indicates whether the path points to a file or folder which has the executable flag set. Note that on systems which do not support executables flags the result may be unpredicatable. On Windows the value is determined by the file extension, a *.exe file is executable, a *.sh not.
[ "isexec", ":" ]
python
train
jelmer/python-fastimport
fastimport/dates.py
https://github.com/jelmer/python-fastimport/blob/5cef9e037b7d7b37f58f522ac9ea4e343e6a1dff/fastimport/dates.py#L30-L42
def parse_raw(s, lineno=0):
    """Parse a date from a raw string.

    The format must be exactly "seconds-since-epoch offset-utc".
    See the spec for details.

    :return: a (timestamp, timezone) tuple
    """
    seconds, offset = s.split(b' ', 1)
    when = float(seconds)
    try:
        tz = parse_tz(offset)
    except ValueError:
        raise errors.InvalidTimezone(lineno, offset)
    return when, tz
[ "def", "parse_raw", "(", "s", ",", "lineno", "=", "0", ")", ":", "timestamp_str", ",", "timezone_str", "=", "s", ".", "split", "(", "b' '", ",", "1", ")", "timestamp", "=", "float", "(", "timestamp_str", ")", "try", ":", "timezone", "=", "parse_tz", ...
Parse a date from a raw string. The format must be exactly "seconds-since-epoch offset-utc". See the spec for details.
[ "Parse", "a", "date", "from", "a", "raw", "string", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1861-L1871
def transformer_tall_train_uniencdec():
    """Train CNN/DM with a unidirectional encoder and decoder."""
    hp = transformer_tall()
    # Truncation lengths for the summarization task.
    hp.max_input_seq_length = 750
    hp.max_target_seq_length = 100
    # Optimization settings.
    hp.optimizer = "true_adam"
    hp.learning_rate_schedule = "linear_warmup*constant*cosdecay"
    hp.learning_rate_constant = 2e-4
    hp.learning_rate_decay_steps = 150000
    hp.unidirectional_encoder = True
    return hp
[ "def", "transformer_tall_train_uniencdec", "(", ")", ":", "hparams", "=", "transformer_tall", "(", ")", "hparams", ".", "max_input_seq_length", "=", "750", "hparams", ".", "max_target_seq_length", "=", "100", "hparams", ".", "optimizer", "=", "\"true_adam\"", "hpara...
Train CNN/DM with a unidirectional encoder and decoder.
[ "Train", "CNN", "/", "DM", "with", "a", "unidirectional", "encoder", "and", "decoder", "." ]
python
train
hollenstein/maspy
maspy/core.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L1191-L1204
def _fromJSON(cls, jsonobject):
    """Generate a new instance of :class:`maspy.core.MzmlPrecursor` from
    a decoded JSON object (as generated by
    :func:`maspy.core.MzmlPrecursor._reprJSON()`).

    :param jsonobject: decoded JSON object
    :returns: a new instance of :class:`MzmlPrecursor`
    """
    ref = jsonobject[0]
    # JSON stores params as lists; restore them to tuples.
    as_tuples = lambda params: [tuple(p) for p in params]
    return cls(ref,
               as_tuples(jsonobject[1]),
               as_tuples(jsonobject[2]),
               _mzmlListAttribToTuple(jsonobject[3]))
[ "def", "_fromJSON", "(", "cls", ",", "jsonobject", ")", ":", "spectrumRef", "=", "jsonobject", "[", "0", "]", "activation", "=", "[", "tuple", "(", "param", ")", "for", "param", "in", "jsonobject", "[", "1", "]", "]", "isolationWindow", "=", "[", "tupl...
Generates a new instance of :class:`maspy.core.MzmlPrecursor` from a decoded JSON object (as generated by :func:`maspy.core.MzmlPrecursor._reprJSON()`). :param jsonobject: decoded JSON object :returns: a new instance of :class:`MzmlPrecursor`
[ "Generates", "a", "new", "instance", "of", ":", "class", ":", "maspy", ".", "core", ".", "MzmlPrecursor", "from", "a", "decoded", "JSON", "object", "(", "as", "generated", "by", ":", "func", ":", "maspy", ".", "core", ".", "MzmlPrecursor", ".", "_reprJSO...
python
train
opendatateam/udata
udata/commands/worker.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/worker.py#L125-L133
def status(queue, munin, munin_config):
    """List queued tasks aggregated by name."""
    if munin_config:
        return status_print_config(queue)
    for q in get_queues(queue):
        status_print_queue(q, munin=munin)
    # Trailing separator only for human-readable (non-munin) output.
    if not munin:
        print('-' * 40)
[ "def", "status", "(", "queue", ",", "munin", ",", "munin_config", ")", ":", "if", "munin_config", ":", "return", "status_print_config", "(", "queue", ")", "queues", "=", "get_queues", "(", "queue", ")", "for", "queue", "in", "queues", ":", "status_print_queu...
List queued tasks aggregated by name
[ "List", "queued", "tasks", "aggregated", "by", "name" ]
python
train
simpleai-team/simpleai
simpleai/search/traditional.py
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/search/traditional.py#L52-L73
def iterative_limited_depth_first(problem, graph_search=False, viewer=None):
    '''
    Iterative limited depth first search.

    If graph_search=True, will avoid exploring repeated states.
    Requires: SearchProblem.actions, SearchProblem.result, and
    SearchProblem.is_goal.
    '''
    depth = 0
    while True:
        # Retry with an ever-deeper limit until a solution is found.
        result = limited_depth_first(problem, depth_limit=depth,
                                     graph_search=graph_search,
                                     viewer=viewer)
        depth += 1
        if result:
            break
    if viewer:
        viewer.event('no_more_runs', result, 'returned after %i runs' % depth)
    return result
[ "def", "iterative_limited_depth_first", "(", "problem", ",", "graph_search", "=", "False", ",", "viewer", "=", "None", ")", ":", "solution", "=", "None", "limit", "=", "0", "while", "not", "solution", ":", "solution", "=", "limited_depth_first", "(", "problem"...
Iterative limited depth first search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.is_goal.
[ "Iterative", "limited", "depth", "first", "search", "." ]
python
train
inasafe/inasafe
safe/gui/tools/batch/batch_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/batch/batch_dialog.py#L419-L568
def run_task(self, task_item, status_item, count=0, index=''):
    """Run a single task.

    :param task_item: Table task_item containing task name / details.
    :type task_item: QTableWidgetItem

    :param status_item: Table task_item that holds the task status.
    :type status_item: QTableWidgetItem

    :param count: Count of scenarios that have been run already.
    :type count:

    :param index: The index for the table item that will be run.
    :type index: int

    :returns: Flag indicating if the task succeeded or not.
    :rtype: bool
    """
    self.enable_busy_cursor()
    # Hide previously-created layer groups before starting this run.
    for layer_group in self.layer_group_container:
        layer_group.setItemVisibilityChecked(False)
    # set status to 'running'
    status_item.setText(self.tr('Running'))

    # .. see also:: :func:`appendRow` to understand the next 2 lines
    variant = task_item.data(QtCore.Qt.UserRole)
    value = variant[0]

    result = True

    if isinstance(value, str):
        # A string value is a macro/script path.
        filename = value
        # run script
        try:
            self.run_script(filename)
            # set status to 'OK'
            status_item.setText(self.tr('Script OK'))
        except Exception as e:  # pylint: disable=W0703
            # set status to 'fail'
            status_item.setText(self.tr('Script Fail'))
            LOGGER.exception('Running macro failed. The exception: ' +
                             str(e))
            result = False
    elif isinstance(value, dict):
        # A dict value describes a full scenario to analyse.
        # start in new project if toggle is active
        if self.start_in_new_project:
            self.iface.newProject()
        # create layer group
        group_name = value['scenario_name']
        self.layer_group = self.root.addGroup(group_name)
        self.layer_group_container.append(self.layer_group)

        # Its a dict containing files for a scenario
        success, parameters = self.prepare_task(value)
        if not success:
            # set status to 'running'
            status_item.setText(self.tr('Please update scenario'))
            self.disable_busy_cursor()
            return False
        # Output goes under the chosen directory, in a per-scenario
        # sub-directory named after the scenario.
        directory = self.output_directory.text()
        if self.scenario_directory_radio.isChecked():
            directory = self.source_directory.text()
        output_directory = os.path.join(directory, group_name)
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
        # If impact function parameters loaded successfully, initiate IF.
        impact_function = ImpactFunction()
        impact_function.datastore = Folder(output_directory)
        impact_function.datastore.default_vector_format = "geojson"
        impact_function.hazard = parameters[layer_purpose_hazard['key']]
        impact_function.exposure = (
            parameters[layer_purpose_exposure['key']])
        # NOTE(review): extent/crs are only applied when no aggregation
        # layer is given — confirm this precedence is intended.
        if parameters[layer_purpose_aggregation['key']]:
            impact_function.aggregation = (
                parameters[layer_purpose_aggregation['key']])
        elif parameters['extent']:
            impact_function.requested_extent = parameters['extent']
            impact_function.crs = parameters['crs']
        prepare_status, prepare_message = impact_function.prepare()
        if prepare_status == PREPARE_SUCCESS:
            LOGGER.info('Impact function ready')
            status, message = impact_function.run()
            if status == ANALYSIS_SUCCESS:
                status_item.setText(self.tr('Analysis Success'))
                impact_layer = impact_function.impact
                if impact_layer.isValid():
                    layer_list = [
                        impact_layer,
                        impact_function.analysis_impacted,
                        parameters[layer_purpose_hazard['key']],
                        parameters[layer_purpose_exposure['key']],
                        parameters[layer_purpose_aggregation['key']]]
                    QgsProject.instance().addMapLayers(
                        layer_list, False)
                    for layer in layer_list:
                        self.layer_group.addLayer(layer)
                    map_canvas = QgsProject.instance().mapLayers()
                    for layer in map_canvas:
                        # turn of layer visibility if not impact layer
                        if map_canvas[layer].id() == impact_layer.id():
                            self.set_layer_visible(
                                map_canvas[layer], True)
                        else:
                            self.set_layer_visible(
                                map_canvas[layer], False)
                    # we need to set analysis_impacted as an active layer
                    # because we need to get all qgis variables that we
                    # need from this layer for infographic.
                    if self.iface:
                        self.iface.setActiveLayer(
                            impact_function.analysis_impacted)
                    report_directory = os.path.join(
                        output_directory, 'output')
                    # generate map report and impact report
                    try:
                        error_code, message = (
                            impact_function.generate_report(
                                all_default_report_components,
                                report_directory))
                    except BaseException:
                        status_item.setText(
                            self.tr('Report failed to generate.'))
                else:
                    LOGGER.info('Impact layer is invalid')
            elif status == ANALYSIS_FAILED_BAD_INPUT:
                LOGGER.info('Bad input detected')
            elif status == ANALYSIS_FAILED_BAD_CODE:
                LOGGER.info(
                    'Impact function encountered a bug: %s' % message)
        else:
            LOGGER.warning('Impact function not ready')
            send_error_message(self, prepare_message)
    else:
        LOGGER.exception('Data type not supported: "%s"' % value)
        result = False

    self.disable_busy_cursor()
    return result
[ "def", "run_task", "(", "self", ",", "task_item", ",", "status_item", ",", "count", "=", "0", ",", "index", "=", "''", ")", ":", "self", ".", "enable_busy_cursor", "(", ")", "for", "layer_group", "in", "self", ".", "layer_group_container", ":", "layer_grou...
Run a single task. :param task_item: Table task_item containing task name / details. :type task_item: QTableWidgetItem :param status_item: Table task_item that holds the task status. :type status_item: QTableWidgetItem :param count: Count of scenarios that have been run already. :type count: :param index: The index for the table item that will be run. :type index: int :returns: Flag indicating if the task succeeded or not. :rtype: bool
[ "Run", "a", "single", "task", "." ]
python
train
wiheto/teneto
teneto/networkmeasures/reachability_latency.py
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/reachability_latency.py#L9-L72
def reachability_latency(tnet=None, paths=None, rratio=1, calc='global'):
    """
    Reachability latency. This is the r-th longest temporal path.

    Parameters
    ----------
    tnet : array or dict
        A network (graphlet or contact representation), binary
        undirected only. Mutually exclusive with ``paths``.
    paths : pandas.DataFrame
        A paths dataframe (output of
        teneto.networkmeasures.shortest_temporal_path) with columns
        'from', 'to', 't_start' and 'temporal-distance'. Mutually
        exclusive with ``tnet``.
    rratio : float (default: 1)
        reachability ratio that the latency is calculated in relation to.
        Value must be over 0 and up to 1.
        1 (default) - all nodes must be reached.
        Other values (e.g. .5 imply that 50% of nodes are reached)
        This is rounded to the nearest node integer. E.g. if there are 6
        nodes [1,2,3,4,5,6], it will be node 4 (due to round upwards).
    calc : str
        what to calculate. Alternatives: 'global' entire network;
        'nodes': for each node.

    Returns
    -------
    reach_lat : array
        Reachability latency

    Notes
    -----
    Reachability latency calculates the time it takes for the paths.
    """
    if tnet is not None and paths is not None:
        raise ValueError('Only network or path input allowed.')
    if tnet is None and paths is None:
        raise ValueError('No input.')
    # if shortest paths are not calculated, calculate them
    if tnet is not None:
        paths = shortest_temporal_path(tnet)

    # Dense (node, node, time) matrix of temporal distances; unreached
    # pairs stay NaN.
    num_nodes = paths[['from', 'to']].max().max() + 1
    num_times = paths[['t_start']].max().max() + 1
    pathmat = np.zeros([num_nodes, num_nodes, num_times]) * np.nan
    pathmat[paths['from'].values, paths['to'].values,
            paths['t_start'].values] = paths['temporal-distance']

    netshape = pathmat.shape
    # BUG FIX: np.round returns a float, and using a float as an array
    # index raises IndexError on modern numpy — cast to int.
    edges_to_reach = int(netshape[0] - np.round(netshape[0] * rratio))

    reach_lat = np.zeros([netshape[1], netshape[2]]) * np.nan
    for t_ind in range(0, netshape[2]):
        # Sort each row's distances in descending order (NaNs last) and
        # take the r-th longest path per node.
        paths_sort = -np.sort(-pathmat[:, :, t_ind], axis=1)
        reach_lat[:, t_ind] = paths_sort[:, edges_to_reach]
    if calc == 'global':
        reach_lat = np.nansum(reach_lat)
        reach_lat = reach_lat / ((netshape[0]) * netshape[2])
    elif calc == 'nodes':
        reach_lat = np.nansum(reach_lat, axis=1)
        reach_lat = reach_lat / (netshape[2])
    return reach_lat
[ "def", "reachability_latency", "(", "tnet", "=", "None", ",", "paths", "=", "None", ",", "rratio", "=", "1", ",", "calc", "=", "'global'", ")", ":", "if", "tnet", "is", "not", "None", "and", "paths", "is", "not", "None", ":", "raise", "ValueError", "...
Reachability latency. This is the r-th longest temporal path. Parameters --------- data : array or dict Can either be a network (graphlet or contact), binary unidrected only. Alternative can be a paths dictionary (output of teneto.networkmeasure.shortest_temporal_path) rratio: float (default: 1) reachability ratio that the latency is calculated in relation to. Value must be over 0 and up to 1. 1 (default) - all nodes must be reached. Other values (e.g. .5 imply that 50% of nodes are reached) This is rounded to the nearest node inter. E.g. if there are 6 nodes [1,2,3,4,5,6], it will be node 4 (due to round upwards) calc : str what to calculate. Alternatives: 'global' entire network; 'nodes': for each node. Returns -------- reach_lat : array Reachability latency Notes ------ Reachability latency calculates the time it takes for the paths.
[ "Reachability", "latency", ".", "This", "is", "the", "r", "-", "th", "longest", "temporal", "path", "." ]
python
train
galaxy-genome-annotation/python-apollo
apollo/annotations/__init__.py
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/apollo/annotations/__init__.py#L743-L758
def get_sequence_alterations(self, organism=None, sequence=None):
    """
    [UNTESTED] Get all of the sequence's alterations

    :type organism: str
    :param organism: Organism Common Name

    :type sequence: str
    :param sequence: Sequence Name

    :rtype: list
    :return: A list of sequence alterations(?)
    """
    # Build the request payload from the optional filters, then POST.
    payload = self._update_data({}, organism, sequence)
    return self.post('getSequenceAlterations', payload)
[ "def", "get_sequence_alterations", "(", "self", ",", "organism", "=", "None", ",", "sequence", "=", "None", ")", ":", "data", "=", "{", "}", "data", "=", "self", ".", "_update_data", "(", "data", ",", "organism", ",", "sequence", ")", "return", "self", ...
[UNTESTED] Get all of the sequence's alterations :type organism: str :param organism: Organism Common Name :type sequence: str :param sequence: Sequence Name :rtype: list :return: A list of sequence alterations(?)
[ "[", "UNTESTED", "]", "Get", "all", "of", "the", "sequence", "s", "alterations" ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_normalizer.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_normalizer.py#L24-L64
def convert(model, input_features, output_features):
    """Convert a normalizer model to the protobuf spec.

    Parameters
    ----------
    model: Normalizer
        A Normalizer.

    input_features: str
        Name of the input column.

    output_features: str
        Name of the output column.

    Returns
    -------
    model_spec: An object of type Model_pb.
        Protobuf representation of the model
    """
    if not _HAS_SKLEARN:
        raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')

    # Validate the scikit-learn model before converting.
    _sklearn_util.check_expected_type(model, Normalizer)
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, 'norm'))

    # Build the spec and its transform interface.
    spec = _Model_pb2.Model()
    spec.specificationVersion = SPECIFICATION_VERSION
    spec = _set_transform_interface_params(spec, input_features,
                                           output_features)

    # Map the sklearn norm name onto the proto enum.
    norm_kind = model.norm
    if norm_kind == 'l1':
        spec.normalizer.normType = _proto__normalizer.L1
    elif norm_kind == 'l2':
        spec.normalizer.normType = _proto__normalizer.L2
    elif norm_kind == 'max':
        spec.normalizer.normType = _proto__normalizer.LMax
    return _MLModel(spec)
[ "def", "convert", "(", "model", ",", "input_features", ",", "output_features", ")", ":", "if", "not", "(", "_HAS_SKLEARN", ")", ":", "raise", "RuntimeError", "(", "'scikit-learn not found. scikit-learn conversion API is disabled.'", ")", "# Test the scikit-learn model", "...
Convert a normalizer model to the protobuf spec. Parameters ---------- model: Normalizer A Normalizer. input_features: str Name of the input column. output_features: str Name of the output column. Returns ------- model_spec: An object of type Model_pb. Protobuf representation of the model
[ "Convert", "a", "normalizer", "model", "to", "the", "protobuf", "spec", "." ]
python
train
zqfang/GSEApy
gseapy/gsea.py
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L348-L382
def load_data(self, cls_vec): """pre-processed the data frame.new filtering methods will be implement here. """ # read data in if isinstance(self.data, pd.DataFrame) : exprs = self.data.copy() # handle index is gene_names if exprs.index.dtype == 'O': exprs = exprs.reset_index() elif os.path.isfile(self.data) : # GCT input format? if self.data.endswith("gct"): exprs = pd.read_csv(self.data, skiprows=1, comment='#',sep="\t") else: exprs = pd.read_csv(self.data, comment='#',sep="\t") else: raise Exception('Error parsing gene expression DataFrame!') #drop duplicated gene names if exprs.iloc[:,0].duplicated().sum() > 0: self._logger.warning("Warning: dropping duplicated gene names, only keep the first values") exprs.drop_duplicates(subset=exprs.columns[0], inplace=True) #drop duplicate gene_names. if exprs.isnull().any().sum() > 0: self._logger.warning("Warning: Input data contains NA, filled NA with 0") exprs.dropna(how='all', inplace=True) #drop rows with all NAs exprs = exprs.fillna(0) # set gene name as index exprs.set_index(keys=exprs.columns[0], inplace=True) # select numberic columns df = exprs.select_dtypes(include=[np.number]) # drop any genes which std ==0 df_std = df.groupby(by=cls_vec, axis=1).std() df = df[~df_std.isin([0]).any(axis=1)] df = df + 0.00001 # we don't like zeros!!! return df
[ "def", "load_data", "(", "self", ",", "cls_vec", ")", ":", "# read data in", "if", "isinstance", "(", "self", ".", "data", ",", "pd", ".", "DataFrame", ")", ":", "exprs", "=", "self", ".", "data", ".", "copy", "(", ")", "# handle index is gene_names", "i...
pre-processed the data frame.new filtering methods will be implement here.
[ "pre", "-", "processed", "the", "data", "frame", ".", "new", "filtering", "methods", "will", "be", "implement", "here", "." ]
python
test
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/app/application.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/app/application.py#L158-L241
def _use(self, backend_name=None): """Select a backend by name. See class docstring for details. """ # See if we're in a specific testing mode, if so DONT check to see # if it's a valid backend. If it isn't, it's a good thing we # get an error later because we should have decorated our test # with requires_application() test_name = os.getenv('_VISPY_TESTING_APP', None) # Check whether the given name is valid if backend_name is not None: if backend_name.lower() == 'default': backend_name = None # Explicitly use default, avoid using test elif backend_name.lower() not in BACKENDMAP: raise ValueError('backend_name must be one of %s or None, not ' '%r' % (BACKEND_NAMES, backend_name)) elif test_name is not None: backend_name = test_name.lower() assert backend_name in BACKENDMAP # Should we try and load any backend, or just this specific one? try_others = backend_name is None # Get backends to try ... imported_toolkits = [] # Backends for which the native lib is imported backends_to_try = [] if not try_others: # We should never hit this, since we check above assert backend_name.lower() in BACKENDMAP.keys() # Add it backends_to_try.append(backend_name.lower()) else: # See if a backend is loaded for name, module_name, native_module_name in CORE_BACKENDS: if native_module_name and native_module_name in sys.modules: imported_toolkits.append(name.lower()) backends_to_try.append(name.lower()) # See if a default is given default_backend = config['default_backend'].lower() if default_backend.lower() in BACKENDMAP.keys(): if default_backend not in backends_to_try: backends_to_try.append(default_backend) # After this, try each one for name, module_name, native_module_name in CORE_BACKENDS: name = name.lower() if name not in backends_to_try: backends_to_try.append(name) # Now try each one for key in backends_to_try: name, module_name, native_module_name = BACKENDMAP[key] TRIED_BACKENDS.append(name) mod_name = 'backends.' 
+ module_name __import__(mod_name, globals(), level=1) mod = getattr(backends, module_name) if not mod.available: msg = ('Could not import backend "%s":\n%s' % (name, str(mod.why_not))) if not try_others: # Fail if user wanted to use a specific backend raise RuntimeError(msg) elif key in imported_toolkits: # Warn if were unable to use an already imported toolkit msg = ('Although %s is already imported, the %s backend ' 'could not\nbe used ("%s"). \nNote that running ' 'multiple GUI toolkits simultaneously can cause ' 'side effects.' % (native_module_name, name, str(mod.why_not))) logger.warning(msg) else: # Inform otherwise logger.info(msg) else: # Success! self._backend_module = mod logger.debug('Selected backend %s' % module_name) break else: raise RuntimeError('Could not import any of the backends. ' 'You need to install any of %s. We recommend ' 'PyQt' % [b[0] for b in CORE_BACKENDS]) # Store classes for app backend and canvas backend self._backend = self.backend_module.ApplicationBackend()
[ "def", "_use", "(", "self", ",", "backend_name", "=", "None", ")", ":", "# See if we're in a specific testing mode, if so DONT check to see", "# if it's a valid backend. If it isn't, it's a good thing we", "# get an error later because we should have decorated our test", "# with requires_a...
Select a backend by name. See class docstring for details.
[ "Select", "a", "backend", "by", "name", ".", "See", "class", "docstring", "for", "details", "." ]
python
train
dhhagan/py-opc
opc/__init__.py
https://github.com/dhhagan/py-opc/blob/2c8f19530fb64bf5fd4ee0d694a47850161ed8a7/opc/__init__.py#L612-L642
def save_config_variables(self): """Save the configuration variables in non-volatile memory. This method should be used in conjuction with *write_config_variables*. :rtype: boolean :Example: >>> alpha.save_config_variables() True """ command = 0x43 byte_list = [0x3F, 0x3C, 0x3F, 0x3C, 0x43] success = [0xF3, 0x43, 0x3F, 0x3C, 0x3F, 0x3C] resp = [] # Send the command byte and then wait for 10 ms r = self.cnxn.xfer([command])[0] sleep(10e-3) # append the response of the command byte to the List resp.append(r) # Send the rest of the config bytes for each in byte_list: r = self.cnxn.xfer([each])[0] resp.append(r) sleep(0.1) return True if resp == success else False
[ "def", "save_config_variables", "(", "self", ")", ":", "command", "=", "0x43", "byte_list", "=", "[", "0x3F", ",", "0x3C", ",", "0x3F", ",", "0x3C", ",", "0x43", "]", "success", "=", "[", "0xF3", ",", "0x43", ",", "0x3F", ",", "0x3C", ",", "0x3F", ...
Save the configuration variables in non-volatile memory. This method should be used in conjuction with *write_config_variables*. :rtype: boolean :Example: >>> alpha.save_config_variables() True
[ "Save", "the", "configuration", "variables", "in", "non", "-", "volatile", "memory", ".", "This", "method", "should", "be", "used", "in", "conjuction", "with", "*", "write_config_variables", "*", "." ]
python
valid
chrisvoncsefalvay/urbanpyctionary
urbanpyctionary/client.py
https://github.com/chrisvoncsefalvay/urbanpyctionary/blob/77ce3262d25d16ae9179909a34197c102adb2f06/urbanpyctionary/client.py#L115-L140
def get(self, word): """ Obtains the definition of a word from Urban Dictionary. :param word: word to be searched for :type word: str :return: a result set with all definitions for the word """ url = "https://mashape-community-urban-dictionary.p.mashape.com/define?term=%s" % word try: res = requests.get(url, headers = {"X-Mashape-Key": self.api_key, "Accept": "text/plain"}) except requests.ConnectionError: raise errors.ConnectionError if res.status_code == 200: if res.json()["result_type"] == 'no_results': raise errors.NoResultsError else: return Result(res.json()) else: if res.status_code == 403: raise errors.APIUnauthorizedError
[ "def", "get", "(", "self", ",", "word", ")", ":", "url", "=", "\"https://mashape-community-urban-dictionary.p.mashape.com/define?term=%s\"", "%", "word", "try", ":", "res", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "{", "\"X-Mashape-Key\"", "...
Obtains the definition of a word from Urban Dictionary. :param word: word to be searched for :type word: str :return: a result set with all definitions for the word
[ "Obtains", "the", "definition", "of", "a", "word", "from", "Urban", "Dictionary", "." ]
python
train
pantsbuild/pants
contrib/go/src/python/pants/contrib/go/tasks/go_workspace_task.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/go/src/python/pants/contrib/go/tasks/go_workspace_task.py#L56-L67
def remove_unused_links(dirpath, required_links): """Recursively remove any links in dirpath which are not contained in required_links. :param str dirpath: Absolute path of directory to search. :param container required_links: Container of "in use" links which should not be removed, where each link is an absolute path. """ for root, dirs, files in os.walk(dirpath): for p in chain(dirs, files): p = os.path.join(root, p) if os.path.islink(p) and p not in required_links: os.unlink(p)
[ "def", "remove_unused_links", "(", "dirpath", ",", "required_links", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "dirpath", ")", ":", "for", "p", "in", "chain", "(", "dirs", ",", "files", ")", ":", "p", "=", "o...
Recursively remove any links in dirpath which are not contained in required_links. :param str dirpath: Absolute path of directory to search. :param container required_links: Container of "in use" links which should not be removed, where each link is an absolute path.
[ "Recursively", "remove", "any", "links", "in", "dirpath", "which", "are", "not", "contained", "in", "required_links", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/servers/server_profiles.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/servers/server_profiles.py#L149-L165
def get_profile_ports(self, **kwargs): """ Retrieves the port model associated with a server or server hardware type and enclosure group. Args: enclosureGroupUri (str): The URI of the enclosure group associated with the resource. serverHardwareTypeUri (str): The URI of the server hardware type associated with the resource. serverHardwareUri (str): The URI of the server hardware associated with the resource. Returns: dict: Profile port. """ uri = self._helper.build_uri_with_query_string(kwargs, '/profile-ports') return self._helper.do_get(uri)
[ "def", "get_profile_ports", "(", "self", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "self", ".", "_helper", ".", "build_uri_with_query_string", "(", "kwargs", ",", "'/profile-ports'", ")", "return", "self", ".", "_helper", ".", "do_get", "(", "uri", ")...
Retrieves the port model associated with a server or server hardware type and enclosure group. Args: enclosureGroupUri (str): The URI of the enclosure group associated with the resource. serverHardwareTypeUri (str): The URI of the server hardware type associated with the resource. serverHardwareUri (str): The URI of the server hardware associated with the resource. Returns: dict: Profile port.
[ "Retrieves", "the", "port", "model", "associated", "with", "a", "server", "or", "server", "hardware", "type", "and", "enclosure", "group", "." ]
python
train
sdispater/orator
orator/dbal/foreign_key_constraint.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/dbal/foreign_key_constraint.py#L189-L208
def get_quoted_foreign_columns(self, platform): """ Returns the quoted representation of the referenced table column names the foreign key constraint is associated with. But only if they were defined with one or the referenced table column name is a keyword reserved by the platform. Otherwise the plain unquoted value as inserted is returned. :param platform: The platform to use for quotation. :type platform: Platform :rtype: list """ columns = [] for column in self._foreign_column_names.values(): columns.append(column.get_quoted_name(platform)) return columns
[ "def", "get_quoted_foreign_columns", "(", "self", ",", "platform", ")", ":", "columns", "=", "[", "]", "for", "column", "in", "self", ".", "_foreign_column_names", ".", "values", "(", ")", ":", "columns", ".", "append", "(", "column", ".", "get_quoted_name",...
Returns the quoted representation of the referenced table column names the foreign key constraint is associated with. But only if they were defined with one or the referenced table column name is a keyword reserved by the platform. Otherwise the plain unquoted value as inserted is returned. :param platform: The platform to use for quotation. :type platform: Platform :rtype: list
[ "Returns", "the", "quoted", "representation", "of", "the", "referenced", "table", "column", "names", "the", "foreign", "key", "constraint", "is", "associated", "with", "." ]
python
train
toomore/goristock
grs/goristock.py
https://github.com/toomore/goristock/blob/e61f57f11a626cfbc4afbf66337fd9d1c51e3e71/grs/goristock.py#L616-L658
def XMPP_display(self,*arg): """ For XMPP Demo 輸出到 XMPP 之樣式。 """ MA = '' for i in arg: MAs = '- MA%02s: %.2f %s(%s)\n' % ( unicode(i), self.MA(i), self.MAC(i), unicode(self.MA_serial(i)[0]) ) MA = MA + MAs vol = '- Volume: %s %s(%s)' % ( unicode(self.MAVOL(1)/1000), unicode(self.MACVOL(1)), unicode(self.MAVOL_serial(1)[0]) ) MAO = self.MAO(3,6) re = """%(stock_name)s %(stock_no)s %(stock_date)s: %(stock_price)s %(stock_range)s(%(range_per)+.2f%%) %(MA)s%(vol)s - MAO(3-6): %(MAO_v).2f %(MAO_c)s(%(MAO_times)s) - RABC: %(RABC)s""" % { 'stock_name': unicode(self.stock_name), 'stock_no': unicode(self.stock_no), 'stock_date': unicode(self.data_date[-1]), 'stock_price': unicode(self.raw_data[-1]), 'stock_range': unicode(self.stock_range[-1]), 'range_per': self.range_per, 'MA': MA, 'vol': vol, 'MAO_v': MAO[0][1][-1], 'MAO_c': unicode(MAO[1]), 'MAO_times': unicode(MAO[0][0]), 'RABC': self.RABC } return re
[ "def", "XMPP_display", "(", "self", ",", "*", "arg", ")", ":", "MA", "=", "''", "for", "i", "in", "arg", ":", "MAs", "=", "'- MA%02s: %.2f %s(%s)\\n'", "%", "(", "unicode", "(", "i", ")", ",", "self", ".", "MA", "(", "i", ")", ",", "self", ".", ...
For XMPP Demo 輸出到 XMPP 之樣式。
[ "For", "XMPP", "Demo", "輸出到", "XMPP", "之樣式。" ]
python
train
MartinThoma/hwrt
hwrt/create_ffiles.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/create_ffiles.py#L302-L322
def _normalize_features(feature_list, prepared, is_traindata): """Normalize features (mean subtraction, division by variance or range). """ if is_traindata: _calculate_feature_stats(feature_list, prepared, "featurenormalization.csv") start = 0 for feature in feature_list: end = start + feature.get_dimension() # For every instance in the dataset: Normalize! for i in range(len(prepared)): # The 0 is necessary as every element is (x, y) feature_range = (feature.max - feature.min) if feature_range == 0: feature_range = 1 prepared[i][0][start:end] = (prepared[i][0][start:end] - feature.mean) / feature_range start = end return prepared
[ "def", "_normalize_features", "(", "feature_list", ",", "prepared", ",", "is_traindata", ")", ":", "if", "is_traindata", ":", "_calculate_feature_stats", "(", "feature_list", ",", "prepared", ",", "\"featurenormalization.csv\"", ")", "start", "=", "0", "for", "featu...
Normalize features (mean subtraction, division by variance or range).
[ "Normalize", "features", "(", "mean", "subtraction", "division", "by", "variance", "or", "range", ")", "." ]
python
train
theonion/django-bulbs
bulbs/api/views.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/api/views.py#L159-L183
def publish(self, request, **kwargs): """sets the `published` value of the `Content` :param request: a WSGI request object :param kwargs: keyword arguments (optional) :return: `rest_framework.response.Response` """ content = self.get_object() if "published" in get_request_data(request): if not get_request_data(request)["published"]: content.published = None else: publish_dt = parse_datetime(get_request_data(request)["published"]) if publish_dt: publish_dt = publish_dt.astimezone(timezone.utc) else: publish_dt = None content.published = publish_dt else: content.published = timezone.now() content.save() LogEntry.objects.log(request.user, content, content.get_status()) return Response({"status": content.get_status(), "published": content.published})
[ "def", "publish", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "content", "=", "self", ".", "get_object", "(", ")", "if", "\"published\"", "in", "get_request_data", "(", "request", ")", ":", "if", "not", "get_request_data", "(", "reque...
sets the `published` value of the `Content` :param request: a WSGI request object :param kwargs: keyword arguments (optional) :return: `rest_framework.response.Response`
[ "sets", "the", "published", "value", "of", "the", "Content" ]
python
train
olt/scriptine
scriptine/_path.py
https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L217-L228
def splitext(self): """ p.splitext() -> Return (p.stripext(), p.ext). Split the filename extension from this path and return the two parts. Either part may be empty. The extension is everything from '.' to the end of the last path segment. This has the property that if (a, b) == p.splitext(), then a + b == p. """ filename, ext = os.path.splitext(self) return self.__class__(filename), ext
[ "def", "splitext", "(", "self", ")", ":", "filename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "self", ")", "return", "self", ".", "__class__", "(", "filename", ")", ",", "ext" ]
p.splitext() -> Return (p.stripext(), p.ext). Split the filename extension from this path and return the two parts. Either part may be empty. The extension is everything from '.' to the end of the last path segment. This has the property that if (a, b) == p.splitext(), then a + b == p.
[ "p", ".", "splitext", "()", "-", ">", "Return", "(", "p", ".", "stripext", "()", "p", ".", "ext", ")", "." ]
python
train
celery/cell
cell/workflow/entities.py
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/workflow/entities.py#L73-L82
def main(self, *args, **kwargs): """Implement the actor main loop by waiting forever for messages.""" self.start(*args, **kwargs) try: while 1: body, message = yield self.receive() handler = self.get_handler(message) handler(body, message) finally: self.stop(*args, **kwargs)
[ "def", "main", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "start", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "while", "1", ":", "body", ",", "message", "=", "yield", "self", ".", "receive", ...
Implement the actor main loop by waiting forever for messages.
[ "Implement", "the", "actor", "main", "loop", "by", "waiting", "forever", "for", "messages", "." ]
python
train
OpenKMIP/PyKMIP
kmip/services/kmip_client.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/services/kmip_client.py#L969-L1033
def sign(self, data, unique_identifier=None, cryptographic_parameters=None, credential=None): """ Sign specified data using a specified signing key. Args: data (bytes): Data to be signed. Required. unique_identifier (string): The unique ID of the signing key to be used. Optional, defaults to None. cryptographic_parameters (CryptographicParameters): A structure containing various cryptographic settings to be used for creating the signature. Optional, defaults to None. credential (Credential): A credential object containing a set of authorization parameters for the operation. Optional, defaults to None. Returns: dict: The results of the sign operation, containing the following key/value pairs: Key | Value ---------------------|----------------------------------------- 'unique_identifier' | (string) The unique ID of the signing | key used to create the signature 'signature' | (bytes) The bytes of the signature 'result_status' | (ResultStatus) An enumeration indicating | the status of the operation result 'result_reason' | (ResultReason) An enumeration providing | context for the result status. 'result_message' | (string) A message providing additional | context for the operation result. 
""" operation = Operation(OperationEnum.SIGN) request_payload = payloads.SignRequestPayload( unique_identifier=unique_identifier, cryptographic_parameters=cryptographic_parameters, data=data ) batch_item = messages.RequestBatchItem( operation=operation, request_payload=request_payload ) request = self._build_request_message(credential, [batch_item]) response = self._send_and_receive_message(request) batch_item = response.batch_items[0] payload = batch_item.response_payload result = {} if payload: result['unique_identifier'] = payload.unique_identifier result['signature'] = payload.signature_data result['result_status'] = batch_item.result_status.value try: result['result_reason'] = batch_item.result_reason.value except Exception: result['result_reason'] = batch_item.result_reason try: result['result_message'] = batch_item.result_message.value except Exception: result['result_message'] = batch_item.result_message return result
[ "def", "sign", "(", "self", ",", "data", ",", "unique_identifier", "=", "None", ",", "cryptographic_parameters", "=", "None", ",", "credential", "=", "None", ")", ":", "operation", "=", "Operation", "(", "OperationEnum", ".", "SIGN", ")", "request_payload", ...
Sign specified data using a specified signing key. Args: data (bytes): Data to be signed. Required. unique_identifier (string): The unique ID of the signing key to be used. Optional, defaults to None. cryptographic_parameters (CryptographicParameters): A structure containing various cryptographic settings to be used for creating the signature. Optional, defaults to None. credential (Credential): A credential object containing a set of authorization parameters for the operation. Optional, defaults to None. Returns: dict: The results of the sign operation, containing the following key/value pairs: Key | Value ---------------------|----------------------------------------- 'unique_identifier' | (string) The unique ID of the signing | key used to create the signature 'signature' | (bytes) The bytes of the signature 'result_status' | (ResultStatus) An enumeration indicating | the status of the operation result 'result_reason' | (ResultReason) An enumeration providing | context for the result status. 'result_message' | (string) A message providing additional | context for the operation result.
[ "Sign", "specified", "data", "using", "a", "specified", "signing", "key", "." ]
python
test
ask/redish
redish/types.py
https://github.com/ask/redish/blob/4845f8d5e12fd953ecad624b4e1e89f79a082a3e/redish/types.py#L322-L326
def range_by_score(self, min, max, num=None, withscores=False): """Return all the elements with score >= min and score <= max (a range query) from the sorted set.""" return self.client.zrangebyscore(self.name, min, max, num=num, withscores=withscores)
[ "def", "range_by_score", "(", "self", ",", "min", ",", "max", ",", "num", "=", "None", ",", "withscores", "=", "False", ")", ":", "return", "self", ".", "client", ".", "zrangebyscore", "(", "self", ".", "name", ",", "min", ",", "max", ",", "num", "...
Return all the elements with score >= min and score <= max (a range query) from the sorted set.
[ "Return", "all", "the", "elements", "with", "score", ">", "=", "min", "and", "score", "<", "=", "max", "(", "a", "range", "query", ")", "from", "the", "sorted", "set", "." ]
python
train
avelkoski/FRB
fred/clients/releases.py
https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/clients/releases.py#L195-L225
def related_tags(self,release_id=None,tag_names=None,response_type=None,params=None): """ Function to request FRED related tags for a particular release. FRED tags are attributes assigned to series. Series are assigned tags and releases. Indirectly through series, it is possible to get the tags for a category. No tags exist for a release that does not have series. `<https://research.stlouisfed.org/docs/api/fred/release_related_tags.html>`_ :arg int release_id: The id for a release. Required. :arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea". Required :arg str response_type: File extension of response. Options are 'xml', 'json', 'dict','df','numpy','csv','tab,'pipe'. Required. :arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD" :arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD" :arg int limit: The maximum number of results to return. Options 1 to 1000 :arg int offset: Data offset. Options >=0 :arg str order_by: Order results by values of the specified attribute. Options are 'series_count', 'popularity', 'created', 'name', 'group_id' :arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc' :arg str exclude_tag_names: Tag names to exclude. Separate with semicolon as in "income;bea" :arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src' :arg str search_text: The words to find matching tags with. For example 'mortgage rates' :arg bool ssl_verify: To verify HTTPs. """ path='/release/related_tags?' params['release_id'], params['tag_names'] = release_id, tag_names response_type = response_type if response_type else self.response_type if response_type != 'xml': params['file_type'] = 'json' response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify) return response
[ "def", "related_tags", "(", "self", ",", "release_id", "=", "None", ",", "tag_names", "=", "None", ",", "response_type", "=", "None", ",", "params", "=", "None", ")", ":", "path", "=", "'/release/related_tags?'", "params", "[", "'release_id'", "]", ",", "p...
Function to request FRED related tags for a particular release. FRED tags are attributes assigned to series. Series are assigned tags and releases. Indirectly through series, it is possible to get the tags for a category. No tags exist for a release that does not have series. `<https://research.stlouisfed.org/docs/api/fred/release_related_tags.html>`_ :arg int release_id: The id for a release. Required. :arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea". Required :arg str response_type: File extension of response. Options are 'xml', 'json', 'dict','df','numpy','csv','tab,'pipe'. Required. :arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD" :arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD" :arg int limit: The maximum number of results to return. Options 1 to 1000 :arg int offset: Data offset. Options >=0 :arg str order_by: Order results by values of the specified attribute. Options are 'series_count', 'popularity', 'created', 'name', 'group_id' :arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc' :arg str exclude_tag_names: Tag names to exclude. Separate with semicolon as in "income;bea" :arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src' :arg str search_text: The words to find matching tags with. For example 'mortgage rates' :arg bool ssl_verify: To verify HTTPs.
[ "Function", "to", "request", "FRED", "related", "tags", "for", "a", "particular", "release", ".", "FRED", "tags", "are", "attributes", "assigned", "to", "series", ".", "Series", "are", "assigned", "tags", "and", "releases", ".", "Indirectly", "through", "serie...
python
train
ratt-ru/PyMORESANE
pymoresane/iuwt.py
https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/iuwt.py#L384-L499
def gpu_iuwt_decomposition(in1, scale_count, scale_adjust, store_smoothed, store_on_gpu): """ This function calls the a trous algorithm code to decompose the input into its wavelet coefficients. This is the isotropic undecimated wavelet transform implemented for a GPU. INPUTS: in1 (no default): Array on which the decomposition is to be performed. scale_count (no default): Maximum scale to be considered. scale_adjust (no default): Adjustment to scale value if first scales are of no interest. store_smoothed (no default): Boolean specifier for whether the smoothed image is stored or not. store_on_gpu (no default): Boolean specifier for whether the decomposition is stored on the gpu or not. OUTPUTS: detail_coeffs Array containing the detail coefficients. C0 (optional): Array containing the smoothest version of the input. """ # The following simple kernel just allows for the construction of a 3D decomposition on the GPU. ker = SourceModule(""" __global__ void gpu_store_detail_coeffs(float *in1, float *in2, float* out1, int *scale, int *adjust) { const int len = gridDim.x*blockDim.x; const int i = (blockDim.x * blockIdx.x + threadIdx.x); const int j = (blockDim.y * blockIdx.y + threadIdx.y)*len; const int k = (blockDim.z * blockIdx.z + threadIdx.z)*(len*len); const int tid2 = i + j; const int tid3 = i + j + k; if ((blockIdx.z + adjust[0])==scale[0]) { out1[tid3] = in1[tid2] - in2[tid2]; } } """) wavelet_filter = (1./16)*np.array([1,4,6,4,1], dtype=np.float32) # Filter-bank for use in the a trous algorithm. wavelet_filter = gpuarray.to_gpu_async(wavelet_filter) # Initialises an empty array to store the detail coefficients. detail_coeffs = gpuarray.empty([scale_count-scale_adjust, in1.shape[0], in1.shape[1]], np.float32) # Determines whether the array is already on the GPU or not. If not, moves it to the GPU. try: gpu_in1 = gpuarray.to_gpu_async(in1.astype(np.float32)) except: gpu_in1 = in1 # Sets up some working arrays on the GPU to prevent memory transfers. 
gpu_tmp = gpuarray.empty_like(gpu_in1) gpu_out1 = gpuarray.empty_like(gpu_in1) gpu_out2 = gpuarray.empty_like(gpu_in1) # Sets up some parameters required by the algorithm on the GPU. gpu_scale = gpuarray.zeros([1], np.int32) gpu_adjust = gpuarray.zeros([1], np.int32) gpu_adjust += scale_adjust # Fetches the a trous kernels and sets up the unique storing kernel. gpu_a_trous_row_kernel, gpu_a_trous_col_kernel = gpu_a_trous() gpu_store_detail_coeffs = ker.get_function("gpu_store_detail_coeffs") grid_rows = int(in1.shape[0]//32) grid_cols = int(in1.shape[1]//32) # The following loop, which iterates up to scale_adjust, applies the a trous algorithm to the scales which are # considered insignificant. This is important as each set of wavelet coefficients depends on the last smoothed # version of the input. if scale_adjust>0: for i in range(0, scale_adjust): gpu_a_trous_row_kernel(gpu_in1, gpu_tmp, wavelet_filter, gpu_scale, block=(32,32,1), grid=(grid_cols, grid_rows)) gpu_a_trous_col_kernel(gpu_tmp, gpu_out1, wavelet_filter, gpu_scale, block=(32,32,1), grid=(grid_cols, grid_rows)) gpu_in1, gpu_out1 = gpu_out1, gpu_in1 gpu_scale += 1 # The meat of the algorithm - two sequential applications fo the a trous followed by determination and storing of # the detail coefficients. C0 is reassigned the value of C on each loop - C0 is always the smoothest version of the # input image. for i in range(scale_adjust, scale_count): gpu_a_trous_row_kernel(gpu_in1, gpu_tmp, wavelet_filter, gpu_scale, block=(32,32,1), grid=(grid_cols, grid_rows)) gpu_a_trous_col_kernel(gpu_tmp, gpu_out1, wavelet_filter, gpu_scale, block=(32,32,1), grid=(grid_cols, grid_rows)) # Approximation coefficients. gpu_a_trous_row_kernel(gpu_out1, gpu_tmp, wavelet_filter, gpu_scale, block=(32,32,1), grid=(grid_cols, grid_rows)) gpu_a_trous_col_kernel(gpu_tmp, gpu_out2, wavelet_filter, gpu_scale, block=(32,32,1), grid=(grid_cols, grid_rows)) # Approximation coefficients. 
gpu_store_detail_coeffs(gpu_in1, gpu_out2, detail_coeffs, gpu_scale, gpu_adjust, block=(32,32,1), grid=(grid_cols, grid_rows, int(scale_count))) # Detail coefficients. gpu_in1, gpu_out1 = gpu_out1, gpu_in1 gpu_scale += 1 # Return values depend on mode. NOTE: store_smoothed does not work if the result stays on the gpu. if store_on_gpu: return detail_coeffs elif store_smoothed: return detail_coeffs.get(), gpu_in1.get() else: return detail_coeffs.get()
[ "def", "gpu_iuwt_decomposition", "(", "in1", ",", "scale_count", ",", "scale_adjust", ",", "store_smoothed", ",", "store_on_gpu", ")", ":", "# The following simple kernel just allows for the construction of a 3D decomposition on the GPU.", "ker", "=", "SourceModule", "(", "\"\"...
This function calls the a trous algorithm code to decompose the input into its wavelet coefficients. This is the isotropic undecimated wavelet transform implemented for a GPU. INPUTS: in1 (no default): Array on which the decomposition is to be performed. scale_count (no default): Maximum scale to be considered. scale_adjust (no default): Adjustment to scale value if first scales are of no interest. store_smoothed (no default): Boolean specifier for whether the smoothed image is stored or not. store_on_gpu (no default): Boolean specifier for whether the decomposition is stored on the gpu or not. OUTPUTS: detail_coeffs Array containing the detail coefficients. C0 (optional): Array containing the smoothest version of the input.
[ "This", "function", "calls", "the", "a", "trous", "algorithm", "code", "to", "decompose", "the", "input", "into", "its", "wavelet", "coefficients", ".", "This", "is", "the", "isotropic", "undecimated", "wavelet", "transform", "implemented", "for", "a", "GPU", ...
python
train
Murali-group/halp
halp/utilities/undirected_graph_transformations.py
https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/utilities/undirected_graph_transformations.py#L81-L107
def from_networkx_graph(nx_graph): """Returns an UndirectedHypergraph object that is the graph equivalent of the given NetworkX Graph object. :param nx_graph: the NetworkX undirected graph object to transform. :returns: UndirectedHypergraph -- H object equivalent to the NetworkX undirected graph. :raises: TypeError -- Transformation only applicable to undirected NetworkX graphs """ import networkx as nx if not isinstance(nx_graph, nx.Graph): raise TypeError("Transformation only applicable to undirected \ NetworkX graphs") G = UndirectedHypergraph() for node in nx_graph.nodes_iter(): G.add_node(node, copy.copy(nx_graph.node[node])) for edge in nx_graph.edges_iter(): G.add_hyperedge([edge[0], edge[1]], copy.copy(nx_graph[edge[0]][edge[1]])) return G
[ "def", "from_networkx_graph", "(", "nx_graph", ")", ":", "import", "networkx", "as", "nx", "if", "not", "isinstance", "(", "nx_graph", ",", "nx", ".", "Graph", ")", ":", "raise", "TypeError", "(", "\"Transformation only applicable to undirected \\\n ...
Returns an UndirectedHypergraph object that is the graph equivalent of the given NetworkX Graph object. :param nx_graph: the NetworkX undirected graph object to transform. :returns: UndirectedHypergraph -- H object equivalent to the NetworkX undirected graph. :raises: TypeError -- Transformation only applicable to undirected NetworkX graphs
[ "Returns", "an", "UndirectedHypergraph", "object", "that", "is", "the", "graph", "equivalent", "of", "the", "given", "NetworkX", "Graph", "object", "." ]
python
train
Jammy2211/PyAutoLens
autolens/lens/ray_tracing.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/lens/ray_tracing.py#L266-L317
def grid_at_redshift_from_image_plane_grid_and_redshift(self, image_plane_grid, redshift): """For an input grid of (y,x) arc-second image-plane coordinates, ray-trace the coordinates to any redshift in \ the strong lens configuration. This is performed using multi-plane ray-tracing and the existing redshifts and planes of the tracer. However, \ any redshift can be input even if a plane does not exist there, including redshifts before the first plane \ of the lensing system. Parameters ---------- image_plane_grid : ndsrray or grids.RegularGrid The image-plane grid which is traced to the redshift. redshift : float The redshift the image-plane grid is traced to. """ # TODO : We need to come up with a better abstraction for multi-plane lensing 0_0 image_plane_grid_stack = grids.GridStack(regular=image_plane_grid, sub=np.array([[0.0, 0.0]]), blurring=np.array([[0.0, 0.0]])) tracer = TracerMultiPlanes(galaxies=self.galaxies, image_plane_grid_stack=image_plane_grid_stack, border=None, cosmology=self.cosmology) for plane_index in range(0, len(self.plane_redshifts)): new_grid_stack = image_plane_grid_stack if redshift <= tracer.plane_redshifts[plane_index]: # If redshift is between two planes, we need to map over all previous planes coordinates / deflections. if plane_index > 0: for previous_plane_index in range(plane_index): scaling_factor = cosmology_util.scaling_factor_between_redshifts_from_redshifts_and_cosmology( redshift_0=tracer.plane_redshifts[previous_plane_index], redshift_1=redshift, redshift_final=tracer.plane_redshifts[-1], cosmology=tracer.cosmology) scaled_deflection_stack = lens_util.scaled_deflection_stack_from_plane_and_scaling_factor( plane=tracer.planes[previous_plane_index], scaling_factor=scaling_factor) new_grid_stack = \ lens_util.grid_stack_from_deflection_stack(grid_stack=new_grid_stack, deflection_stack=scaled_deflection_stack) # If redshift is before the first plane, no change to image pllane coordinates. 
elif plane_index == 0: return new_grid_stack.regular return new_grid_stack.regular
[ "def", "grid_at_redshift_from_image_plane_grid_and_redshift", "(", "self", ",", "image_plane_grid", ",", "redshift", ")", ":", "# TODO : We need to come up with a better abstraction for multi-plane lensing 0_0", "image_plane_grid_stack", "=", "grids", ".", "GridStack", "(", "regula...
For an input grid of (y,x) arc-second image-plane coordinates, ray-trace the coordinates to any redshift in \ the strong lens configuration. This is performed using multi-plane ray-tracing and the existing redshifts and planes of the tracer. However, \ any redshift can be input even if a plane does not exist there, including redshifts before the first plane \ of the lensing system. Parameters ---------- image_plane_grid : ndsrray or grids.RegularGrid The image-plane grid which is traced to the redshift. redshift : float The redshift the image-plane grid is traced to.
[ "For", "an", "input", "grid", "of", "(", "y", "x", ")", "arc", "-", "second", "image", "-", "plane", "coordinates", "ray", "-", "trace", "the", "coordinates", "to", "any", "redshift", "in", "\\", "the", "strong", "lens", "configuration", "." ]
python
valid
spyder-ide/spyder
spyder/plugins/editor/utils/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/editor.py#L278-L286
def cursor_position(self): """ Returns the QTextCursor position. The position is a tuple made up of the line number (0 based) and the column number (0 based). :return: tuple(line, column) """ return (self._editor.textCursor().blockNumber(), self._editor.textCursor().columnNumber())
[ "def", "cursor_position", "(", "self", ")", ":", "return", "(", "self", ".", "_editor", ".", "textCursor", "(", ")", ".", "blockNumber", "(", ")", ",", "self", ".", "_editor", ".", "textCursor", "(", ")", ".", "columnNumber", "(", ")", ")" ]
Returns the QTextCursor position. The position is a tuple made up of the line number (0 based) and the column number (0 based). :return: tuple(line, column)
[ "Returns", "the", "QTextCursor", "position", ".", "The", "position", "is", "a", "tuple", "made", "up", "of", "the", "line", "number", "(", "0", "based", ")", "and", "the", "column", "number", "(", "0", "based", ")", "." ]
python
train
IDSIA/sacred
sacred/run.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/run.py#L142-L158
def add_resource(self, filename): """Add a file as a resource. In Sacred terminology a resource is a file that the experiment needed to access during a run. In case of a MongoObserver that means making sure the file is stored in the database (but avoiding duplicates) along its path and md5 sum. See also :py:meth:`sacred.Experiment.add_resource`. Parameters ---------- filename : str name of the file to be stored as a resource """ filename = os.path.abspath(filename) self._emit_resource_added(filename)
[ "def", "add_resource", "(", "self", ",", "filename", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "self", ".", "_emit_resource_added", "(", "filename", ")" ]
Add a file as a resource. In Sacred terminology a resource is a file that the experiment needed to access during a run. In case of a MongoObserver that means making sure the file is stored in the database (but avoiding duplicates) along its path and md5 sum. See also :py:meth:`sacred.Experiment.add_resource`. Parameters ---------- filename : str name of the file to be stored as a resource
[ "Add", "a", "file", "as", "a", "resource", "." ]
python
train
Parsl/parsl
parsl/dataflow/dflow.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/dataflow/dflow.py#L586-L702
def submit(self, func, *args, executors='all', fn_hash=None, cache=False, **kwargs): """Add task to the dataflow system. If the app task has the executors attributes not set (default=='all') the task will be launched on a randomly selected executor from the list of executors. If the app task specifies a particular set of executors, it will be targeted at the specified executors. >>> IF all deps are met: >>> send to the runnable queue and launch the task >>> ELSE: >>> post the task in the pending queue Args: - func : A function object - *args : Args to the function KWargs : - executors (list or string) : List of executors this call could go to. Default='all' - fn_hash (Str) : Hash of the function and inputs Default=None - cache (Bool) : To enable memoization or not - kwargs (dict) : Rest of the kwargs to the fn passed as dict. Returns: (AppFuture) [DataFutures,] """ if self.cleanup_called: raise ValueError("Cannot submit to a DFK that has been cleaned up") task_id = self.task_count self.task_count += 1 if isinstance(executors, str) and executors.lower() == 'all': choices = list(e for e in self.executors if e != 'data_manager') elif isinstance(executors, list): choices = executors executor = random.choice(choices) # Transform remote input files to data futures args, kwargs = self._add_input_deps(executor, args, kwargs) task_def = {'depends': None, 'executor': executor, 'func': func, 'func_name': func.__name__, 'args': args, 'kwargs': kwargs, 'fn_hash': fn_hash, 'memoize': cache, 'callback': None, 'exec_fu': None, 'checkpoint': None, 'fail_count': 0, 'fail_history': [], 'env': None, 'status': States.unsched, 'id': task_id, 'time_submitted': None, 'time_returned': None, 'app_fu': None} if task_id in self.tasks: raise DuplicateTaskError( "internal consistency error: Task {0} already exists in task list".format(task_id)) else: self.tasks[task_id] = task_def # Get the dep count and a list of dependencies for the task dep_cnt, depends = self._gather_all_deps(args, kwargs) 
self.tasks[task_id]['depends'] = depends # Extract stdout and stderr to pass to AppFuture: task_stdout = kwargs.get('stdout') task_stderr = kwargs.get('stderr') logger.info("Task {} submitted for App {}, waiting on tasks {}".format(task_id, task_def['func_name'], [fu.tid for fu in depends])) self.tasks[task_id]['task_launch_lock'] = threading.Lock() app_fu = AppFuture(tid=task_id, stdout=task_stdout, stderr=task_stderr) self.tasks[task_id]['app_fu'] = app_fu app_fu.add_done_callback(partial(self.handle_app_update, task_id)) self.tasks[task_id]['status'] = States.pending logger.debug("Task {} set to pending state with AppFuture: {}".format(task_id, task_def['app_fu'])) # at this point add callbacks to all dependencies to do a launch_if_ready # call whenever a dependency completes. # we need to be careful about the order of setting the state to pending, # adding the callbacks, and caling launch_if_ready explicitly once always below. # I think as long as we call launch_if_ready once after setting pending, then # we can add the callback dependencies at any point: if the callbacks all fire # before then, they won't cause a launch, but the one below will. if they fire # after we set it pending, then the last one will cause a launch, and the # explicit one won't. for d in depends: def callback_adapter(dep_fut): self.launch_if_ready(task_id) try: d.add_done_callback(callback_adapter) except Exception as e: logger.error("add_done_callback got an exception {} which will be ignored".format(e)) self.launch_if_ready(task_id) return task_def['app_fu']
[ "def", "submit", "(", "self", ",", "func", ",", "*", "args", ",", "executors", "=", "'all'", ",", "fn_hash", "=", "None", ",", "cache", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "cleanup_called", ":", "raise", "ValueError", ...
Add task to the dataflow system. If the app task has the executors attributes not set (default=='all') the task will be launched on a randomly selected executor from the list of executors. If the app task specifies a particular set of executors, it will be targeted at the specified executors. >>> IF all deps are met: >>> send to the runnable queue and launch the task >>> ELSE: >>> post the task in the pending queue Args: - func : A function object - *args : Args to the function KWargs : - executors (list or string) : List of executors this call could go to. Default='all' - fn_hash (Str) : Hash of the function and inputs Default=None - cache (Bool) : To enable memoization or not - kwargs (dict) : Rest of the kwargs to the fn passed as dict. Returns: (AppFuture) [DataFutures,]
[ "Add", "task", "to", "the", "dataflow", "system", "." ]
python
valid
gwastro/pycbc
pycbc/population/rates_functions.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/population/rates_functions.py#L58-L95
def save_bkg_falloff(fname_statmap, fname_bank, path, rhomin, lo_mchirp, hi_mchirp): ''' Read the STATMAP files to derive snr falloff for the background events. Save the output to a txt file Bank file is also provided to restrict triggers to BBH templates. Parameters ---------- fname_statmap: string STATMAP file containing trigger information fname_bank: string File name of the template bank path: string Destination where txt file is saved rhomin: float Minimum value of SNR threhold (will need including ifar) lo_mchirp: float Minimum chirp mass for the template hi_mchirp: float Maximum chirp mass for template ''' with h5py.File(fname_bank, 'r') as bulk: mass1_bank = bulk['mass1'][:] mass2_bank = bulk['mass2'][:] full_data = process_full_data(fname_statmap, rhomin, mass1_bank, mass2_bank, lo_mchirp, hi_mchirp) max_bg_stat = np.max(full_data['cstat_back_exc']) bg_bins = np.linspace(rhomin, max_bg_stat, 76) bg_counts = np.histogram(full_data['cstat_back_exc'], weights=full_data['dec_factors'], bins=bg_bins)[0] zerolagstat = full_data['zerolagstat'] coincs = zerolagstat[zerolagstat >= rhomin] bkg = (bg_bins[:-1], bg_bins[1:], bg_counts) return bkg, coincs
[ "def", "save_bkg_falloff", "(", "fname_statmap", ",", "fname_bank", ",", "path", ",", "rhomin", ",", "lo_mchirp", ",", "hi_mchirp", ")", ":", "with", "h5py", ".", "File", "(", "fname_bank", ",", "'r'", ")", "as", "bulk", ":", "mass1_bank", "=", "bulk", "...
Read the STATMAP files to derive snr falloff for the background events. Save the output to a txt file Bank file is also provided to restrict triggers to BBH templates. Parameters ---------- fname_statmap: string STATMAP file containing trigger information fname_bank: string File name of the template bank path: string Destination where txt file is saved rhomin: float Minimum value of SNR threhold (will need including ifar) lo_mchirp: float Minimum chirp mass for the template hi_mchirp: float Maximum chirp mass for template
[ "Read", "the", "STATMAP", "files", "to", "derive", "snr", "falloff", "for", "the", "background", "events", ".", "Save", "the", "output", "to", "a", "txt", "file", "Bank", "file", "is", "also", "provided", "to", "restrict", "triggers", "to", "BBH", "templat...
python
train
aio-libs/aioftp
aioftp/server.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/server.py#L433-L445
async def close(self): """ :py:func:`asyncio.coroutine` Shutdown the server and close all connections. """ self.server.close() tasks = [self.server.wait_closed()] for connection in self.connections.values(): connection._dispatcher.cancel() tasks.append(connection._dispatcher) logger.info("waiting for %d tasks", len(tasks)) await asyncio.wait(tasks)
[ "async", "def", "close", "(", "self", ")", ":", "self", ".", "server", ".", "close", "(", ")", "tasks", "=", "[", "self", ".", "server", ".", "wait_closed", "(", ")", "]", "for", "connection", "in", "self", ".", "connections", ".", "values", "(", "...
:py:func:`asyncio.coroutine` Shutdown the server and close all connections.
[ ":", "py", ":", "func", ":", "asyncio", ".", "coroutine" ]
python
valid
librosa/librosa
librosa/effects.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L101-L142
def harmonic(y, **kwargs): '''Extract harmonic elements from an audio time-series. Parameters ---------- y : np.ndarray [shape=(n,)] audio time series kwargs : additional keyword arguments. See `librosa.decompose.hpss` for details. Returns ------- y_harmonic : np.ndarray [shape=(n,)] audio time series of just the harmonic portion See Also -------- hpss : Separate harmonic and percussive components percussive : Extract only the percussive component librosa.decompose.hpss : HPSS for spectrograms Examples -------- >>> # Extract harmonic component >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> y_harmonic = librosa.effects.harmonic(y) >>> # Use a margin > 1.0 for greater harmonic separation >>> y_harmonic = librosa.effects.harmonic(y, margin=3.0) ''' # Compute the STFT matrix stft = core.stft(y) # Remove percussives stft_harm = decompose.hpss(stft, **kwargs)[0] # Invert the STFTs y_harm = util.fix_length(core.istft(stft_harm, dtype=y.dtype), len(y)) return y_harm
[ "def", "harmonic", "(", "y", ",", "*", "*", "kwargs", ")", ":", "# Compute the STFT matrix", "stft", "=", "core", ".", "stft", "(", "y", ")", "# Remove percussives", "stft_harm", "=", "decompose", ".", "hpss", "(", "stft", ",", "*", "*", "kwargs", ")", ...
Extract harmonic elements from an audio time-series. Parameters ---------- y : np.ndarray [shape=(n,)] audio time series kwargs : additional keyword arguments. See `librosa.decompose.hpss` for details. Returns ------- y_harmonic : np.ndarray [shape=(n,)] audio time series of just the harmonic portion See Also -------- hpss : Separate harmonic and percussive components percussive : Extract only the percussive component librosa.decompose.hpss : HPSS for spectrograms Examples -------- >>> # Extract harmonic component >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> y_harmonic = librosa.effects.harmonic(y) >>> # Use a margin > 1.0 for greater harmonic separation >>> y_harmonic = librosa.effects.harmonic(y, margin=3.0)
[ "Extract", "harmonic", "elements", "from", "an", "audio", "time", "-", "series", "." ]
python
test
mosdef-hub/mbuild
mbuild/formats/gsdwriter.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/formats/gsdwriter.py#L242-L282
def _write_dihedral_information(gsd_file, structure): """Write the dihedrals in the system. Parameters ---------- gsd_file : The file object of the GSD file being written structure : parmed.Structure Parmed structure object holding system information """ gsd_file.dihedrals.N = len(structure.rb_torsions) unique_dihedral_types = set() for dihedral in structure.rb_torsions: t1, t2 = dihedral.atom1.type, dihedral.atom2.type t3, t4 = dihedral.atom3.type, dihedral.atom4.type if [t2, t3] == sorted([t2, t3], key=natural_sort): dihedral_type = ('-'.join((t1, t2, t3, t4))) else: dihedral_type = ('-'.join((t4, t3, t2, t1))) unique_dihedral_types.add(dihedral_type) unique_dihedral_types = sorted(list(unique_dihedral_types), key=natural_sort) gsd_file.dihedrals.types = unique_dihedral_types dihedral_typeids = [] dihedral_groups = [] for dihedral in structure.rb_torsions: t1, t2 = dihedral.atom1.type, dihedral.atom2.type t3, t4 = dihedral.atom3.type, dihedral.atom4.type if [t2, t3] == sorted([t2, t3], key=natural_sort): dihedral_type = ('-'.join((t1, t2, t3, t4))) else: dihedral_type = ('-'.join((t4, t3, t2, t1))) dihedral_typeids.append(unique_dihedral_types.index(dihedral_type)) dihedral_groups.append((dihedral.atom1.idx, dihedral.atom2.idx, dihedral.atom3.idx, dihedral.atom4.idx)) gsd_file.dihedrals.typeid = dihedral_typeids gsd_file.dihedrals.group = dihedral_groups
[ "def", "_write_dihedral_information", "(", "gsd_file", ",", "structure", ")", ":", "gsd_file", ".", "dihedrals", ".", "N", "=", "len", "(", "structure", ".", "rb_torsions", ")", "unique_dihedral_types", "=", "set", "(", ")", "for", "dihedral", "in", "structure...
Write the dihedrals in the system. Parameters ---------- gsd_file : The file object of the GSD file being written structure : parmed.Structure Parmed structure object holding system information
[ "Write", "the", "dihedrals", "in", "the", "system", "." ]
python
train
CygnusNetworks/pypureomapi
pypureomapi.py
https://github.com/CygnusNetworks/pypureomapi/blob/ff4459678ec023fd56e64ce518a86860efec26bf/pypureomapi.py#L1129-L1145
def lookup_host(self, name): """Look for a host object with given name and return the name, mac, and ip address @type name: str @rtype: dict or None @raises ValueError: @raises OmapiError: @raises OmapiErrorNotFound: if no host object with the given name could be found @raises OmapiErrorAttributeNotFound: if lease could be found, but objects lacks ip, mac or name @raises socket.error: """ res = self.lookup_by_host(name=name) try: return dict(ip=res["ip-address"], mac=res["hardware-address"], hostname=res["name"].decode('utf-8')) except KeyError: raise OmapiErrorAttributeNotFound()
[ "def", "lookup_host", "(", "self", ",", "name", ")", ":", "res", "=", "self", ".", "lookup_by_host", "(", "name", "=", "name", ")", "try", ":", "return", "dict", "(", "ip", "=", "res", "[", "\"ip-address\"", "]", ",", "mac", "=", "res", "[", "\"har...
Look for a host object with given name and return the name, mac, and ip address @type name: str @rtype: dict or None @raises ValueError: @raises OmapiError: @raises OmapiErrorNotFound: if no host object with the given name could be found @raises OmapiErrorAttributeNotFound: if lease could be found, but objects lacks ip, mac or name @raises socket.error:
[ "Look", "for", "a", "host", "object", "with", "given", "name", "and", "return", "the", "name", "mac", "and", "ip", "address" ]
python
train
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L450-L466
def followed_streams(self, limit=25, offset=0): """Return the streams the current user follows. Needs authorization ``user_read``. :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list`of :class:`models.Stream` instances :raises: :class:`exceptions.NotAuthorizedError` """ r = self.kraken_request('GET', 'streams/followed', params={'limit': limit, 'offset': offset}) return models.Stream.wrap_search(r)
[ "def", "followed_streams", "(", "self", ",", "limit", "=", "25", ",", "offset", "=", "0", ")", ":", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'streams/followed'", ",", "params", "=", "{", "'limit'", ":", "limit", ",", "'offset'", ":"...
Return the streams the current user follows. Needs authorization ``user_read``. :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list`of :class:`models.Stream` instances :raises: :class:`exceptions.NotAuthorizedError`
[ "Return", "the", "streams", "the", "current", "user", "follows", "." ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py#L83-L89
def items(self): """Return a list of the (name, value) pairs of the enum. These are returned in the order they were defined in the .proto file. """ return [(value_descriptor.name, value_descriptor.number) for value_descriptor in self._enum_type.values]
[ "def", "items", "(", "self", ")", ":", "return", "[", "(", "value_descriptor", ".", "name", ",", "value_descriptor", ".", "number", ")", "for", "value_descriptor", "in", "self", ".", "_enum_type", ".", "values", "]" ]
Return a list of the (name, value) pairs of the enum. These are returned in the order they were defined in the .proto file.
[ "Return", "a", "list", "of", "the", "(", "name", "value", ")", "pairs", "of", "the", "enum", "." ]
python
train
ejeschke/ginga
ginga/canvas/CanvasObject.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/canvas/CanvasObject.py#L503-L516
def get_bbox(self, points=None): """ Get bounding box of this object. Returns ------- (p1, p2, p3, p4): a 4-tuple of the points in data coordinates, beginning with the lower-left and proceeding counter-clockwise. """ if points is None: x1, y1, x2, y2 = self.get_llur() return ((x1, y1), (x1, y2), (x2, y2), (x2, y1)) else: return trcalc.strip_z(trcalc.get_bounds(points))
[ "def", "get_bbox", "(", "self", ",", "points", "=", "None", ")", ":", "if", "points", "is", "None", ":", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "self", ".", "get_llur", "(", ")", "return", "(", "(", "x1", ",", "y1", ")", ",", "(", "x1", ...
Get bounding box of this object. Returns ------- (p1, p2, p3, p4): a 4-tuple of the points in data coordinates, beginning with the lower-left and proceeding counter-clockwise.
[ "Get", "bounding", "box", "of", "this", "object", "." ]
python
train
numenta/htmresearch
htmresearch/support/csv_helper.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/csv_helper.py#L166-L252
def readDataAndReshuffle(args, categoriesInOrderOfInterest=None): """ Read data file specified in args, optionally reshuffle categories, print out some statistics, and return various data structures. This routine is pretty specific and only used in some simple test scripts. categoriesInOrderOfInterest (list) Optional list of integers representing the priority order of various categories. The categories in the original data file will be reshuffled to the order in this array, up to args.numLabels, if specified. Returns the tuple: (dataset, labelRefs, documentCategoryMap, documentTextMap) Return format: dataset = [ ["fox eats carrots", [0], docId], ["fox eats peppers", [0], docId], ["carrots are healthy", [1], docId], ["peppers is healthy", [1], docId], ] labelRefs = [Category0Name, Category1Name, ...] documentCategoryMap = { docId: [categoryIndex0, categoryIndex1, ...], docId: [categoryIndex0, categoryIndex1, ...], : } documentTextMap = { docId: documentText, docId: documentText, : } """ # Read data dataDict = readCSV(args.dataPath, 1) labelRefs, dataDict = mapLabelRefs(dataDict) if "numLabels" in args: numLabels = args.numLabels else: numLabels = len(labelRefs) if categoriesInOrderOfInterest is None: categoriesInOrderOfInterest = range(0,numLabels) else: categoriesInOrderOfInterest=categoriesInOrderOfInterest[0:numLabels] # Select data based on categories of interest. 
Shift category indices down # so we go from 0 to numLabels-1 dataSet = [] documentTextMap = {} counts = numpy.zeros(len(labelRefs)) for document in dataDict.itervalues(): try: docId = int(document[2]) except: raise RuntimeError("docId "+str(docId)+" is not an integer") oldCategoryIndex = document[1][0] documentTextMap[docId] = document[0] if oldCategoryIndex in categoriesInOrderOfInterest: newIndex = categoriesInOrderOfInterest.index(oldCategoryIndex) dataSet.append([document[0], [newIndex], docId]) counts[newIndex] += 1 # For each document, figure out which categories it belongs to # Include the shifted category index documentCategoryMap = {} for doc in dataDict.iteritems(): docId = int(doc[1][2]) oldCategoryIndex = doc[1][1][0] if oldCategoryIndex in categoriesInOrderOfInterest: newIndex = categoriesInOrderOfInterest.index(oldCategoryIndex) v = documentCategoryMap.get(docId, []) v.append(newIndex) documentCategoryMap[docId] = v labelRefs = [labelRefs[i] for i in categoriesInOrderOfInterest] print "Total number of unique documents",len(documentCategoryMap) print "Category counts: ",counts print "Categories in training/test data:", labelRefs return dataSet, labelRefs, documentCategoryMap, documentTextMap
[ "def", "readDataAndReshuffle", "(", "args", ",", "categoriesInOrderOfInterest", "=", "None", ")", ":", "# Read data", "dataDict", "=", "readCSV", "(", "args", ".", "dataPath", ",", "1", ")", "labelRefs", ",", "dataDict", "=", "mapLabelRefs", "(", "dataDict", "...
Read data file specified in args, optionally reshuffle categories, print out some statistics, and return various data structures. This routine is pretty specific and only used in some simple test scripts. categoriesInOrderOfInterest (list) Optional list of integers representing the priority order of various categories. The categories in the original data file will be reshuffled to the order in this array, up to args.numLabels, if specified. Returns the tuple: (dataset, labelRefs, documentCategoryMap, documentTextMap) Return format: dataset = [ ["fox eats carrots", [0], docId], ["fox eats peppers", [0], docId], ["carrots are healthy", [1], docId], ["peppers is healthy", [1], docId], ] labelRefs = [Category0Name, Category1Name, ...] documentCategoryMap = { docId: [categoryIndex0, categoryIndex1, ...], docId: [categoryIndex0, categoryIndex1, ...], : } documentTextMap = { docId: documentText, docId: documentText, : }
[ "Read", "data", "file", "specified", "in", "args", "optionally", "reshuffle", "categories", "print", "out", "some", "statistics", "and", "return", "various", "data", "structures", ".", "This", "routine", "is", "pretty", "specific", "and", "only", "used", "in", ...
python
train
rflamary/POT
ot/da.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/da.py#L428-L636
def joint_OT_mapping_kernel(xs, xt, mu=1, eta=0.001, kerneltype='gaussian', sigma=1, bias=False, verbose=False, verbose2=False, numItermax=100, numInnerItermax=10, stopInnerThr=1e-6, stopThr=1e-5, log=False, **kwargs): """Joint OT and nonlinear mapping estimation with kernels as proposed in [8] The function solves the following optimization problem: .. math:: \min_{\gamma,L\in\mathcal{H}}\quad \|L(X_s) - n_s\gamma X_t\|^2_F + \mu<\gamma,M>_F + \eta \|L\|^2_\mathcal{H} s.t. \gamma 1 = a \gamma^T 1= b \gamma\geq 0 where : - M is the (ns,nt) squared euclidean cost matrix between samples in Xs and Xt (scaled by ns) - :math:`L` is a ns x d linear operator on a kernel matrix that approximates the barycentric mapping - a and b are uniform source and target weights The problem consist in solving jointly an optimal transport matrix :math:`\gamma` and the nonlinear mapping that fits the barycentric mapping :math:`n_s\gamma X_t`. One can also estimate a mapping with constant bias (see supplementary material of [8]) using the bias optional argument. The algorithm used for solving the problem is the block coordinate descent that alternates between updates of G (using conditionnal gradient) and the update of L using a classical kernel least square solver. Parameters ---------- xs : np.ndarray (ns,d) samples in the source domain xt : np.ndarray (nt,d) samples in the target domain mu : float,optional Weight for the linear OT loss (>0) eta : float, optional Regularization term for the linear mapping L (>0) bias : bool,optional Estimate linear mapping with constant bias kerneltype : str,optional kernel used by calling function ot.utils.kernel (gaussian by default) sigma : float, optional Gaussian kernel bandwidth. 
numItermax : int, optional Max number of BCD iterations stopThr : float, optional Stop threshold on relative loss decrease (>0) numInnerItermax : int, optional Max number of iterations (inner CG solver) stopInnerThr : float, optional Stop threshold on error (inner CG solver) (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns x nt) ndarray Optimal transportation matrix for the given parameters L : (ns x d) ndarray Nonlinear mapping matrix (ns+1 x d if bias) log : dict log dictionary return only if log==True in parameters References ---------- .. [8] M. Perrot, N. Courty, R. Flamary, A. Habrard, "Mapping estimation for discrete optimal transport", Neural Information Processing Systems (NIPS), 2016. See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT """ ns, nt = xs.shape[0], xt.shape[0] K = kernel(xs, xs, method=kerneltype, sigma=sigma) if bias: K1 = np.hstack((K, np.ones((ns, 1)))) Id = np.eye(ns + 1) Id[-1] = 0 Kp = np.eye(ns + 1) Kp[:ns, :ns] = K # ls regu # K0 = K1.T.dot(K1)+eta*I # Kreg=I # RKHS regul K0 = K1.T.dot(K1) + eta * Kp Kreg = Kp else: K1 = K Id = np.eye(ns) # ls regul # K0 = K1.T.dot(K1)+eta*I # Kreg=I # proper kernel ridge K0 = K + eta * Id Kreg = K if log: log = {'err': []} a, b = unif(ns), unif(nt) M = dist(xs, xt) * ns G = emd(a, b, M) vloss = [] def loss(L, G): """Compute full loss""" return np.sum((K1.dot(L) - ns * G.dot(xt))**2) + mu * \ np.sum(G * M) + eta * np.trace(L.T.dot(Kreg).dot(L)) def solve_L_nobias(G): """ solve L problem with fixed G (least square)""" xst = ns * G.dot(xt) return np.linalg.solve(K0, xst) def solve_L_bias(G): """ solve L problem with fixed G (least square)""" xst = ns * G.dot(xt) return np.linalg.solve(K0, K1.T.dot(xst)) def solve_G(L, G0): """Update G with CG algorithm""" xsi = K1.dot(L) def f(G): return np.sum((xsi - ns * G.dot(xt))**2) def df(G): return -2 * ns * (xsi - ns * G.dot(xt)).dot(xt.T) G = 
cg(a, b, M, 1.0 / mu, f, df, G0=G0, numItermax=numInnerItermax, stopThr=stopInnerThr) return G if bias: solve_L = solve_L_bias else: solve_L = solve_L_nobias L = solve_L(G) vloss.append(loss(L, G)) if verbose: print('{:5s}|{:12s}|{:8s}'.format( 'It.', 'Loss', 'Delta loss') + '\n' + '-' * 32) print('{:5d}|{:8e}|{:8e}'.format(0, vloss[-1], 0)) # init loop if numItermax > 0: loop = 1 else: loop = 0 it = 0 while loop: it += 1 # update G G = solve_G(L, G) # update L L = solve_L(G) vloss.append(loss(L, G)) if it >= numItermax: loop = 0 if abs(vloss[-1] - vloss[-2]) / abs(vloss[-2]) < stopThr: loop = 0 if verbose: if it % 20 == 0: print('{:5s}|{:12s}|{:8s}'.format( 'It.', 'Loss', 'Delta loss') + '\n' + '-' * 32) print('{:5d}|{:8e}|{:8e}'.format( it, vloss[-1], (vloss[-1] - vloss[-2]) / abs(vloss[-2]))) if log: log['loss'] = vloss return G, L, log else: return G, L
[ "def", "joint_OT_mapping_kernel", "(", "xs", ",", "xt", ",", "mu", "=", "1", ",", "eta", "=", "0.001", ",", "kerneltype", "=", "'gaussian'", ",", "sigma", "=", "1", ",", "bias", "=", "False", ",", "verbose", "=", "False", ",", "verbose2", "=", "False...
Joint OT and nonlinear mapping estimation with kernels as proposed in [8] The function solves the following optimization problem: .. math:: \min_{\gamma,L\in\mathcal{H}}\quad \|L(X_s) - n_s\gamma X_t\|^2_F + \mu<\gamma,M>_F + \eta \|L\|^2_\mathcal{H} s.t. \gamma 1 = a \gamma^T 1= b \gamma\geq 0 where : - M is the (ns,nt) squared euclidean cost matrix between samples in Xs and Xt (scaled by ns) - :math:`L` is a ns x d linear operator on a kernel matrix that approximates the barycentric mapping - a and b are uniform source and target weights The problem consist in solving jointly an optimal transport matrix :math:`\gamma` and the nonlinear mapping that fits the barycentric mapping :math:`n_s\gamma X_t`. One can also estimate a mapping with constant bias (see supplementary material of [8]) using the bias optional argument. The algorithm used for solving the problem is the block coordinate descent that alternates between updates of G (using conditionnal gradient) and the update of L using a classical kernel least square solver. Parameters ---------- xs : np.ndarray (ns,d) samples in the source domain xt : np.ndarray (nt,d) samples in the target domain mu : float,optional Weight for the linear OT loss (>0) eta : float, optional Regularization term for the linear mapping L (>0) bias : bool,optional Estimate linear mapping with constant bias kerneltype : str,optional kernel used by calling function ot.utils.kernel (gaussian by default) sigma : float, optional Gaussian kernel bandwidth. 
numItermax : int, optional Max number of BCD iterations stopThr : float, optional Stop threshold on relative loss decrease (>0) numInnerItermax : int, optional Max number of iterations (inner CG solver) stopInnerThr : float, optional Stop threshold on error (inner CG solver) (>0) verbose : bool, optional Print information along iterations log : bool, optional record log if True Returns ------- gamma : (ns x nt) ndarray Optimal transportation matrix for the given parameters L : (ns x d) ndarray Nonlinear mapping matrix (ns+1 x d if bias) log : dict log dictionary return only if log==True in parameters References ---------- .. [8] M. Perrot, N. Courty, R. Flamary, A. Habrard, "Mapping estimation for discrete optimal transport", Neural Information Processing Systems (NIPS), 2016. See Also -------- ot.lp.emd : Unregularized OT ot.optim.cg : General regularized OT
[ "Joint", "OT", "and", "nonlinear", "mapping", "estimation", "with", "kernels", "as", "proposed", "in", "[", "8", "]" ]
python
train
openego/eDisGo
edisgo/tools/pypsa_io.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/tools/pypsa_io.py#L1825-L1858
def update_pypsa_generator_timeseries(network, generators_to_update=None, timesteps=None): """ Updates generator time series in pypsa representation. This function overwrites p_set and q_set of generators_t attribute of pypsa network. Be aware that if you call this function with `timesteps` and thus overwrite current time steps it may lead to inconsistencies in the pypsa network since only generator time series are updated but none of the other time series or the snapshots attribute of the pypsa network. Use the function :func:`update_pypsa_timeseries` to change the time steps you want to analyse in the power flow analysis. This function will also raise an error when a generator that is currently not in the pypsa representation is added. Parameters ---------- network : Network The eDisGo grid topology model overall container generators_to_update : :obj:`list`, optional List with all generators (of type :class:`~.grid.components.Generator`) that need to be updated. If None all generators are updated depending on mode. See :meth:`~.tools.pypsa_io.to_pypsa` for more information. timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>` Timesteps specifies which time steps of the generator time series to export to pypsa representation. If None all time steps currently existing in pypsa representation are updated. If not None current time steps are overwritten by given time steps. Default: None. """ _update_pypsa_timeseries_by_type( network, type='generator', components_to_update=generators_to_update, timesteps=timesteps)
[ "def", "update_pypsa_generator_timeseries", "(", "network", ",", "generators_to_update", "=", "None", ",", "timesteps", "=", "None", ")", ":", "_update_pypsa_timeseries_by_type", "(", "network", ",", "type", "=", "'generator'", ",", "components_to_update", "=", "gener...
Updates generator time series in pypsa representation. This function overwrites p_set and q_set of generators_t attribute of pypsa network. Be aware that if you call this function with `timesteps` and thus overwrite current time steps it may lead to inconsistencies in the pypsa network since only generator time series are updated but none of the other time series or the snapshots attribute of the pypsa network. Use the function :func:`update_pypsa_timeseries` to change the time steps you want to analyse in the power flow analysis. This function will also raise an error when a generator that is currently not in the pypsa representation is added. Parameters ---------- network : Network The eDisGo grid topology model overall container generators_to_update : :obj:`list`, optional List with all generators (of type :class:`~.grid.components.Generator`) that need to be updated. If None all generators are updated depending on mode. See :meth:`~.tools.pypsa_io.to_pypsa` for more information. timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>` Timesteps specifies which time steps of the generator time series to export to pypsa representation. If None all time steps currently existing in pypsa representation are updated. If not None current time steps are overwritten by given time steps. Default: None.
[ "Updates", "generator", "time", "series", "in", "pypsa", "representation", "." ]
python
train
Nic30/hwt
hwt/hdl/statements.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/statements.py#L595-L607
def _get_stm_with_branches(stm_it): """ :return: first statement with rank > 0 or None if iterator empty """ last = None while last is None or last.rank == 0: try: last = next(stm_it) except StopIteration: last = None break return last
[ "def", "_get_stm_with_branches", "(", "stm_it", ")", ":", "last", "=", "None", "while", "last", "is", "None", "or", "last", ".", "rank", "==", "0", ":", "try", ":", "last", "=", "next", "(", "stm_it", ")", "except", "StopIteration", ":", "last", "=", ...
:return: first statement with rank > 0 or None if iterator empty
[ ":", "return", ":", "first", "statement", "with", "rank", ">", "0", "or", "None", "if", "iterator", "empty" ]
python
test
eallik/spinoff
spinoff/actor/uri.py
https://github.com/eallik/spinoff/blob/06b00d6b86c7422c9cb8f9a4b2915906e92b7d52/spinoff/actor/uri.py#L69-L95
def parse(cls, addr): """Parses a new `Uri` instance from a string representation of a URI. >>> u1 = Uri.parse('/foo/bar') >>> u1.node, u1.steps, u1.path, u1.name (None, ['', 'foo', 'bar'], '/foo/bar', 'bar') >>> u2 = Uri.parse('somenode:123/foo/bar') >>> u2.node, u1.steps, u2.path, ur2.name ('somenode:123', ['', 'foo', 'bar'], '/foo/bar', 'bar') >>> u1 = Uri.parse('foo/bar') >>> u1.node, u1.steps, u1.path, u1.name (None, ['foo', 'bar'], 'foo/bar', 'bar') """ if addr.endswith('/'): raise ValueError("Uris must not end in '/'") # pragma: no cover parts = addr.split('/') if ':' in parts[0]: node, parts[0] = parts[0], '' else: node = None ret = None # Uri(name=None, parent=None, node=node) if node else None for step in parts: ret = Uri(name=step, parent=ret, node=node) node = None # only set the node on the root Uri return ret
[ "def", "parse", "(", "cls", ",", "addr", ")", ":", "if", "addr", ".", "endswith", "(", "'/'", ")", ":", "raise", "ValueError", "(", "\"Uris must not end in '/'\"", ")", "# pragma: no cover", "parts", "=", "addr", ".", "split", "(", "'/'", ")", "if", "':'...
Parses a new `Uri` instance from a string representation of a URI. >>> u1 = Uri.parse('/foo/bar') >>> u1.node, u1.steps, u1.path, u1.name (None, ['', 'foo', 'bar'], '/foo/bar', 'bar') >>> u2 = Uri.parse('somenode:123/foo/bar') >>> u2.node, u1.steps, u2.path, ur2.name ('somenode:123', ['', 'foo', 'bar'], '/foo/bar', 'bar') >>> u1 = Uri.parse('foo/bar') >>> u1.node, u1.steps, u1.path, u1.name (None, ['foo', 'bar'], 'foo/bar', 'bar')
[ "Parses", "a", "new", "Uri", "instance", "from", "a", "string", "representation", "of", "a", "URI", "." ]
python
train
mgaitan/waliki
waliki/management/commands/moin_migration_cleanup.py
https://github.com/mgaitan/waliki/blob/5baaf6f043275920a1174ff233726f7ff4bfb5cf/waliki/management/commands/moin_migration_cleanup.py#L21-L29
def clean_meta(rst_content): """remove moinmoin metada from the top of the file""" rst = rst_content.split('\n') for i, line in enumerate(rst): if line.startswith('#'): continue break return '\n'.join(rst[i:])
[ "def", "clean_meta", "(", "rst_content", ")", ":", "rst", "=", "rst_content", ".", "split", "(", "'\\n'", ")", "for", "i", ",", "line", "in", "enumerate", "(", "rst", ")", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "break...
remove moinmoin metada from the top of the file
[ "remove", "moinmoin", "metada", "from", "the", "top", "of", "the", "file" ]
python
train
saulpw/visidata
visidata/canvas.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/canvas.py#L491-L494
def zoomTo(self, bbox): 'set visible area to bbox, maintaining aspectRatio if applicable' self.fixPoint(self.plotviewBox.xymin, bbox.xymin) self.zoomlevel=max(bbox.w/self.canvasBox.w, bbox.h/self.canvasBox.h)
[ "def", "zoomTo", "(", "self", ",", "bbox", ")", ":", "self", ".", "fixPoint", "(", "self", ".", "plotviewBox", ".", "xymin", ",", "bbox", ".", "xymin", ")", "self", ".", "zoomlevel", "=", "max", "(", "bbox", ".", "w", "/", "self", ".", "canvasBox",...
set visible area to bbox, maintaining aspectRatio if applicable
[ "set", "visible", "area", "to", "bbox", "maintaining", "aspectRatio", "if", "applicable" ]
python
train
inasafe/inasafe
safe/plugin.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/plugin.py#L236-L248
def _create_minimum_needs_action(self): """Create action for minimum needs dialog.""" icon = resources_path('img', 'icons', 'show-minimum-needs.svg') self.action_minimum_needs = QAction( QIcon(icon), self.tr('Minimum Needs Calculator'), self.iface.mainWindow()) self.action_minimum_needs.setStatusTip(self.tr( 'Open InaSAFE minimum needs calculator')) self.action_minimum_needs.setWhatsThis(self.tr( 'Open InaSAFE minimum needs calculator')) self.action_minimum_needs.triggered.connect(self.show_minimum_needs) self.add_action( self.action_minimum_needs, add_to_toolbar=self.full_toolbar)
[ "def", "_create_minimum_needs_action", "(", "self", ")", ":", "icon", "=", "resources_path", "(", "'img'", ",", "'icons'", ",", "'show-minimum-needs.svg'", ")", "self", ".", "action_minimum_needs", "=", "QAction", "(", "QIcon", "(", "icon", ")", ",", "self", "...
Create action for minimum needs dialog.
[ "Create", "action", "for", "minimum", "needs", "dialog", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L12923-L12931
def extended_sys_state_send(self, vtol_state, landed_state, force_mavlink1=False): ''' Provides state for additional features vtol_state : The VTOL state if applicable. Is set to MAV_VTOL_STATE_UNDEFINED if UAV is not in VTOL configuration. (uint8_t) landed_state : The landed state. Is set to MAV_LANDED_STATE_UNDEFINED if landed state is unknown. (uint8_t) ''' return self.send(self.extended_sys_state_encode(vtol_state, landed_state), force_mavlink1=force_mavlink1)
[ "def", "extended_sys_state_send", "(", "self", ",", "vtol_state", ",", "landed_state", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "extended_sys_state_encode", "(", "vtol_state", ",", "landed_state", ")", ",", ...
Provides state for additional features vtol_state : The VTOL state if applicable. Is set to MAV_VTOL_STATE_UNDEFINED if UAV is not in VTOL configuration. (uint8_t) landed_state : The landed state. Is set to MAV_LANDED_STATE_UNDEFINED if landed state is unknown. (uint8_t)
[ "Provides", "state", "for", "additional", "features" ]
python
train
dereneaton/ipyrad
ipyrad/assemble/consens_se.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/consens_se.py#L807-L845
def calculate_depths(data, samples, lbview): """ check whether mindepth has changed, and thus whether clusters_hidepth needs to be recalculated, and get new maxlen for new highdepth clusts. if mindepth not changed then nothing changes. """ ## send jobs to be processed on engines start = time.time() printstr = " calculating depths | {} | s5 |" recaljobs = {} maxlens = [] for sample in samples: recaljobs[sample.name] = lbview.apply(recal_hidepth, *(data, sample)) ## block until finished while 1: ready = [i.ready() for i in recaljobs.values()] elapsed = datetime.timedelta(seconds=int(time.time()-start)) progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer=data._spacer) time.sleep(0.1) if len(ready) == sum(ready): print("") break ## check for failures and collect results modsamples = [] for sample in samples: if not recaljobs[sample.name].successful(): LOGGER.error(" sample %s failed: %s", sample.name, recaljobs[sample.name].exception()) else: modsample, _, maxlen, _, _ = recaljobs[sample.name].result() modsamples.append(modsample) maxlens.append(maxlen) ## reset global maxlen if something changed data._hackersonly["max_fragment_length"] = int(max(maxlens)) + 4 return samples
[ "def", "calculate_depths", "(", "data", ",", "samples", ",", "lbview", ")", ":", "## send jobs to be processed on engines", "start", "=", "time", ".", "time", "(", ")", "printstr", "=", "\" calculating depths | {} | s5 |\"", "recaljobs", "=", "{", "}", "maxlens",...
check whether mindepth has changed, and thus whether clusters_hidepth needs to be recalculated, and get new maxlen for new highdepth clusts. if mindepth not changed then nothing changes.
[ "check", "whether", "mindepth", "has", "changed", "and", "thus", "whether", "clusters_hidepth", "needs", "to", "be", "recalculated", "and", "get", "new", "maxlen", "for", "new", "highdepth", "clusts", ".", "if", "mindepth", "not", "changed", "then", "nothing", ...
python
valid
KieranWynn/pyquaternion
pyquaternion/quaternion.py
https://github.com/KieranWynn/pyquaternion/blob/d2aad7f3fb0d4b9cc23aa72b390e9b2e1273eae9/pyquaternion/quaternion.py#L888-L918
def intermediates(cls, q0, q1, n, include_endpoints=False): """Generator method to get an iterable sequence of `n` evenly spaced quaternion rotations between any two existing quaternion endpoints lying on the unit radius hypersphere. This is a convenience function that is based on `Quaternion.slerp()` as defined above. This is a class method and is called as a method of the class itself rather than on a particular instance. Params: q_start: initial endpoint rotation as a Quaternion object q_end: final endpoint rotation as a Quaternion object n: number of intermediate quaternion objects to include within the interval include_endpoints: [optional] if set to `True`, the sequence of intermediates will be 'bookended' by `q_start` and `q_end`, resulting in a sequence length of `n + 2`. If set to `False`, endpoints are not included. Defaults to `False`. Yields: A generator object iterating over a sequence of intermediate quaternion objects. Note: This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere). Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length. """ step_size = 1.0 / (n + 1) if include_endpoints: steps = [i * step_size for i in range(0, n + 2)] else: steps = [i * step_size for i in range(1, n + 1)] for step in steps: yield cls.slerp(q0, q1, step)
[ "def", "intermediates", "(", "cls", ",", "q0", ",", "q1", ",", "n", ",", "include_endpoints", "=", "False", ")", ":", "step_size", "=", "1.0", "/", "(", "n", "+", "1", ")", "if", "include_endpoints", ":", "steps", "=", "[", "i", "*", "step_size", "...
Generator method to get an iterable sequence of `n` evenly spaced quaternion rotations between any two existing quaternion endpoints lying on the unit radius hypersphere. This is a convenience function that is based on `Quaternion.slerp()` as defined above. This is a class method and is called as a method of the class itself rather than on a particular instance. Params: q_start: initial endpoint rotation as a Quaternion object q_end: final endpoint rotation as a Quaternion object n: number of intermediate quaternion objects to include within the interval include_endpoints: [optional] if set to `True`, the sequence of intermediates will be 'bookended' by `q_start` and `q_end`, resulting in a sequence length of `n + 2`. If set to `False`, endpoints are not included. Defaults to `False`. Yields: A generator object iterating over a sequence of intermediate quaternion objects. Note: This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere). Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length.
[ "Generator", "method", "to", "get", "an", "iterable", "sequence", "of", "n", "evenly", "spaced", "quaternion", "rotations", "between", "any", "two", "existing", "quaternion", "endpoints", "lying", "on", "the", "unit", "radius", "hypersphere", "." ]
python
train
broadinstitute/fiss
firecloud/fiss.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L171-L188
def space_set_acl(args): """ Assign an ACL role to list of users for a workspace """ acl_updates = [{"email": user, "accessLevel": args.role} for user in args.users] r = fapi.update_workspace_acl(args.project, args.workspace, acl_updates) fapi._check_response_code(r, 200) errors = r.json()['usersNotFound'] if len(errors): eprint("Unable to assign role for unrecognized users:") for user in errors: eprint("\t{0}".format(user['email'])) return 1 if fcconfig.verbosity: print("Successfully updated {0} role(s)".format(len(acl_updates))) return 0
[ "def", "space_set_acl", "(", "args", ")", ":", "acl_updates", "=", "[", "{", "\"email\"", ":", "user", ",", "\"accessLevel\"", ":", "args", ".", "role", "}", "for", "user", "in", "args", ".", "users", "]", "r", "=", "fapi", ".", "update_workspace_acl", ...
Assign an ACL role to list of users for a workspace
[ "Assign", "an", "ACL", "role", "to", "list", "of", "users", "for", "a", "workspace" ]
python
train
insightindustry/validator-collection
validator_collection/validators.py
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/validators.py#L1082-L1219
def timezone(value, allow_empty = False, positive = True, **kwargs): """Validate that ``value`` is a valid :class:`tzinfo <python:datetime.tzinfo>`. .. caution:: This does **not** verify whether the value is a timezone that actually exists, nor can it resolve timezone names (e.g. ``'Eastern'`` or ``'CET'``). For that kind of functionality, we recommend you utilize: `pytz <https://pypi.python.org/pypi/pytz>`_ :param value: The value to validate. :type value: :class:`str <python:str>` / :class:`tzinfo <python:datetime.tzinfo>` / numeric / :obj:`None <python:None>` :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :param positive: Indicates whether the ``value`` is positive or negative (only has meaning if ``value`` is a string). Defaults to ``True``. :type positive: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`tzinfo <python:datetime.tzinfo>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises CannotCoerceError: if ``value`` cannot be coerced to :class:`tzinfo <python:datetime.tzinfo>` and is not :obj:`None <python:None>` :raises PositiveOffsetMismatchError: if ``positive`` is ``True``, but the offset indicated by ``value`` is actually negative :raises NegativeOffsetMismatchError: if ``positive`` is ``False``, but the offset indicated by ``value`` is actually positive """ # pylint: disable=too-many-branches original_value = value if not value and not allow_empty: raise errors.EmptyValueError('value (%s) was empty' % value) elif not value: return None if not isinstance(value, tzinfo_types): raise errors.CannotCoerceError( 'value (%s) must be a tzinfo, ' 'UTC offset in seconds expressed as a number, ' 'UTC offset expressed as string of 
form +HH:MM, ' 'but was %s' % (value, type(value)) ) elif isinstance(value, datetime_.datetime): value = value.tzinfo elif isinstance(value, datetime_.date): return None elif isinstance(value, datetime_.time): return value.tzinfo elif isinstance(value, timestamp_types): return None elif isinstance(value, str): if '+' not in value and '-' not in value: try: datetime_value = datetime(value, force_run = True) # pylint: disable=E1123 return datetime_value.tzinfo except TypeError: raise errors.CannotCoerceError( 'value (%s) must be a tzinfo, ' 'UTC offset in seconds expressed as a number, ' 'UTC offset expressed as string of form +HH:MM, ' 'but was %s' % (value, type(value)) ) elif '-' in value: try: datetime_value = datetime(value, force_run = True) # pylint: disable=E1123 return datetime_value.tzinfo except TypeError: pass if '+' in value and not positive: raise errors.NegativeOffsetMismatchError( 'expected a negative UTC offset but value is positive' ) elif '-' in value and positive and len(value) == 6: positive = False elif '-' in value and positive: raise errors.PositiveOffsetMismatchError( 'expected a positive UTC offset but value is negative' ) if '+' in value: value = value[value.find('+'):] elif '-' in value: value = value[value.rfind('-'):] value = value[1:] offset_components = value.split(':') if len(offset_components) != 2: raise errors.CannotCoerceError( 'value (%s) must be a tzinfo, ' 'UTC offset in seconds expressed as a number, ' 'UTC offset expressed as string of form +HH:MM, ' 'but was %s' % (value, type(value)) ) hour = int(offset_components[0]) minutes = int(offset_components[1]) value = (hour * 60 * 60) + (minutes * 60) if not positive: value = 0 - value if isinstance(value, numeric_types): if value > 0: positive = True elif value < 0: positive = False elif value == 0: return None offset = datetime_.timedelta(seconds = value) if is_py2: value = TimeZone(offset = offset) elif is_py3: try: value = TimeZone(offset) except ValueError: raise 
errors.UTCOffsetError( 'value (%s) cannot exceed +/- 24h' % original_value ) else: raise NotImplementedError() return value
[ "def", "timezone", "(", "value", ",", "allow_empty", "=", "False", ",", "positive", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=too-many-branches", "original_value", "=", "value", "if", "not", "value", "and", "not", "allow_empty", ":", ...
Validate that ``value`` is a valid :class:`tzinfo <python:datetime.tzinfo>`. .. caution:: This does **not** verify whether the value is a timezone that actually exists, nor can it resolve timezone names (e.g. ``'Eastern'`` or ``'CET'``). For that kind of functionality, we recommend you utilize: `pytz <https://pypi.python.org/pypi/pytz>`_ :param value: The value to validate. :type value: :class:`str <python:str>` / :class:`tzinfo <python:datetime.tzinfo>` / numeric / :obj:`None <python:None>` :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :param positive: Indicates whether the ``value`` is positive or negative (only has meaning if ``value`` is a string). Defaults to ``True``. :type positive: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`tzinfo <python:datetime.tzinfo>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises CannotCoerceError: if ``value`` cannot be coerced to :class:`tzinfo <python:datetime.tzinfo>` and is not :obj:`None <python:None>` :raises PositiveOffsetMismatchError: if ``positive`` is ``True``, but the offset indicated by ``value`` is actually negative :raises NegativeOffsetMismatchError: if ``positive`` is ``False``, but the offset indicated by ``value`` is actually positive
[ "Validate", "that", "value", "is", "a", "valid", ":", "class", ":", "tzinfo", "<python", ":", "datetime", ".", "tzinfo", ">", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L10299-L10319
def recsph(rectan): """ Convert from rectangular coordinates to spherical coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/recrad_c.html :param rectan: Rectangular coordinates of a point. :type rectan: 3-Element Array of floats :return: Distance from the origin, Angle from the positive Z-axis, Longitude in radians. :rtype: tuple """ rectan = stypes.toDoubleVector(rectan) r = ctypes.c_double() colat = ctypes.c_double() lon = ctypes.c_double() libspice.recsph_c(rectan, ctypes.byref(r), ctypes.byref(colat), ctypes.byref(lon)) return r.value, colat.value, lon.value
[ "def", "recsph", "(", "rectan", ")", ":", "rectan", "=", "stypes", ".", "toDoubleVector", "(", "rectan", ")", "r", "=", "ctypes", ".", "c_double", "(", ")", "colat", "=", "ctypes", ".", "c_double", "(", ")", "lon", "=", "ctypes", ".", "c_double", "("...
Convert from rectangular coordinates to spherical coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/recrad_c.html :param rectan: Rectangular coordinates of a point. :type rectan: 3-Element Array of floats :return: Distance from the origin, Angle from the positive Z-axis, Longitude in radians. :rtype: tuple
[ "Convert", "from", "rectangular", "coordinates", "to", "spherical", "coordinates", "." ]
python
train
btcspry/3d-wallet-generator
gen_3dwallet/qr_tools.py
https://github.com/btcspry/3d-wallet-generator/blob/ae54800ec072f9d8259e5fe96a448d36b58b7b59/gen_3dwallet/qr_tools.py#L5-L33
def getQRArray(text, errorCorrection): """ Takes in text and errorCorrection (letter), returns 2D array of the QR code""" # White is True (1) # Black is False (0) # ECC: L7, M15, Q25, H30 # Create the object qr = pyqrcode.create(text, error=errorCorrection) # Get the terminal representation and split by lines (get rid of top and bottom white spaces) plainOut = qr.terminal().split("\n")[5:-5] # Initialize the output 2D list out = [] for line in plainOut: thisOut = [] for char in line: if char == u'7': # This is white thisOut.append(1) elif char == u'4': # This is black, it's part of the u'49' thisOut.append(0) # Finally add everything to the output, stipping whitespaces at start and end out.append(thisOut[4:-4]) # Everything is done, return the qr code list return out
[ "def", "getQRArray", "(", "text", ",", "errorCorrection", ")", ":", "# White is True (1)", "# Black is False (0)", "# ECC: L7, M15, Q25, H30", "# Create the object", "qr", "=", "pyqrcode", ".", "create", "(", "text", ",", "error", "=", "errorCorrection", ")", "# Get t...
Takes in text and errorCorrection (letter), returns 2D array of the QR code
[ "Takes", "in", "text", "and", "errorCorrection", "(", "letter", ")", "returns", "2D", "array", "of", "the", "QR", "code" ]
python
train
cloudtools/stacker
stacker/util.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/util.py#L130-L148
def get_soa_record(client, zone_id, zone_name): """Gets the SOA record for zone_name from zone_id. Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_id (string): The AWS Route53 zone id of the hosted zone to query. zone_name (string): The name of the DNS hosted zone to create. Returns: :class:`stacker.util.SOARecord`: An object representing the parsed SOA record returned from AWS Route53. """ response = client.list_resource_record_sets(HostedZoneId=zone_id, StartRecordName=zone_name, StartRecordType="SOA", MaxItems="1") return SOARecord(response["ResourceRecordSets"][0])
[ "def", "get_soa_record", "(", "client", ",", "zone_id", ",", "zone_name", ")", ":", "response", "=", "client", ".", "list_resource_record_sets", "(", "HostedZoneId", "=", "zone_id", ",", "StartRecordName", "=", "zone_name", ",", "StartRecordType", "=", "\"SOA\"", ...
Gets the SOA record for zone_name from zone_id. Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_id (string): The AWS Route53 zone id of the hosted zone to query. zone_name (string): The name of the DNS hosted zone to create. Returns: :class:`stacker.util.SOARecord`: An object representing the parsed SOA record returned from AWS Route53.
[ "Gets", "the", "SOA", "record", "for", "zone_name", "from", "zone_id", "." ]
python
train
caffeinehit/django-oauth2-provider
provider/views.py
https://github.com/caffeinehit/django-oauth2-provider/blob/6b5bc0d3ad706d2aaa47fa476f38406cddd01236/provider/views.py#L202-L220
def _validate_client(self, request, data): """ :return: ``tuple`` - ``(client or False, data or error)`` """ client = self.get_client(data.get('client_id')) if client is None: raise OAuthError({ 'error': 'unauthorized_client', 'error_description': _("An unauthorized client tried to access" " your resources.") }) form = self.get_request_form(client, data) if not form.is_valid(): raise OAuthError(form.errors) return client, form.cleaned_data
[ "def", "_validate_client", "(", "self", ",", "request", ",", "data", ")", ":", "client", "=", "self", ".", "get_client", "(", "data", ".", "get", "(", "'client_id'", ")", ")", "if", "client", "is", "None", ":", "raise", "OAuthError", "(", "{", "'error'...
:return: ``tuple`` - ``(client or False, data or error)``
[ ":", "return", ":", "tuple", "-", "(", "client", "or", "False", "data", "or", "error", ")" ]
python
train
waqasbhatti/astrobase
astrobase/timeutils.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/timeutils.py#L442-L469
def jd_to_datetime(jd, returniso=False): '''This converts a UTC JD to a Python `datetime` object or ISO date string. Parameters ---------- jd : float The Julian date measured at UTC. returniso : bool If False, returns a naive Python `datetime` object corresponding to `jd`. If True, returns the ISO format string corresponding to the date and time at UTC from `jd`. Returns ------- datetime or str Depending on the value of `returniso`. ''' tt = astime.Time(jd, format='jd', scale='utc') if returniso: return tt.iso else: return tt.datetime
[ "def", "jd_to_datetime", "(", "jd", ",", "returniso", "=", "False", ")", ":", "tt", "=", "astime", ".", "Time", "(", "jd", ",", "format", "=", "'jd'", ",", "scale", "=", "'utc'", ")", "if", "returniso", ":", "return", "tt", ".", "iso", "else", ":",...
This converts a UTC JD to a Python `datetime` object or ISO date string. Parameters ---------- jd : float The Julian date measured at UTC. returniso : bool If False, returns a naive Python `datetime` object corresponding to `jd`. If True, returns the ISO format string corresponding to the date and time at UTC from `jd`. Returns ------- datetime or str Depending on the value of `returniso`.
[ "This", "converts", "a", "UTC", "JD", "to", "a", "Python", "datetime", "object", "or", "ISO", "date", "string", "." ]
python
valid
mfcloud/python-zvm-sdk
smtLayer/makeVM.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/makeVM.py#L309-L370
def showOperandLines(rh): """ Produce help output related to operands. Input: Request Handle """ if rh.function == 'HELP': rh.printLn("N", " For the MakeVM function:") else: rh.printLn("N", "Sub-Functions(s):") rh.printLn("N", " directory - " + "Create a virtual machine in the z/VM user directory.") rh.printLn("N", " help - Displays this help information.") rh.printLn("N", " version - " + "show the version of the makeVM function") if rh.subfunction != '': rh.printLn("N", "Operand(s):") rh.printLn("N", " --cpus <cpuCnt> - " + "Specifies the desired number of virtual CPUs the") rh.printLn("N", " " + "guest will have.") rh.printLn("N", " --maxcpu <maxCpuCnt> - " + "Specifies the maximum number of virtual CPUs the") rh.printLn("N", " " + "guest is allowed to define.") rh.printLn("N", " --ipl <ipl> - " + "Specifies an IPL disk or NSS for the virtual") rh.printLn("N", " " + "machine's directory entry.") rh.printLn("N", " --logonby <byUsers> - " + "Specifies a list of up to 8 z/VM userids who can log") rh.printLn("N", " " + "on to the virtual machine using their id and password.") rh.printLn("N", " --maxMemSize <maxMem> - " + "Specifies the maximum memory the virtual machine") rh.printLn("N", " " + "is allowed to define.") rh.printLn("N", " --setReservedMem - " + "Set the additional memory space (maxMemSize - priMemSize)") rh.printLn("N", " " + "as reserved memory of the virtual machine.") rh.printLn("N", " <password> - " + "Specifies the password for the new virtual") rh.printLn("N", " " + "machine.") rh.printLn("N", " <priMemSize> - " + "Specifies the initial memory size for the new virtual") rh.printLn("N", " " + "machine.") rh.printLn("N", " <privClasses> - " + "Specifies the privilege classes for the new virtual") rh.printLn("N", " " + "machine.") rh.printLn("N", " --profile <profName> - " + "Specifies the z/VM PROFILE to include in the") rh.printLn("N", " " + "virtual machine's directory entry.") rh.printLn("N", " <userid> - " + "Userid of the virtual machine to 
create.") return
[ "def", "showOperandLines", "(", "rh", ")", ":", "if", "rh", ".", "function", "==", "'HELP'", ":", "rh", ".", "printLn", "(", "\"N\"", ",", "\" For the MakeVM function:\"", ")", "else", ":", "rh", ".", "printLn", "(", "\"N\"", ",", "\"Sub-Functions(s):\"", ...
Produce help output related to operands. Input: Request Handle
[ "Produce", "help", "output", "related", "to", "operands", "." ]
python
train
persandstrom/python-verisure
verisure/session.py
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L605-L621
def set_heat_pump_feature(self, device_label, feature): """ Set heatpump mode Args: feature: 'QUIET', 'ECONAVI', or 'POWERFUL' """ response = None try: response = requests.put( urls.set_heatpump_feature(self._giid, device_label, feature), headers={ 'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "set_heat_pump_feature", "(", "self", ",", "device_label", ",", "feature", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "put", "(", "urls", ".", "set_heatpump_feature", "(", "self", ".", "_giid", ",", "device_label"...
Set heatpump mode Args: feature: 'QUIET', 'ECONAVI', or 'POWERFUL'
[ "Set", "heatpump", "mode", "Args", ":", "feature", ":", "QUIET", "ECONAVI", "or", "POWERFUL" ]
python
train
basho/riak-python-client
riak/transports/pool.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/pool.py#L177-L207
def transaction(self, _filter=None, default=None, yield_resource=False): """ transaction(_filter=None, default=None) Claims a resource from the pool for use in a thread-safe, reentrant manner (as part of a with statement). Resources are created as needed when all members of the pool are claimed or the pool is empty. :param _filter: a filter that can be used to select a member of the pool :type _filter: callable :param default: a value that will be used instead of calling :meth:`create_resource` if a new resource needs to be created :param yield_resource: set to True to yield the Resource object itself :type yield_resource: boolean """ resource = self.acquire(_filter=_filter, default=default) try: if yield_resource: yield resource else: yield resource.object if resource.errored: self.delete_resource(resource) except BadResource: self.delete_resource(resource) raise finally: self.release(resource)
[ "def", "transaction", "(", "self", ",", "_filter", "=", "None", ",", "default", "=", "None", ",", "yield_resource", "=", "False", ")", ":", "resource", "=", "self", ".", "acquire", "(", "_filter", "=", "_filter", ",", "default", "=", "default", ")", "t...
transaction(_filter=None, default=None) Claims a resource from the pool for use in a thread-safe, reentrant manner (as part of a with statement). Resources are created as needed when all members of the pool are claimed or the pool is empty. :param _filter: a filter that can be used to select a member of the pool :type _filter: callable :param default: a value that will be used instead of calling :meth:`create_resource` if a new resource needs to be created :param yield_resource: set to True to yield the Resource object itself :type yield_resource: boolean
[ "transaction", "(", "_filter", "=", "None", "default", "=", "None", ")" ]
python
train
dictatorlib/dictator
dictator/__init__.py
https://github.com/dictatorlib/dictator/blob/b77b1709b6fff174f13b0f0c5dbe740b4c07d712/dictator/__init__.py#L208-L235
def get(self, key, default=None): """Return the value at key ``key``, or default value ``default`` which is None by default. >>> dc = Dictator() >>> dc['l0'] = [1, 2, 3, 4] >>> dc.get('l0') ['1', '2', '3', '4'] >>> dc['l0'] ['1', '2', '3', '4'] >>> dc.clear() :param key: key of value to return :type key: str :param default: value of any type to return of key doesn't exist. :type default: Any :return: value of given key :rtype: Any """ try: value = self.__getitem__(key) except KeyError: value = None # Py3 Redis compatibiility if isinstance(value, bytes): value = value.decode() return value or default
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "try", ":", "value", "=", "self", ".", "__getitem__", "(", "key", ")", "except", "KeyError", ":", "value", "=", "None", "# Py3 Redis compatibiility", "if", "isinstance", "(", ...
Return the value at key ``key``, or default value ``default`` which is None by default. >>> dc = Dictator() >>> dc['l0'] = [1, 2, 3, 4] >>> dc.get('l0') ['1', '2', '3', '4'] >>> dc['l0'] ['1', '2', '3', '4'] >>> dc.clear() :param key: key of value to return :type key: str :param default: value of any type to return of key doesn't exist. :type default: Any :return: value of given key :rtype: Any
[ "Return", "the", "value", "at", "key", "key", "or", "default", "value", "default", "which", "is", "None", "by", "default", "." ]
python
train
timothycrosley/concentration
concentration/run.py
https://github.com/timothycrosley/concentration/blob/5d07a79cdf56054c42b6e2d1c95ea51bc6678fc4/concentration/run.py#L26-L38
def improve(): """Disables access to websites that are defined as 'distractors'""" with open(settings.HOSTS_FILE, "r+") as hosts_file: contents = hosts_file.read() if not settings.START_TOKEN in contents and not settings.END_TOKEN in contents: hosts_file.write(settings.START_TOKEN + "\n") for site in set(settings.DISTRACTORS): hosts_file.write("{0}\t{1}\n".format(settings.REDIRECT_TO, site)) for sub_domain in settings.SUB_DOMAINS: hosts_file.write("{0}\t{1}.{2}\n".format(settings.REDIRECT_TO, sub_domain, site)) hosts_file.write(settings.END_TOKEN + "\n") reset_network("Concentration is now improved :D!")
[ "def", "improve", "(", ")", ":", "with", "open", "(", "settings", ".", "HOSTS_FILE", ",", "\"r+\"", ")", "as", "hosts_file", ":", "contents", "=", "hosts_file", ".", "read", "(", ")", "if", "not", "settings", ".", "START_TOKEN", "in", "contents", "and", ...
Disables access to websites that are defined as 'distractors
[ "Disables", "access", "to", "websites", "that", "are", "defined", "as", "distractors" ]
python
train
odlgroup/odl
doc/source/guide/code/functional_indepth_example.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/doc/source/guide/code/functional_indepth_example.py#L34-L56
def gradient(self): """The gradient operator.""" # First we store the functional in a variable functional = self # The class corresponding to the gradient operator. class MyGradientOperator(odl.Operator): """Class implementing the gradient operator.""" def __init__(self): """Initialize a new instance.""" super(MyGradientOperator, self).__init__( domain=functional.domain, range=functional.domain) def _call(self, x): """Evaluate the gradient.""" # Here we can access the store functional from a few lines # above return 2.0 * x + functional.y return MyGradientOperator()
[ "def", "gradient", "(", "self", ")", ":", "# First we store the functional in a variable", "functional", "=", "self", "# The class corresponding to the gradient operator.", "class", "MyGradientOperator", "(", "odl", ".", "Operator", ")", ":", "\"\"\"Class implementing the gradi...
The gradient operator.
[ "The", "gradient", "operator", "." ]
python
train
Accelize/pycosio
pycosio/storage/s3.py
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/s3.py#L113-L127
def _get_client(self): """ S3 Boto3 client Returns: boto3.session.Session.client: client """ client_kwargs = self._storage_parameters.get('client', dict()) # Handles unsecure mode if self._unsecure: client_kwargs = client_kwargs.copy() client_kwargs['use_ssl'] = False return self._get_session().client("s3", **client_kwargs)
[ "def", "_get_client", "(", "self", ")", ":", "client_kwargs", "=", "self", ".", "_storage_parameters", ".", "get", "(", "'client'", ",", "dict", "(", ")", ")", "# Handles unsecure mode", "if", "self", ".", "_unsecure", ":", "client_kwargs", "=", "client_kwargs...
S3 Boto3 client Returns: boto3.session.Session.client: client
[ "S3", "Boto3", "client" ]
python
train
attm2x/m2x-python
m2x/v2/streams.py
https://github.com/attm2x/m2x-python/blob/df83f590114692b1f96577148b7ba260065905bb/m2x/v2/streams.py#L76-L90
def add_value(self, value, timestamp=None): """ Method for `Update Data Stream Value <https://m2x.att.com/developer/documentation/v2/device#Update-Data-Stream-Value>`_ endpoint. :param value: The updated stream value :param timestamp: The (optional) timestamp for the upadted value :return: The API response, see M2X API docs for details :rtype: dict :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request """ data = {'value': value} if timestamp: data['timestamp'] = timestamp return self.api.put(self.subpath('/value'), data=data)
[ "def", "add_value", "(", "self", ",", "value", ",", "timestamp", "=", "None", ")", ":", "data", "=", "{", "'value'", ":", "value", "}", "if", "timestamp", ":", "data", "[", "'timestamp'", "]", "=", "timestamp", "return", "self", ".", "api", ".", "put...
Method for `Update Data Stream Value <https://m2x.att.com/developer/documentation/v2/device#Update-Data-Stream-Value>`_ endpoint. :param value: The updated stream value :param timestamp: The (optional) timestamp for the upadted value :return: The API response, see M2X API docs for details :rtype: dict :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
[ "Method", "for", "Update", "Data", "Stream", "Value", "<https", ":", "//", "m2x", ".", "att", ".", "com", "/", "developer", "/", "documentation", "/", "v2", "/", "device#Update", "-", "Data", "-", "Stream", "-", "Value", ">", "_", "endpoint", "." ]
python
test
basilfx/flask-daapserver
daapserver/provider.py
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/provider.py#L356-L382
def get_item_data(self, session, item, byte_range=None): """ Return a file pointer to the item file. Assumes `item.file_name` points to the file on disk. """ # Parse byte range if byte_range is not None: begin, end = parse_byte_range(byte_range, max_byte=item.file_size) else: begin, end = 0, item.file_size # Open the file fp = open(item.file_name, "rb+") if not begin: return fp, item.file_type, item.file_size elif begin and not end: fp.seek(begin) return fp, item.file_type, item.file_size elif begin and end: fp.seek(begin) data = fp.read(end - begin) result = cStringIO.StringIO(data) return result, item.file_type, item.file_size
[ "def", "get_item_data", "(", "self", ",", "session", ",", "item", ",", "byte_range", "=", "None", ")", ":", "# Parse byte range", "if", "byte_range", "is", "not", "None", ":", "begin", ",", "end", "=", "parse_byte_range", "(", "byte_range", ",", "max_byte", ...
Return a file pointer to the item file. Assumes `item.file_name` points to the file on disk.
[ "Return", "a", "file", "pointer", "to", "the", "item", "file", ".", "Assumes", "item", ".", "file_name", "points", "to", "the", "file", "on", "disk", "." ]
python
train
GoogleCloudPlatform/python-repo-tools
gcp_devrel/tools/requirements.py
https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L176-L188
def update_command(args): """Updates all dependencies the specified requirements file.""" updated = update_requirements_file( args.requirements_file, args.skip_packages) if updated: print('Updated requirements in {}:'.format(args.requirements_file)) for item in updated: print(' * {} from {} to {}.'.format(*item)) else: print('All dependencies in {} are up-to-date.'.format( args.requirements_file))
[ "def", "update_command", "(", "args", ")", ":", "updated", "=", "update_requirements_file", "(", "args", ".", "requirements_file", ",", "args", ".", "skip_packages", ")", "if", "updated", ":", "print", "(", "'Updated requirements in {}:'", ".", "format", "(", "a...
Updates all dependencies the specified requirements file.
[ "Updates", "all", "dependencies", "the", "specified", "requirements", "file", "." ]
python
train
zkbt/the-friendly-stars
thefriendlystars/constellations/constellation.py
https://github.com/zkbt/the-friendly-stars/blob/50d3f979e79e63c66629065c75595696dc79802e/thefriendlystars/constellations/constellation.py#L183-L223
def from_text(cls, filename, **kwargs): ''' Create a constellation by reading a catalog in from a text file, as long as it's formated as in to_text() with identifiers, coordinates, magnitudes. Parameters ---------- filename : str The filename to read in. **kwargs are passed to astropy.io.ascii.read() ''' # FIXME -- add something here to parse id, mag, errors from the table? # load the table t = ascii.read(filename, **kwargs) ''' # which columns is the coordinates? i_coordinates = t.colnames.index('ra') # everything before the coordinates is an identifier identifiers = Table(t.columns[:i_coordinates]) # the complete coordinates are stored in one c = t.columns[i_coordinates:i_coordinates+6] coordinates = coord.SkyCoord(**c) coordinates.obstime=Time(cls.epoch, format='decimalyear') # everything after coordinates is magnitudes magnitudes = Table(t.columns[i_coordinates+1:]) newtable = hstack([Table(identifiers), Table({'coordinates':coordinates}), Table(magnitudes)]) ''' this = cls(t) this.speak('loaded constellation from {}'.format(filename)) return this
[ "def", "from_text", "(", "cls", ",", "filename", ",", "*", "*", "kwargs", ")", ":", "# FIXME -- add something here to parse id, mag, errors from the table?", "# load the table", "t", "=", "ascii", ".", "read", "(", "filename", ",", "*", "*", "kwargs", ")", "'''\n ...
Create a constellation by reading a catalog in from a text file, as long as it's formated as in to_text() with identifiers, coordinates, magnitudes. Parameters ---------- filename : str The filename to read in. **kwargs are passed to astropy.io.ascii.read()
[ "Create", "a", "constellation", "by", "reading", "a", "catalog", "in", "from", "a", "text", "file", "as", "long", "as", "it", "s", "formated", "as", "in", "to_text", "()", "with", "identifiers", "coordinates", "magnitudes", "." ]
python
train
senaite/senaite.core
bika/lims/catalog/catalog_utilities.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/catalog/catalog_utilities.py#L202-L247
def _map_content_types(archetype_tool, catalogs_definition): """ Updates the mapping for content_types against catalogs :archetype_tool: an archetype_tool object :catalogs_definition: a dictionary like { CATALOG_ID: { 'types': ['ContentType', ...], 'indexes': { 'UID': 'FieldIndex', ... }, 'columns': [ 'Title', ... ] } } """ # This will be a dictionari like {'content_type':['catalog_id', ...]} ct_map = {} # This list will contain the atalog ids to be rebuild to_reindex = [] # getting the dictionary of mapped content_types in the catalog map_types = archetype_tool.catalog_map for catalog_id in catalogs_definition.keys(): catalog_info = catalogs_definition.get(catalog_id, {}) # Mapping the catalog with the defined types types = catalog_info.get('types', []) for t in types: tmp_l = ct_map.get(t, []) tmp_l.append(catalog_id) ct_map[t] = tmp_l # Mapping for t in ct_map.keys(): catalogs_list = ct_map[t] # Getting the previus mapping perv_catalogs_list = archetype_tool.catalog_map.get(t, []) # If the mapping has changed, update it set1 = set(catalogs_list) set2 = set(perv_catalogs_list) if set1 != set2: archetype_tool.setCatalogsByType(t, catalogs_list) # Adding to reindex only the catalogs that have changed to_reindex = to_reindex + list(set1 - set2) + list(set2 - set1) return to_reindex
[ "def", "_map_content_types", "(", "archetype_tool", ",", "catalogs_definition", ")", ":", "# This will be a dictionari like {'content_type':['catalog_id', ...]}", "ct_map", "=", "{", "}", "# This list will contain the atalog ids to be rebuild", "to_reindex", "=", "[", "]", "# get...
Updates the mapping for content_types against catalogs :archetype_tool: an archetype_tool object :catalogs_definition: a dictionary like { CATALOG_ID: { 'types': ['ContentType', ...], 'indexes': { 'UID': 'FieldIndex', ... }, 'columns': [ 'Title', ... ] } }
[ "Updates", "the", "mapping", "for", "content_types", "against", "catalogs", ":", "archetype_tool", ":", "an", "archetype_tool", "object", ":", "catalogs_definition", ":", "a", "dictionary", "like", "{", "CATALOG_ID", ":", "{", "types", ":", "[", "ContentType", "...
python
train
eaton-lab/toytree
toytree/utils.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/utils.py#L30-L41
def node_scale_root_height(self, treeheight=1): """ Returns a toytree copy with all nodes scaled so that the root height equals the value entered for treeheight. """ # make tree height = 1 * treeheight ctree = self._ttree.copy() _height = ctree.treenode.height for node in ctree.treenode.traverse(): node.dist = (node.dist / _height) * treeheight ctree._coords.update() return ctree
[ "def", "node_scale_root_height", "(", "self", ",", "treeheight", "=", "1", ")", ":", "# make tree height = 1 * treeheight", "ctree", "=", "self", ".", "_ttree", ".", "copy", "(", ")", "_height", "=", "ctree", ".", "treenode", ".", "height", "for", "node", "i...
Returns a toytree copy with all nodes scaled so that the root height equals the value entered for treeheight.
[ "Returns", "a", "toytree", "copy", "with", "all", "nodes", "scaled", "so", "that", "the", "root", "height", "equals", "the", "value", "entered", "for", "treeheight", "." ]
python
train
pypa/pipenv
pipenv/patched/notpip/_vendor/pkg_resources/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py#L277-L285
def with_context(self, required_by): """ If required_by is non-empty, return a version of self that is a ContextualVersionConflict. """ if not required_by: return self args = self.args + (required_by,) return ContextualVersionConflict(*args)
[ "def", "with_context", "(", "self", ",", "required_by", ")", ":", "if", "not", "required_by", ":", "return", "self", "args", "=", "self", ".", "args", "+", "(", "required_by", ",", ")", "return", "ContextualVersionConflict", "(", "*", "args", ")" ]
If required_by is non-empty, return a version of self that is a ContextualVersionConflict.
[ "If", "required_by", "is", "non", "-", "empty", "return", "a", "version", "of", "self", "that", "is", "a", "ContextualVersionConflict", "." ]
python
train
santoshphilip/eppy
eppy/modeleditor.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L616-L634
def initread(self, idfname): """ Use the current IDD and read an IDF from file. If the IDD has not yet been initialised then this is done first. Parameters ---------- idf_name : str Path to an IDF file. """ with open(idfname, 'r') as _: # raise nonexistent file error early if idfname doesn't exist pass iddfhandle = StringIO(iddcurrent.iddtxt) if self.getiddname() == None: self.setiddname(iddfhandle) self.idfname = idfname self.read()
[ "def", "initread", "(", "self", ",", "idfname", ")", ":", "with", "open", "(", "idfname", ",", "'r'", ")", "as", "_", ":", "# raise nonexistent file error early if idfname doesn't exist", "pass", "iddfhandle", "=", "StringIO", "(", "iddcurrent", ".", "iddtxt", "...
Use the current IDD and read an IDF from file. If the IDD has not yet been initialised then this is done first. Parameters ---------- idf_name : str Path to an IDF file.
[ "Use", "the", "current", "IDD", "and", "read", "an", "IDF", "from", "file", ".", "If", "the", "IDD", "has", "not", "yet", "been", "initialised", "then", "this", "is", "done", "first", "." ]
python
train
vmagamedov/grpclib
grpclib/server.py
https://github.com/vmagamedov/grpclib/blob/e4a0af8d2802297586cf8d67d2d3e65f31c09dae/grpclib/server.py#L542-L551
async def wait_closed(self): """Coroutine to wait until all existing request handlers will exit properly. """ if self._server is None: raise RuntimeError('Server is not started') await self._server.wait_closed() if self._handlers: await asyncio.wait({h.wait_closed() for h in self._handlers}, loop=self._loop)
[ "async", "def", "wait_closed", "(", "self", ")", ":", "if", "self", ".", "_server", "is", "None", ":", "raise", "RuntimeError", "(", "'Server is not started'", ")", "await", "self", ".", "_server", ".", "wait_closed", "(", ")", "if", "self", ".", "_handler...
Coroutine to wait until all existing request handlers will exit properly.
[ "Coroutine", "to", "wait", "until", "all", "existing", "request", "handlers", "will", "exit", "properly", "." ]
python
train
SoftwareDefinedBuildings/XBOS
apps/Data_quality_analysis/Model_Data.py
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Model_Data.py#L456-L497
def best_model_fit(self): """ Fit data to optimal model and return its metrics. Returns ------- dict Best model's metrics """ self.best_model.fit(self.baseline_in, self.baseline_out) self.y_true = self.baseline_out # Pandas Series self.y_pred = self.best_model.predict(self.baseline_in) # numpy.ndarray # Set all negative values to zero since energy > 0 self.y_pred[self.y_pred < 0] = 0 # n and k values for adj r2 score self.n_test = self.baseline_in.shape[0] # Number of points in data sample self.k_test = self.baseline_in.shape[1] # Number of variables in model, excluding the constant # Store best model's metrics self.best_metrics['name'] = self.best_model_name self.best_metrics['r2'] = r2_score(self.y_true, self.y_pred) self.best_metrics['mse'] = mean_squared_error(self.y_true, self.y_pred) self.best_metrics['rmse'] = math.sqrt(self.best_metrics['mse']) self.best_metrics['adj_r2'] = self.adj_r2(self.best_metrics['r2'], self.n_test, self.k_test) # Normalized Mean Bias Error numerator = sum(self.y_true - self.y_pred) denominator = (self.n_test - self.k_test) * (sum(self.y_true) / len(self.y_true)) self.best_metrics['nmbe'] = numerator / denominator # MAPE can't have 0 values in baseline_out -> divide by zero error self.baseline_out_copy = self.baseline_out[self.baseline_out != 0] self.baseline_in_copy = self.baseline_in[self.baseline_in.index.isin(self.baseline_out_copy.index)] self.y_true_copy = self.baseline_out_copy # Pandas Series self.y_pred_copy = self.best_model.predict(self.baseline_in_copy) # numpy.ndarray self.best_metrics['mape'] = np.mean(np.abs((self.y_true_copy - self.y_pred_copy) / self.y_true_copy)) * 100 return self.best_metrics
[ "def", "best_model_fit", "(", "self", ")", ":", "self", ".", "best_model", ".", "fit", "(", "self", ".", "baseline_in", ",", "self", ".", "baseline_out", ")", "self", ".", "y_true", "=", "self", ".", "baseline_out", "# Pandas Series", "self", ".", "y_pred"...
Fit data to optimal model and return its metrics. Returns ------- dict Best model's metrics
[ "Fit", "data", "to", "optimal", "model", "and", "return", "its", "metrics", "." ]
python
train
MacHu-GWU/angora-project
angora/dataIO/pk.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dataIO/pk.py#L253-L326
def safe_dump_pk(obj, abspath, pk_protocol=pk_protocol, compress=False, enable_verbose=True): """A stable version of dump_pk, silently overwrite existing file. When your program been interrupted, you lose nothing. Typically if your program is interrupted by any reason, it only leaves a incomplete file. If you use replace=True, then you also lose your old file. So a bettr way is to: 1. dump pickle to a temp file. 2. when it's done, rename it to #abspath, overwrite the old one. This way guarantee atomic write. :param obj: Picklable Python Object. :param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle). :type abspath: string :param pk_protocol: (default your python version) use 2, to make a py2.x/3.x compatible pickle file. But 3 is faster. :type pk_protocol: int :param compress: (default False) If ``True``, use GNU program gzip to compress the Pickle file. Disk usage can be greatly reduced. But you have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading. :type compress: boolean :param enable_verbose: (default True) Trigger for message. :type enable_verbose: boolean Usage:: >>> from weatherlab.lib.dataIO.pk import safe_dump_pk >>> pk = {"a": 1, "b": 2} >>> safe_dump_pk(pk, "test.pickle") Dumping to test.pickle... Complete! Elapse 0.001763 sec **中文文档** 在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式 写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部 都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名, 覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会 影响原文件。 参数列表 :param obj: 可Pickle化的Python对象 :param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz`` , 其中gz用于被压 缩的Pickle :type abspath: ``字符串`` :param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被 py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。 :type pk_protocol: ``整数`` :param compress: (默认 False) 当为 ``True`` 时, 使用开源压缩标准gzip压缩Pickle文件。 通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数 :func:`load_pk(abspath, compress=True)<load_pk>`. :type compress: ``布尔值`` :param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭. 
:type enable_verbose: ``布尔值`` """ abspath = str(abspath) # try stringlize temp_abspath = "%s.tmp" % abspath dump_pk(obj, temp_abspath, pk_protocol=pk_protocol, replace=True, compress=compress, enable_verbose=enable_verbose) shutil.move(temp_abspath, abspath)
[ "def", "safe_dump_pk", "(", "obj", ",", "abspath", ",", "pk_protocol", "=", "pk_protocol", ",", "compress", "=", "False", ",", "enable_verbose", "=", "True", ")", ":", "abspath", "=", "str", "(", "abspath", ")", "# try stringlize", "temp_abspath", "=", "\"%s...
A stable version of dump_pk, silently overwrite existing file. When your program been interrupted, you lose nothing. Typically if your program is interrupted by any reason, it only leaves a incomplete file. If you use replace=True, then you also lose your old file. So a bettr way is to: 1. dump pickle to a temp file. 2. when it's done, rename it to #abspath, overwrite the old one. This way guarantee atomic write. :param obj: Picklable Python Object. :param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle). :type abspath: string :param pk_protocol: (default your python version) use 2, to make a py2.x/3.x compatible pickle file. But 3 is faster. :type pk_protocol: int :param compress: (default False) If ``True``, use GNU program gzip to compress the Pickle file. Disk usage can be greatly reduced. But you have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading. :type compress: boolean :param enable_verbose: (default True) Trigger for message. :type enable_verbose: boolean Usage:: >>> from weatherlab.lib.dataIO.pk import safe_dump_pk >>> pk = {"a": 1, "b": 2} >>> safe_dump_pk(pk, "test.pickle") Dumping to test.pickle... Complete! Elapse 0.001763 sec **中文文档** 在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式 写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部 都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名, 覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会 影响原文件。 参数列表 :param obj: 可Pickle化的Python对象 :param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz`` , 其中gz用于被压 缩的Pickle :type abspath: ``字符串`` :param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被 py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。 :type pk_protocol: ``整数`` :param compress: (默认 False) 当为 ``True`` 时, 使用开源压缩标准gzip压缩Pickle文件。 通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数 :func:`load_pk(abspath, compress=True)<load_pk>`. :type compress: ``布尔值`` :param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭. :type enable_verbose: ``布尔值``
[ "A", "stable", "version", "of", "dump_pk", "silently", "overwrite", "existing", "file", "." ]
python
train
edx/edx-enterprise
enterprise/admin/forms.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/admin/forms.py#L233-L267
def clean(self): """ Clean fields that depend on each other. In this case, the form can be used to link single user or bulk link multiple users. These are mutually exclusive modes, so this method checks that only one field is passed. """ cleaned_data = super(ManageLearnersForm, self).clean() # Here we take values from `data` (and not `cleaned_data`) as we need raw values - field clean methods # might "invalidate" the value and set it to None, while all we care here is if it was provided at all or not email_or_username = self.data.get(self.Fields.EMAIL_OR_USERNAME, None) bulk_upload_csv = self.files.get(self.Fields.BULK_UPLOAD, None) if not email_or_username and not bulk_upload_csv: raise ValidationError(ValidationMessages.NO_FIELDS_SPECIFIED) if email_or_username and bulk_upload_csv: raise ValidationError(ValidationMessages.BOTH_FIELDS_SPECIFIED) if email_or_username: mode = self.Modes.MODE_SINGULAR else: mode = self.Modes.MODE_BULK cleaned_data[self.Fields.MODE] = mode cleaned_data[self.Fields.NOTIFY] = self.clean_notify() self._validate_course() self._validate_program() if self.data.get(self.Fields.PROGRAM, None) and self.data.get(self.Fields.COURSE, None): raise ValidationError(ValidationMessages.COURSE_AND_PROGRAM_ERROR) return cleaned_data
[ "def", "clean", "(", "self", ")", ":", "cleaned_data", "=", "super", "(", "ManageLearnersForm", ",", "self", ")", ".", "clean", "(", ")", "# Here we take values from `data` (and not `cleaned_data`) as we need raw values - field clean methods", "# might \"invalidate\" the value ...
Clean fields that depend on each other. In this case, the form can be used to link single user or bulk link multiple users. These are mutually exclusive modes, so this method checks that only one field is passed.
[ "Clean", "fields", "that", "depend", "on", "each", "other", "." ]
python
valid
christophertbrown/bioscripts
ctbBio/ncbi_download.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L158-L195
def getFTPs(accessions, ftp, search, exclude, convert = False, threads = 1, attempt = 1, max_attempts = 2): """ download genome info from NCBI """ info = wget(ftp)[0] allMatches = [] for genome in open(info, encoding = 'utf8'): genome = str(genome) matches, genomeInfo = check(genome, accessions) if genomeInfo is not False: f = genomeInfo[0] + search Gftp = genomeInfo[19] Gftp = Gftp + '/' + search allMatches.extend(matches) yield (Gftp, f, exclude, matches) # print accessions that could not be matched # and whether or not they could be converted (optional) newAccs = [] missing = accessions.difference(set(allMatches)) if convert is True: pool = Pool(threads) pool = pool.imap_unordered(searchAccession, missing) for newAcc in tqdm(pool, total = len(missing)): status, accession, newAcc = newAcc if status is True: newAccs.append(newAcc) print('not found:', accession, '->', newAcc) else: for accession in missing: print('not found:', accession) # re-try after converting accessions (optional) if len(newAccs) > 0 and attempt <= max_attempts: print('convert accession attempt', attempt) attempt += 1 for hit in getFTPs(set(newAccs), ftp, search, exclude, convert, threads = 1, attempt = attempt): yield hit
[ "def", "getFTPs", "(", "accessions", ",", "ftp", ",", "search", ",", "exclude", ",", "convert", "=", "False", ",", "threads", "=", "1", ",", "attempt", "=", "1", ",", "max_attempts", "=", "2", ")", ":", "info", "=", "wget", "(", "ftp", ")", "[", ...
download genome info from NCBI
[ "download", "genome", "info", "from", "NCBI" ]
python
train
JarryShaw/PyPCAPKit
src/reassembly/ip.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/reassembly/ip.py#L109-L165
def reassembly(self, info): """Reassembly procedure. Positional arguments: * info -- Info, info dict of packets to be reassembled """ BUFID = info.bufid # Buffer Identifier FO = info.fo # Fragment Offset IHL = info.ihl # Internet Header Length MF = info.mf # More Fragments flag TL = info.tl # Total Length # when non-fragmented (possibly discarded) packet received if not FO and not MF: if BUFID in self._buffer: self._dtgram += self.submit(self._buffer[BUFID]) del self._buffer[BUFID] return # initialise buffer with BUFID if BUFID not in self._buffer: self._buffer[BUFID] = dict( TDL=0, # Total Data Length RCVBT=bytearray(8191), # Fragment Received Bit Table index=list(), # index record header=bytearray(), # header buffer datagram=bytearray(65535), # data buffer ) # append packet index self._buffer[BUFID]['index'].append(info.num) # put data into data buffer start = FO stop = TL - IHL + FO self._buffer[BUFID]['datagram'][start:stop] = info.payload # set RCVBT bits (in 8 octets) start = FO // 8 stop = FO // 8 + (TL - IHL + 7) // 8 self._buffer[BUFID]['RCVBT'][start:stop] = b'\x01' * (stop - start + 1) # get total data length (header excludes) if not MF: TDL = TL - IHL + FO # put header into header buffer if not FO: self._buffer[BUFID]['header'] = info.header # when datagram is reassembled in whole start = 0 stop = (TDL + 7) // 8 if TDL and all(self._buffer[BUFID]['RCVBT'][start:stop]): self._dtgram += self.submit(self._buffer[BUFID], checked=True) del self._buffer[BUFID]
[ "def", "reassembly", "(", "self", ",", "info", ")", ":", "BUFID", "=", "info", ".", "bufid", "# Buffer Identifier", "FO", "=", "info", ".", "fo", "# Fragment Offset", "IHL", "=", "info", ".", "ihl", "# Internet Header Length", "MF", "=", "info", ".", "mf",...
Reassembly procedure. Positional arguments: * info -- Info, info dict of packets to be reassembled
[ "Reassembly", "procedure", "." ]
python
train
bjmorgan/vasppy
vasppy/poscar.py
https://github.com/bjmorgan/vasppy/blob/cc2d1449697b17ee1c43715a02cddcb1139a6834/vasppy/poscar.py#L261-L272
def stoichiometry( self ): """ Stoichiometry for this POSCAR, as a Counter. e.g. AB_2O_4 -> Counter( { 'A': 1, 'B': 2, O: 4 } ) Args: None Returns: None """ return Counter( { label: number for label, number in zip( self.atoms, self.atom_numbers ) } )
[ "def", "stoichiometry", "(", "self", ")", ":", "return", "Counter", "(", "{", "label", ":", "number", "for", "label", ",", "number", "in", "zip", "(", "self", ".", "atoms", ",", "self", ".", "atom_numbers", ")", "}", ")" ]
Stoichiometry for this POSCAR, as a Counter. e.g. AB_2O_4 -> Counter( { 'A': 1, 'B': 2, O: 4 } ) Args: None Returns: None
[ "Stoichiometry", "for", "this", "POSCAR", "as", "a", "Counter", ".", "e", ".", "g", ".", "AB_2O_4", "-", ">", "Counter", "(", "{", "A", ":", "1", "B", ":", "2", "O", ":", "4", "}", ")", "Args", ":", "None" ]
python
train
jazzband/django-ddp
dddp/__init__.py
https://github.com/jazzband/django-ddp/blob/1e1954b06fe140346acea43582515991685e4e01/dddp/__init__.py#L69-L81
def as_dict(self, **kwargs): """Return an error dict for self.args and kwargs.""" error, reason, details, err_kwargs = self.args result = { key: val for key, val in { 'error': error, 'reason': reason, 'details': details, }.items() if val is not None } result.update(err_kwargs) result.update(kwargs) return result
[ "def", "as_dict", "(", "self", ",", "*", "*", "kwargs", ")", ":", "error", ",", "reason", ",", "details", ",", "err_kwargs", "=", "self", ".", "args", "result", "=", "{", "key", ":", "val", "for", "key", ",", "val", "in", "{", "'error'", ":", "er...
Return an error dict for self.args and kwargs.
[ "Return", "an", "error", "dict", "for", "self", ".", "args", "and", "kwargs", "." ]
python
test
markomanninen/abnum
remarkuple/table.py
https://github.com/markomanninen/abnum/blob/9bfc8f06f34d9a51aab038638f87e2bb5f9f4c99/remarkuple/table.py#L7-L86
def table(*args, **kw): """ Table function presents the idea of extending tags for simpler generation of some html element groups. Table has several group of tags in well defined structure. Caption header should be right after table and before thead for example. Colgroup, tfoot, tbody, tr, td and th elements has certain order which are handled by extended table class. Same idea could be used to create unordered lists and menu or other custom html widgets. Use in this way: ´from remarkuple import helper as h, table´ """ class table(type(helper.table())): """ Extend base table tag class """ def __init__(self, *args, **kw): super(self.__class__, self).__init__(*args, **kw) def addCaption(self, caption, **kw): if not self.__dict__.has_key('caption'): self.__dict__['caption'] = helper.caption(**kw) self.__dict__['caption'].addContent(caption) return self def addColGroup(self, *cols, **kw): """ http://www.w3.org/TR/CSS2/tables.html#columns """ if not self.__dict__.has_key('colgroup'): self.__dict__['colgroup'] = helper.colgroup(**kw) for col in cols: self.__dict__['colgroup'].addContent(col) return self def addHeadRow(self, *trs, **kw): if not self.__dict__.has_key('thead'): self.__dict__['thead'] = helper.thead(**kw) for tr in trs: self.__dict__['thead'].addContent(tr) return self def addFootRow(self, *trs, **kw): if not self.__dict__.has_key('tfoot'): self.__dict__['tfoot'] = helper.tfoot(**kw) for tr in trs: self.__dict__['tfoot'].addContent(tr) return self def addBodyRow(self, *trs, **kw): """ Body rows can be collected under same element, or under separate body tags via addBodyRows """ if not self.__dict__.has_key('tbody'): self.__dict__['tbody'] = helper.tbody(**kw) for tr in trs: self.__dict__['tbody'].addContent(tr) return self def addBodyRows(self, *trs, **kw): """ See above """ if not self.__dict__.has_key('tbodys'): self.__dict__['tbodys'] = [] self.__dict__['tbodys'].append(helper.tbody(*trs, **kw)) return self def __str__(self): if 
self.__dict__.has_key('caption'): self.addContent(self.__dict__['caption']) if self.__dict__.has_key('colgroup'): self.addContent(self.__dict__['colgroup']) if self.__dict__.has_key('thead'): self.addContent(self.__dict__['thead']) if self.__dict__.has_key('tfoot'): self.addContent(self.__dict__['tfoot']) if self.__dict__.has_key('tbody'): self.addContent(self.__dict__['tbody']) if self.__dict__.has_key('tbodys'): map(self.addContent, self.__dict__['tbodys']) return super(self.__class__, self).__str__() return table(*args, **kw)
[ "def", "table", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "class", "table", "(", "type", "(", "helper", ".", "table", "(", ")", ")", ")", ":", "\"\"\" Extend base table tag class \"\"\"", "def", "__init__", "(", "self", ",", "*", "args", ",", ...
Table function presents the idea of extending tags for simpler generation of some html element groups. Table has several group of tags in well defined structure. Caption header should be right after table and before thead for example. Colgroup, tfoot, tbody, tr, td and th elements has certain order which are handled by extended table class. Same idea could be used to create unordered lists and menu or other custom html widgets. Use in this way: ´from remarkuple import helper as h, table´
[ "Table", "function", "presents", "the", "idea", "of", "extending", "tags", "for", "simpler", "generation", "of", "some", "html", "element", "groups", ".", "Table", "has", "several", "group", "of", "tags", "in", "well", "defined", "structure", ".", "Caption", ...
python
train
shoebot/shoebot
extensions/lib/shoebotit/gtk3_utils.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/extensions/lib/shoebotit/gtk3_utils.py#L162-L167
def is_venv(directory, executable='python'): """ :param directory: base directory of python environment """ path=os.path.join(directory, 'bin', executable) return os.path.isfile(path)
[ "def", "is_venv", "(", "directory", ",", "executable", "=", "'python'", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "'bin'", ",", "executable", ")", "return", "os", ".", "path", ".", "isfile", "(", "path", ")" ]
:param directory: base directory of python environment
[ ":", "param", "directory", ":", "base", "directory", "of", "python", "environment" ]
python
valid
ska-sa/hypercube
hypercube/base_cube.py
https://github.com/ska-sa/hypercube/blob/6564a9e65ccd9ed7e7a71bd643f183e1ec645b29/hypercube/base_cube.py#L774-L809
def slice_iter(self, *dim_strides, **kwargs): """ Recursively iterate over the (dimension, stride) tuples specified in dim_strides, returning the chunk start offsets for each specified dimensions. For example, the following call effectively produces 2 loops over the 'ntime' and 'nchan' dimensions in chunks of 10 and 4 respectively. .. code-block:: python A = np.ones(size=(100, 4)) for ts, cs in cube.endpoint_iter(('ntime', 10), ('nchan', 4)) A[ts, cs].sum() for i cube.endpoint_iter(('ntime', 10), ('nchan', 4)) A[i].sum() Parameters ---------- *dim_strides : list list of (dimension, stride) tuples Returns ------- iterator Iterator producing a tuple of slices for each dimension :code:`(slice(d0_low, d0_high, 1), slice(d1_low, d1_high,1))` """ def _create_slices(*args): return tuple(slice(s,e,1) for (s, e) in args) return (_create_slices(*s) for s in self.endpoint_iter( *dim_strides, **kwargs))
[ "def", "slice_iter", "(", "self", ",", "*", "dim_strides", ",", "*", "*", "kwargs", ")", ":", "def", "_create_slices", "(", "*", "args", ")", ":", "return", "tuple", "(", "slice", "(", "s", ",", "e", ",", "1", ")", "for", "(", "s", ",", "e", ")...
Recursively iterate over the (dimension, stride) tuples specified in dim_strides, returning the chunk start offsets for each specified dimensions. For example, the following call effectively produces 2 loops over the 'ntime' and 'nchan' dimensions in chunks of 10 and 4 respectively. .. code-block:: python A = np.ones(size=(100, 4)) for ts, cs in cube.endpoint_iter(('ntime', 10), ('nchan', 4)) A[ts, cs].sum() for i cube.endpoint_iter(('ntime', 10), ('nchan', 4)) A[i].sum() Parameters ---------- *dim_strides : list list of (dimension, stride) tuples Returns ------- iterator Iterator producing a tuple of slices for each dimension :code:`(slice(d0_low, d0_high, 1), slice(d1_low, d1_high,1))`
[ "Recursively", "iterate", "over", "the", "(", "dimension", "stride", ")", "tuples", "specified", "in", "dim_strides", "returning", "the", "chunk", "start", "offsets", "for", "each", "specified", "dimensions", "." ]
python
train
wtsi-hgi/python-git-subrepo
gitsubrepo/subrepo.py
https://github.com/wtsi-hgi/python-git-subrepo/blob/bb2eb2bd9a7e51b862298ddb4168cc5b8633dad0/gitsubrepo/subrepo.py#L144-L159
def pull(directory: str) -> Commit: """ Pulls the subrepo that has been cloned into the given directory. :param directory: the directory containing the subrepo :return: the commit the subrepo is on """ if not os.path.exists(directory): raise ValueError(f"No subrepo found in \"{directory}\"") try: result = run([GIT_COMMAND, _GIT_SUBREPO_COMMAND, _GIT_SUBREPO_PULL_COMMAND, _GIT_SUBREPO_VERBOSE_FLAG, get_directory_relative_to_git_root(directory)], execution_directory=get_git_root_directory(directory)) except RunException as e: if "Can't pull subrepo. Working tree has changes" in e.stderr: raise UnstagedChangeException() from e return status(directory)[2]
[ "def", "pull", "(", "directory", ":", "str", ")", "->", "Commit", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "raise", "ValueError", "(", "f\"No subrepo found in \\\"{directory}\\\"\"", ")", "try", ":", "result", "=", "r...
Pulls the subrepo that has been cloned into the given directory. :param directory: the directory containing the subrepo :return: the commit the subrepo is on
[ "Pulls", "the", "subrepo", "that", "has", "been", "cloned", "into", "the", "given", "directory", ".", ":", "param", "directory", ":", "the", "directory", "containing", "the", "subrepo", ":", "return", ":", "the", "commit", "the", "subrepo", "is", "on" ]
python
train
GeospatialPython/pyshp
shapefile.py
https://github.com/GeospatialPython/pyshp/blob/71231ddc5aa54f155d4f0563c56006fffbfc84e7/shapefile.py#L635-L650
def load(self, shapefile=None): """Opens a shapefile from a filename or file-like object. Normally this method would be called by the constructor with the file name as an argument.""" if shapefile: (shapeName, ext) = os.path.splitext(shapefile) self.shapeName = shapeName self.load_shp(shapeName) self.load_shx(shapeName) self.load_dbf(shapeName) if not (self.shp or self.dbf): raise ShapefileException("Unable to open %s.dbf or %s.shp." % (shapeName, shapeName)) if self.shp: self.__shpHeader() if self.dbf: self.__dbfHeader()
[ "def", "load", "(", "self", ",", "shapefile", "=", "None", ")", ":", "if", "shapefile", ":", "(", "shapeName", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "shapefile", ")", "self", ".", "shapeName", "=", "shapeName", "self", ".", ...
Opens a shapefile from a filename or file-like object. Normally this method would be called by the constructor with the file name as an argument.
[ "Opens", "a", "shapefile", "from", "a", "filename", "or", "file", "-", "like", "object", ".", "Normally", "this", "method", "would", "be", "called", "by", "the", "constructor", "with", "the", "file", "name", "as", "an", "argument", "." ]
python
train
confluentinc/confluent-kafka-python
confluent_kafka/__init__.py
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/__init__.py#L47-L102
def _resolve_plugins(plugins): """ Resolve embedded plugins from the wheel's library directory. For internal module use only. :param str plugins: The plugin.library.paths value """ import os from sys import platform # Location of __init__.py and the embedded library directory basedir = os.path.dirname(__file__) if platform in ('win32', 'cygwin'): paths_sep = ';' ext = '.dll' libdir = basedir elif platform in ('linux', 'linux2'): paths_sep = ':' ext = '.so' libdir = os.path.join(basedir, '.libs') elif platform == 'darwin': paths_sep = ':' ext = '.dylib' libdir = os.path.join(basedir, '.dylibs') else: # Unknown platform, there are probably no embedded plugins. return plugins if not os.path.isdir(libdir): # No embedded library directory, probably not a wheel installation. return plugins resolved = [] for plugin in plugins.split(paths_sep): if '/' in plugin or '\\' in plugin: # Path specified, leave unchanged resolved.append(plugin) continue # See if the plugin can be found in the wheel's # embedded library directory. # The user might not have supplied a file extension, so try both. good = None for file in [plugin, plugin + ext]: fpath = os.path.join(libdir, file) if os.path.isfile(fpath): good = fpath break if good is not None: resolved.append(good) else: resolved.append(plugin) return paths_sep.join(resolved)
[ "def", "_resolve_plugins", "(", "plugins", ")", ":", "import", "os", "from", "sys", "import", "platform", "# Location of __init__.py and the embedded library directory", "basedir", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "if", "platform", "in",...
Resolve embedded plugins from the wheel's library directory. For internal module use only. :param str plugins: The plugin.library.paths value
[ "Resolve", "embedded", "plugins", "from", "the", "wheel", "s", "library", "directory", "." ]
python
train
vmware/pyvmomi
pyVmomi/DynamicTypeManagerHelper.py
https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/pyVmomi/DynamicTypeManagerHelper.py#L145-L151
def _ConvertAnnotations(self, annotations): """ Convert annotations to pyVmomi flags """ flags = 0 if annotations: for annotation in annotations: flags |= self._mapFlags.get(annotation.name, 0) return flags
[ "def", "_ConvertAnnotations", "(", "self", ",", "annotations", ")", ":", "flags", "=", "0", "if", "annotations", ":", "for", "annotation", "in", "annotations", ":", "flags", "|=", "self", ".", "_mapFlags", ".", "get", "(", "annotation", ".", "name", ",", ...
Convert annotations to pyVmomi flags
[ "Convert", "annotations", "to", "pyVmomi", "flags" ]
python
train
CalebBell/fluids
fluids/two_phase_voidage.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/two_phase_voidage.py#L517-L567
def Nishino_Yamazaki(x, rhol, rhog): r'''Calculates void fraction in two-phase flow according to the model presented in [1]_ as shown in [2]_. .. math:: \alpha = 1 - \left(\frac{1-x}{x}\frac{\rho_g}{\rho_l}\right)^{0.5} \alpha_h^{0.5} Parameters ---------- x : float Quality at the specific tube interval [] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the gas [kg/m^3] Returns ------- alpha : float Void fraction (area of gas / total area of channel), [-] Notes ----- [1]_ is in Japanese. [3]_ either shows this model as iterative in terms of voidage, or forgot to add a H subscript to its second voidage term; the second is believed more likely. Examples -------- >>> Nishino_Yamazaki(.4, 800, 2.5) 0.931694583962682 References ---------- .. [1] Nishino, Haruo, and Yasaburo Yamazaki. "A New Method of Evaluating Steam Volume Fractions in Boiling Systems." Journal of the Atomic Energy Society of Japan / Atomic Energy Society of Japan 5, no. 1 (1963): 39-46. doi:10.3327/jaesj.5.39. .. [2] Xu, Yu, and Xiande Fang. "Correlations of Void Fraction for Two- Phase Refrigerant Flow in Pipes." Applied Thermal Engineering 64, no. 1-2 (March 2014): 242–51. doi:10.1016/j.applthermaleng.2013.12.032. .. [3] Woldesemayat, Melkamu A., and Afshin J. Ghajar. "Comparison of Void Fraction Correlations for Different Flow Patterns in Horizontal and Upward Inclined Pipes." International Journal of Multiphase Flow 33, no. 4 (April 2007): 347-370. doi:10.1016/j.ijmultiphaseflow.2006.09.004. ''' alpha_h = homogeneous(x, rhol, rhog) return 1 - ((1-x)*rhog/x/rhol)**0.5*alpha_h**0.5
[ "def", "Nishino_Yamazaki", "(", "x", ",", "rhol", ",", "rhog", ")", ":", "alpha_h", "=", "homogeneous", "(", "x", ",", "rhol", ",", "rhog", ")", "return", "1", "-", "(", "(", "1", "-", "x", ")", "*", "rhog", "/", "x", "/", "rhol", ")", "**", ...
r'''Calculates void fraction in two-phase flow according to the model presented in [1]_ as shown in [2]_. .. math:: \alpha = 1 - \left(\frac{1-x}{x}\frac{\rho_g}{\rho_l}\right)^{0.5} \alpha_h^{0.5} Parameters ---------- x : float Quality at the specific tube interval [] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the gas [kg/m^3] Returns ------- alpha : float Void fraction (area of gas / total area of channel), [-] Notes ----- [1]_ is in Japanese. [3]_ either shows this model as iterative in terms of voidage, or forgot to add a H subscript to its second voidage term; the second is believed more likely. Examples -------- >>> Nishino_Yamazaki(.4, 800, 2.5) 0.931694583962682 References ---------- .. [1] Nishino, Haruo, and Yasaburo Yamazaki. "A New Method of Evaluating Steam Volume Fractions in Boiling Systems." Journal of the Atomic Energy Society of Japan / Atomic Energy Society of Japan 5, no. 1 (1963): 39-46. doi:10.3327/jaesj.5.39. .. [2] Xu, Yu, and Xiande Fang. "Correlations of Void Fraction for Two- Phase Refrigerant Flow in Pipes." Applied Thermal Engineering 64, no. 1-2 (March 2014): 242–51. doi:10.1016/j.applthermaleng.2013.12.032. .. [3] Woldesemayat, Melkamu A., and Afshin J. Ghajar. "Comparison of Void Fraction Correlations for Different Flow Patterns in Horizontal and Upward Inclined Pipes." International Journal of Multiphase Flow 33, no. 4 (April 2007): 347-370. doi:10.1016/j.ijmultiphaseflow.2006.09.004.
[ "r", "Calculates", "void", "fraction", "in", "two", "-", "phase", "flow", "according", "to", "the", "model", "presented", "in", "[", "1", "]", "_", "as", "shown", "in", "[", "2", "]", "_", "." ]
python
train
fedora-infra/fedmsg
fedmsg/meta/__init__.py
https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/meta/__init__.py#L251-L260
def msg2long_form(msg, processor, **config): """ Return a 'long form' text representation of a message. For most message, this will just default to the terse subtitle, but for some messages a long paragraph-structured block of text may be returned. """ result = processor.long_form(msg, **config) if not result: result = processor.subtitle(msg, **config) return result
[ "def", "msg2long_form", "(", "msg", ",", "processor", ",", "*", "*", "config", ")", ":", "result", "=", "processor", ".", "long_form", "(", "msg", ",", "*", "*", "config", ")", "if", "not", "result", ":", "result", "=", "processor", ".", "subtitle", ...
Return a 'long form' text representation of a message. For most message, this will just default to the terse subtitle, but for some messages a long paragraph-structured block of text may be returned.
[ "Return", "a", "long", "form", "text", "representation", "of", "a", "message", "." ]
python
train
Azure/azure-sdk-for-python
azure-applicationinsights/azure/applicationinsights/operations/events_operations.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-applicationinsights/azure/applicationinsights/operations/events_operations.py#L36-L143
def get_by_type(
        self, app_id, event_type, timespan=None, filter=None, search=None,
        orderby=None, select=None, skip=None, top=None, format=None,
        count=None, apply=None, custom_headers=None, raw=False,
        **operation_config):
    """Execute OData query.

    Executes an OData query for events.

    :param app_id: ID of the application. This is Application ID from the
     API Access settings blade in the Azure portal.
    :type app_id: str
    :param event_type: The type of events to query; either a standard
     event type (`traces`, `customEvents`, `pageViews`, `requests`,
     `dependencies`, `exceptions`, `availabilityResults`) or `$all` to
     query across all event types. Possible values include: '$all',
     'traces', 'customEvents', 'pageViews', 'browserTimings', 'requests',
     'dependencies', 'exceptions', 'availabilityResults',
     'performanceCounters', 'customMetrics'
    :type event_type: str or ~azure.applicationinsights.models.EventType
    :param timespan: Optional. The timespan over which to retrieve events.
     This is an ISO8601 time period value. This timespan is applied in
     addition to any that are specified in the Odata expression.
    :type timespan: str
    :param filter: An expression used to filter the returned events
    :type filter: str
    :param search: A free-text search expression to match for whether a
     particular event should be returned
    :type search: str
    :param orderby: A comma-separated list of properties with \\"asc\\"
     (the default) or \\"desc\\" to control the order of returned events
    :type orderby: str
    :param select: Limits the properties to just those requested on each
     returned event
    :type select: str
    :param skip: The number of items to skip over before returning events
    :type skip: int
    :param top: The number of events to return
    :type top: int
    :param format: Format for the returned events
    :type format: str
    :param count: Request a count of matching items included with the
     returned events
    :type count: bool
    :param apply: An expression used for aggregation over returned events
    :type apply: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: EventsResults or ClientRawResponse if raw=true
    :rtype: ~azure.applicationinsights.models.EventsResults or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.applicationinsights.models.ErrorResponseException>`
    """
    # Construct URL from the operation's metadata template, filling in the
    # path placeholders after serializing/escaping them.
    url = self.get_by_type.metadata['url']
    path_format_arguments = {
        'appId': self._serialize.url("app_id", app_id, 'str'),
        'eventType': self._serialize.url("event_type", event_type, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters: only forward the OData options that were
    # actually supplied by the caller.
    query_parameters = {}
    if timespan is not None:
        query_parameters['timespan'] = self._serialize.query("timespan", timespan, 'str')
    if filter is not None:
        query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
    if search is not None:
        query_parameters['$search'] = self._serialize.query("search", search, 'str')
    if orderby is not None:
        query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, 'str')
    if skip is not None:
        query_parameters['$skip'] = self._serialize.query("skip", skip, 'int')
    if top is not None:
        query_parameters['$top'] = self._serialize.query("top", top, 'int')
    if format is not None:
        query_parameters['$format'] = self._serialize.query("format", format, 'str')
    if count is not None:
        query_parameters['$count'] = self._serialize.query("count", count, 'bool')
    if apply is not None:
        query_parameters['$apply'] = self._serialize.query("apply", apply, 'str')

    # Construct headers; caller-provided headers can override the defaults.
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if custom_headers:
        header_parameters.update(custom_headers)

    # Construct and send request (synchronous, non-streaming).
    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    # Anything other than HTTP 200 is surfaced as a typed service error.
    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('EventsResults', response)

    # raw=True wraps the deserialized body together with the raw response.
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
[ "def", "get_by_type", "(", "self", ",", "app_id", ",", "event_type", ",", "timespan", "=", "None", ",", "filter", "=", "None", ",", "search", "=", "None", ",", "orderby", "=", "None", ",", "select", "=", "None", ",", "skip", "=", "None", ",", "top", ...
Execute OData query. Executes an OData query for events. :param app_id: ID of the application. This is Application ID from the API Access settings blade in the Azure portal. :type app_id: str :param event_type: The type of events to query; either a standard event type (`traces`, `customEvents`, `pageViews`, `requests`, `dependencies`, `exceptions`, `availabilityResults`) or `$all` to query across all event types. Possible values include: '$all', 'traces', 'customEvents', 'pageViews', 'browserTimings', 'requests', 'dependencies', 'exceptions', 'availabilityResults', 'performanceCounters', 'customMetrics' :type event_type: str or ~azure.applicationinsights.models.EventType :param timespan: Optional. The timespan over which to retrieve events. This is an ISO8601 time period value. This timespan is applied in addition to any that are specified in the Odata expression. :type timespan: str :param filter: An expression used to filter the returned events :type filter: str :param search: A free-text search expression to match for whether a particular event should be returned :type search: str :param orderby: A comma-separated list of properties with \\"asc\\" (the default) or \\"desc\\" to control the order of returned events :type orderby: str :param select: Limits the properties to just those requested on each returned event :type select: str :param skip: The number of items to skip over before returning events :type skip: int :param top: The number of events to return :type top: int :param format: Format for the returned events :type format: str :param count: Request a count of matching items included with the returned events :type count: bool :param apply: An expression used for aggregation over returned events :type apply: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: EventsResults or ClientRawResponse if raw=true :rtype: ~azure.applicationinsights.models.EventsResults or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.applicationinsights.models.ErrorResponseException>`
[ "Execute", "OData", "query", "." ]
python
test
spyder-ide/spyder
spyder/widgets/tabs.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/tabs.py#L208-L228
def dropEvent(self, event):
    """Override Qt method.

    Complete a tab drag-and-drop: read source index and tab-bar identity
    from the event's MIME data, then either move the tab within this bar
    or emit a cross-tabbar move signal.
    """
    mimeData = event.mimeData()
    # Index the tab had in the bar where the drag started (stored in the
    # MIME payload under "source-index").
    index_from = int(mimeData.data("source-index"))
    # Tab index under the drop position; tabAt() returns -1 when dropping
    # past the last tab, in which case the tab goes to the end.
    index_to = self.tabAt(event.pos())
    if index_to == -1:
        index_to = self.count()
    if int(mimeData.data("tabbar-id")) != id(self):
        # Drop originates from a different tab bar: use the
        # (str, int, int) overload of sig_move_tab so the receiver can
        # locate the source tab widget by its id string.
        tabwidget_from = to_text_string(mimeData.data("tabwidget-id"))
        # We pass self object ID as a QString, because otherwise it would
        # depend on the platform: long for 64bit, int for 32bit. Replacing
        # by long all the time is not working on some 32bit platforms
        # (see Issue 1094, Issue 1098)
        self.sig_move_tab[(str, int, int)].emit(tabwidget_from,
                                                index_from, index_to)
        event.acceptProposedAction()
    elif index_from != index_to:
        # Same tab bar and an actual position change: plain (int, int)
        # overload is enough.
        self.sig_move_tab.emit(index_from, index_to)
        event.acceptProposedAction()
    QTabBar.dropEvent(self, event)
[ "def", "dropEvent", "(", "self", ",", "event", ")", ":", "mimeData", "=", "event", ".", "mimeData", "(", ")", "index_from", "=", "int", "(", "mimeData", ".", "data", "(", "\"source-index\"", ")", ")", "index_to", "=", "self", ".", "tabAt", "(", "event"...
Override Qt method
[ "Override", "Qt", "method" ]
python
train
PmagPy/PmagPy
SPD/lib/lib_ptrm_statistics.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_ptrm_statistics.py#L73-L83
def get_DRAT(delta_x_prime, delta_y_prime, max_ptrm_check):
    """
    Input: TRM length of best fit line (delta_x_prime),
           NRM length of best fit line (delta_y_prime),
           max_ptrm_check
    Output: DRAT (maximum difference produced by a ptrm check normed by
            best fit line), length of best fit line

    :param delta_x_prime: TRM length of the best-fit line
    :param delta_y_prime: NRM length of the best-fit line
    :param max_ptrm_check: maximum difference produced by a pTRM check
    :return: tuple (DRAT in percent, L the best-fit line length)
    """
    # Length of the best-fit line (Euclidean norm of the two components).
    L = numpy.sqrt(delta_x_prime ** 2 + delta_y_prime ** 2)
    # L is always a float (numpy.sqrt), so true division is exactly what
    # the Python-2 compatibility shim `old_div` computed here; the shim is
    # unnecessary and has been removed.
    DRAT = (max_ptrm_check / L) * 100
    return DRAT, L
[ "def", "get_DRAT", "(", "delta_x_prime", ",", "delta_y_prime", ",", "max_ptrm_check", ")", ":", "L", "=", "numpy", ".", "sqrt", "(", "delta_x_prime", "**", "2", "+", "delta_y_prime", "**", "2", ")", "DRAT", "=", "(", "old_div", "(", "max_ptrm_check", ",", ...
Input: TRM length of best fit line (delta_x_prime), NRM length of best fit line, max_ptrm_check Output: DRAT (maximum difference produced by a ptrm check normed by best fit line), length best fit line
[ "Input", ":", "TRM", "length", "of", "best", "fit", "line", "(", "delta_x_prime", ")", "NRM", "length", "of", "best", "fit", "line", "max_ptrm_check", "Output", ":", "DRAT", "(", "maximum", "difference", "produced", "by", "a", "ptrm", "check", "normed", "b...
python
train
user-cont/conu
conu/utils/__init__.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/utils/__init__.py#L112-L141
def run_cmd(cmd, return_output=False, ignore_status=False, log_output=True, **kwargs):
    """
    run provided command on host system using the same user as you invoked this code,
    raises subprocess.CalledProcessError if it fails

    :param cmd: list of str
    :param return_output: bool, return output of the command
    :param ignore_status: bool, do not fail in case nonzero return code
    :param log_output: bool, if True, log output to debug log
    :param kwargs: pass keyword arguments to subprocess.check_* functions; for more info,
            please check `help(subprocess.Popen)`
    :return: None, str (command output when return_output is set) or int
             (return code, when ignore_status is set and return_output is not)
    """
    logger.debug('command: "%s"', ' '.join(cmd))
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               universal_newlines=True, **kwargs)
    output = process.communicate()[0]
    if log_output:
        logger.debug(output)
    # A negative return code means the child was killed by a signal, which
    # is also a failure; the previous `> 0` check silently ignored it.
    if process.returncode != 0:
        if ignore_status:
            if return_output:
                return output
            return process.returncode
        # Attach the captured output so callers can inspect e.output.
        raise subprocess.CalledProcessError(cmd=cmd,
                                            returncode=process.returncode,
                                            output=output)
    if return_output:
        return output
[ "def", "run_cmd", "(", "cmd", ",", "return_output", "=", "False", ",", "ignore_status", "=", "False", ",", "log_output", "=", "True", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "'command: \"%s\"'", "%", "' '", ".", "join", "(", "cm...
run provided command on host system using the same user as you invoked this code, raises subprocess.CalledProcessError if it fails :param cmd: list of str :param return_output: bool, return output of the command :param ignore_status: bool, do not fail in case nonzero return code :param log_output: bool, if True, log output to debug log :param kwargs: pass keyword arguments to subprocess.check_* functions; for more info, please check `help(subprocess.Popen)` :return: None or str
[ "run", "provided", "command", "on", "host", "system", "using", "the", "same", "user", "as", "you", "invoked", "this", "code", "raises", "subprocess", ".", "CalledProcessError", "if", "it", "fails" ]
python
train
cogniteev/docido-python-sdk
docido_sdk/toolbox/collections_ext.py
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/collections_ext.py#L133-L163
def flatten_dict(d, prefix='', sep='.'):
    """In place dict flattening.

    Collapse nested mappings of ``d`` into ``d`` itself, joining nesting
    levels with ``sep`` (``{'a': {'b': 1}}`` becomes ``{'a.b': 1}``).
    Lists are expanded element by element using the element index as key
    segment.  When a flattened key collides with an existing key, a
    numeric suffix (``<sep>2``, ``<sep>3``, ...) is appended so no value
    is lost.

    :param dict d: mapping to flatten, modified in place
    :param prefix: optional key prefix prepended to every produced key
    :param sep: separator inserted between nesting levels
    :return: ``d`` itself (also mutated in place)
    """
    # Python 3 keeps the ABCs in `collections.abc` (plain
    # `collections.Mapping` was removed in 3.10); fall back for Python 2.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping

    def apply_and_resolve_conflicts(dest, item, prefix):
        # Flatten `item` under `prefix` and merge the result into `dest`,
        # suffixing clashing keys with `<sep>2`, `<sep>3`, ...
        for k, v in flatten_dict(item, prefix=prefix, sep=sep).items():
            new_key = k
            i = 2
            while new_key in dest:
                new_key = '{key}{sep}{index}'.format(key=k, sep=sep, index=i)
                i += 1
            dest[new_key] = v

    for key in list(d.keys()):
        # The original spelled this `any(unicode(prefix))`, which is a
        # Python-2-only way of saying "prefix is a non-empty string" and
        # raised NameError on Python 3.
        if prefix:
            new_key = u'{p}{sep}{key}'.format(p=prefix, key=key, sep=sep)
        else:
            new_key = key
        value = d[key]
        if isinstance(value, (dict, Mapping)):
            apply_and_resolve_conflicts(d, d.pop(key), new_key)
        elif isinstance(value, six.string_types):
            d[new_key] = d.pop(key)
        elif isinstance(value, list):
            # Mappings are already caught by the first branch, so only
            # plain lists reach this point (the original listed Mapping
            # here too, unreachably).
            array = d.pop(key)
            for index, element in enumerate(array):
                # NOTE(review): index keys are built from the bare `key`,
                # not `new_key`, so list elements drop the prefix; this
                # mirrors the original behavior — confirm it is intended.
                index_key = '{key}{sep}{i}'.format(key=key, sep=sep, i=index)
                # Find a free key on collision.  The original bumped the
                # loop variable without recomputing `index_key`, which
                # looped forever on a clash and then indexed the wrong
                # element.
                suffix = index
                while index_key in d:
                    suffix += 1
                    index_key = '{key}{sep}{i}'.format(key=key, sep=sep,
                                                       i=suffix)
                apply_and_resolve_conflicts(d, element, index_key)
        else:
            d[new_key] = d.pop(key)
    return d
[ "def", "flatten_dict", "(", "d", ",", "prefix", "=", "''", ",", "sep", "=", "'.'", ")", ":", "def", "apply_and_resolve_conflicts", "(", "dest", ",", "item", ",", "prefix", ")", ":", "for", "k", ",", "v", "in", "flatten_dict", "(", "item", ",", "prefi...
In place dict flattening.
[ "In", "place", "dict", "flattening", "." ]
python
train
mongodb/mongo-python-driver
pymongo/database.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/database.py#L344-L410
def create_collection(self, name, codec_options=None, read_preference=None,
                      write_concern=None, read_concern=None, session=None,
                      **kwargs):
    """Create a new :class:`~pymongo.collection.Collection` in this
    database.

    Collection creation is normally automatic; use this method only when
    you need to specify options at creation time (for example ``capped``,
    ``size``, ``max``).  Raises
    :class:`~pymongo.errors.CollectionInvalid` if the collection already
    exists.

    :Parameters:
      - `name`: the name of the collection to create
      - `codec_options` (optional): an instance of
        :class:`~bson.codec_options.CodecOptions`; defaults to this
        database's :attr:`codec_options`
      - `read_preference` (optional): the read preference to use;
        defaults to this database's :attr:`read_preference`
      - `write_concern` (optional): an instance of
        :class:`~pymongo.write_concern.WriteConcern`; defaults to this
        database's :attr:`write_concern`
      - `read_concern` (optional): an instance of
        :class:`~pymongo.read_concern.ReadConcern`; defaults to this
        database's :attr:`read_concern`
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`
      - `**kwargs` (optional): additional keyword arguments passed as
        options to the create collection command (e.g. ``collation``)
    """
    with self.__client._tmp_session(session) as tmp_session:
        # Probe for an existing collection of that name first, reusing
        # the same (possibly temporary) session for the whole operation.
        existing = self.list_collection_names(
            filter={"name": name}, session=tmp_session)
        if name in existing:
            raise CollectionInvalid("collection %s already exists" % name)
        new_collection = Collection(
            self, name, True, codec_options, read_preference,
            write_concern, read_concern, session=tmp_session, **kwargs)
        return new_collection
[ "def", "create_collection", "(", "self", ",", "name", ",", "codec_options", "=", "None", ",", "read_preference", "=", "None", ",", "write_concern", "=", "None", ",", "read_concern", "=", "None", ",", "session", "=", "None", ",", "*", "*", "kwargs", ")", ...
Create a new :class:`~pymongo.collection.Collection` in this database. Normally collection creation is automatic. This method should only be used to specify options on creation. :class:`~pymongo.errors.CollectionInvalid` will be raised if the collection already exists. Options should be passed as keyword arguments to this method. Supported options vary with MongoDB release. Some examples include: - "size": desired initial size for the collection (in bytes). For capped collections this size is the max size of the collection. - "capped": if True, this is a capped collection - "max": maximum number of objects if capped (optional) See the MongoDB documentation for a full list of supported options by server version. :Parameters: - `name`: the name of the collection to create - `codec_options` (optional): An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the default) the :attr:`codec_options` of this :class:`Database` is used. - `read_preference` (optional): The read preference to use. If ``None`` (the default) the :attr:`read_preference` of this :class:`Database` is used. - `write_concern` (optional): An instance of :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the default) the :attr:`write_concern` of this :class:`Database` is used. - `read_concern` (optional): An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) the :attr:`read_concern` of this :class:`Database` is used. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command .. versionchanged:: 3.6 Added ``session`` parameter. .. versionchanged:: 3.4 Added the collation option. .. versionchanged:: 3.0 Added the codec_options, read_preference, and write_concern options. .. versionchanged:: 2.2 Removed deprecated argument: options
[ "Create", "a", "new", ":", "class", ":", "~pymongo", ".", "collection", ".", "Collection", "in", "this", "database", "." ]
python
train