repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
tgbugs/pyontutils
ilxutils/ilxutils/nltklib.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/nltklib.py#L36-L70
def sentence_similarity(sentence1, sentence2):
    """Compute the similarity of two sentences using WordNet.

    Each sentence is tokenized and POS-tagged, mapped to WordNet synsets,
    and scored by averaging, over words of ``sentence1``, the best path
    similarity against any synset of ``sentence2``.

    :param sentence1: first sentence (str)
    :param sentence2: second sentence (str)
    :returns: float similarity score in [0, 1]; 0 if nothing comparable
    """
    # Tokenize and tag
    sentence1 = pos_tag(word_tokenize(sentence1))
    sentence2 = pos_tag(word_tokenize(sentence2))

    # Get the synsets for the tagged words
    synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]
    synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]

    # Filter out the Nones (words with no WordNet mapping)
    synsets1 = [ss for ss in synsets1 if ss]
    synsets2 = [ss for ss in synsets2 if ss]

    score, count = 0.0, 0.0

    # For each word in the first sentence
    for synset in synsets1:
        # Fix: compute path_similarity ONCE per pair.  The original called
        # it twice per pair (once to filter, once for the value), doubling
        # the WordNet graph walks.
        # NOTE(review): truthiness filtering also drops 0.0 similarities,
        # not just None -- kept as-is to preserve the original scoring.
        best_score = [sim for sim in (synset.path_similarity(ss) for ss in synsets2) if sim]
        # Check that the similarity could have been computed
        if best_score:
            score += max(best_score)
            count += 1

    # Average the values over words that had a computable similarity
    if count > 0:
        score /= count
    else:
        score = 0
    return score
[ "def", "sentence_similarity", "(", "sentence1", ",", "sentence2", ")", ":", "# Tokenize and tag", "sentence1", "=", "pos_tag", "(", "word_tokenize", "(", "sentence1", ")", ")", "sentence2", "=", "pos_tag", "(", "word_tokenize", "(", "sentence2", ")", ")", "# Get...
compute the sentence similarity using Wordnet
[ "compute", "the", "sentence", "similarity", "using", "Wordnet" ]
python
train
MIT-LCP/wfdb-python
wfdb/io/record.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/record.py#L235-L255
def _adjust_datetime(self, sampfrom): """ Adjust date and time fields to reflect user input if possible. Helper function for the `_arrange_fields` of both Record and MultiRecord objects. """ if sampfrom: dt_seconds = sampfrom / self.fs if self.base_date and self.base_time: self.base_datetime = datetime.datetime.combine(self.base_date, self.base_time) self.base_datetime += datetime.timedelta(seconds=dt_seconds) self.base_date = self.base_datetime.date() self.base_time = self.base_datetime.time() # We can calculate the time even if there is no date elif self.base_time: tmp_datetime = datetime.datetime.combine( datetime.datetime.today().date(), self.base_time) self.base_time = (tmp_datetime + datetime.timedelta(seconds=dt_seconds)).time()
[ "def", "_adjust_datetime", "(", "self", ",", "sampfrom", ")", ":", "if", "sampfrom", ":", "dt_seconds", "=", "sampfrom", "/", "self", ".", "fs", "if", "self", ".", "base_date", "and", "self", ".", "base_time", ":", "self", ".", "base_datetime", "=", "dat...
Adjust date and time fields to reflect user input if possible. Helper function for the `_arrange_fields` of both Record and MultiRecord objects.
[ "Adjust", "date", "and", "time", "fields", "to", "reflect", "user", "input", "if", "possible", "." ]
python
train
bitesofcode/projexui
projexui/wizards/xscaffoldwizard/xscaffoldwizard.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/wizards/xscaffoldwizard/xscaffoldwizard.py#L136-L156
def propertyWidgetMap(self):
    """
    Builds the widget-to-property mapping for this page by scanning all
    child widgets for a 'propertyName' Qt property and resolving it
    against the page's scaffold.

    :return     {<projex.scaffold.Property>: <QtGui.QWidget>, ..}
    """
    mapping = {}
    scaffold = self.scaffold()

    # Walk every child widget; only those tagged with a propertyName
    # that resolves to a scaffold property participate in the map.
    for widget in self.findChildren(QtGui.QWidget):
        propname = unwrapVariant(widget.property('propertyName'))
        if not propname:
            continue

        prop = scaffold.property(propname)
        if prop:
            mapping[prop] = widget

    return mapping
[ "def", "propertyWidgetMap", "(", "self", ")", ":", "out", "=", "{", "}", "scaffold", "=", "self", ".", "scaffold", "(", ")", "# initialize the scaffold properties\r", "for", "widget", "in", "self", ".", "findChildren", "(", "QtGui", ".", "QWidget", ")", ":",...
Returns the mapping for this page between its widgets and its scaffold property. :return {<projex.scaffold.Property>: <QtGui.QWidget>, ..}
[ "Returns", "the", "mapping", "for", "this", "page", "between", "its", "widgets", "and", "its", "scaffold", "property", ".", ":", "return", "{", "<projex", ".", "scaffold", ".", "Property", ">", ":", "<QtGui", ".", "QWidget", ">", "..", "}" ]
python
train
Xython/Linq.py
linq/standard/dict.py
https://github.com/Xython/Linq.py/blob/ffb65f92f1df0d8161d5f835f5947554f6f33d72/linq/standard/dict.py#L163-L180
def Drop(self: dict, n): """ [ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [1, 2] } ] """ n = len(self) - n if n <= 0: yield from self.items() else: for i, e in enumerate(self.items()): if i == n: break yield e
[ "def", "Drop", "(", "self", ":", "dict", ",", "n", ")", ":", "n", "=", "len", "(", "self", ")", "-", "n", "if", "n", "<=", "0", ":", "yield", "from", "self", ".", "items", "(", ")", "else", ":", "for", "i", ",", "e", "in", "enumerate", "(",...
[ { 'self': [1, 2, 3, 4, 5], 'n': 3, 'assert': lambda ret: list(ret) == [1, 2] } ]
[ "[", "{", "self", ":", "[", "1", "2", "3", "4", "5", "]", "n", ":", "3", "assert", ":", "lambda", "ret", ":", "list", "(", "ret", ")", "==", "[", "1", "2", "]", "}", "]" ]
python
train
ansible/tower-cli
tower_cli/resources/job.py
https://github.com/ansible/tower-cli/blob/a2b151fed93c47725018d3034848cb3a1814bed7/tower_cli/resources/job.py#L79-L252
def launch(self, job_template=None, monitor=False, wait=False,
           timeout=None, no_input=True, extra_vars=None, **kwargs):
    """Launch a new job based on a job template.

    Creates a new job in Ansible Tower, immediately starts it, and
    returns back an ID in order for its status to be monitored.

    =====API DOCS=====
    Launch a new job based on a job template.

    :param job_template: Primary key or name of the job template to launch new job.
    :type job_template: str
    :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched job rather
                    than exiting with a success.
    :type monitor: bool
    :param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress.
    :type wait: bool
    :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number
                    of seconds.
    :type timeout: int
    :param no_input: Flag that if set, suppress any requests for input.
    :type no_input: bool
    :param extra_vars: yaml formatted texts that contains extra variables to pass on.
    :type extra_vars: array of strings
    :param diff_mode: Specify diff mode for job template to run.
    :type diff_mode: bool
    :param limit: Specify host limit for job template to run.
    :type limit: str
    :param tags: Specify tagged actions in the playbook to run.
    :type tags: str
    :param skip_tags: Specify tagged actions in the playbook to omit.
    :type skip_tags: str
    :param job_type: Specify job type for job template to run.
    :type job_type: str
    :param verbosity: Specify verbosity of the playbook run.
    :type verbosity: int
    :param inventory: Specify machine credential for job template to run.
    :type inventory: str
    :param credential: Specify machine credential for job template to run.
    :type credential: str
    :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent
              ``wait`` call if ``wait`` flag is on; Result of subsequent ``status`` call if none of
              the two flags are on.
    :rtype: dict
    =====API DOCS=====
    """
    # Get the job template from Ansible Tower.
    # This is used as the baseline for starting the job.
    tags = kwargs.get('tags', None)
    jt_resource = get_resource('job_template')
    jt = jt_resource.get(job_template)

    # Update the job data by adding an automatically-generated job name,
    # and removing the ID.
    data = {}
    if tags:
        data['job_tags'] = tags

    # Initialize an extra_vars list that starts with the job template
    # preferences first, if they exist
    extra_vars_list = []
    # NOTE(review): `data` was just created above without an 'extra_vars'
    # key, so this branch looks unreachable as written -- confirm intent.
    if 'extra_vars' in data and len(data['extra_vars']) > 0:
        # But only do this for versions before 2.3
        debug.log('Getting version of Tower.', header='details')
        r = client.get('/config/')
        if LooseVersion(r.json()['version']) < LooseVersion('2.4'):
            extra_vars_list = [data['extra_vars']]

    # Add the runtime extra_vars to this list
    if extra_vars:
        extra_vars_list += list(extra_vars)  # accept tuples

    # If the job template requires prompting for extra variables,
    # do so (unless --no-input is set).
    if jt.get('ask_variables_on_launch', False) and not no_input \
            and not extra_vars:
        # If JT extra_vars are JSON, echo them to user as YAML
        initial = parser.process_extra_vars(
            [jt['extra_vars']], force_json=False
        )
        initial = '\n'.join((
            '# Specify extra variables (if any) here as YAML.',
            '# Lines beginning with "#" denote comments.',
            initial,
        ))
        # Open the user's editor; an unchanged buffer means "no input".
        extra_vars = click.edit(initial) or ''
        if extra_vars != initial:
            extra_vars_list = [extra_vars]

    # Data is starting out with JT variables, and we only want to
    # include extra_vars that come from the algorithm here.
    data.pop('extra_vars', None)

    # Replace/populate data fields if prompted.
    modified = set()
    for resource in PROMPT_LIST:
        if jt.pop('ask_' + resource + '_on_launch', False) and not no_input:
            resource_object = kwargs.get(resource, None)
            if type(resource_object) == types.Related:
                # Resolve a related-resource lookup down to its primary key.
                resource_class = get_resource(resource)
                resource_object = resource_class.get(resource).pop('id', None)
            if resource_object is None:
                debug.log('{0} is asked at launch but not provided'.
                          format(resource), header='warning')
            elif resource != 'tags':
                data[resource] = resource_object
                modified.add(resource)

    # Dump extra_vars into JSON string for launching job
    if len(extra_vars_list) > 0:
        data['extra_vars'] = parser.process_extra_vars(
            extra_vars_list, force_json=True
        )

    # Create the new job in Ansible Tower.
    start_data = {}
    endpoint = '/job_templates/%d/launch/' % jt['id']
    if 'extra_vars' in data and len(data['extra_vars']) > 0:
        start_data['extra_vars'] = data['extra_vars']
    if tags:
        start_data['job_tags'] = data['job_tags']
    for resource in PROMPT_LIST:
        if resource in modified:
            start_data[resource] = data[resource]

    # There's a non-trivial chance that we are going to need some
    # additional information to start the job; in particular, many jobs
    # rely on passwords entered at run-time.
    #
    # If there are any such passwords on this job, ask for them now.
    debug.log('Asking for information necessary to start the job.',
              header='details')
    job_start_info = client.get(endpoint).json()
    for password in job_start_info.get('passwords_needed_to_start', []):
        start_data[password] = getpass('Password for %s: ' % password)

    # Actually start the job.
    debug.log('Launching the job.', header='details')
    self._pop_none(kwargs)
    kwargs.update(start_data)
    job_started = client.post(endpoint, data=kwargs)

    # Get the job ID from the result.
    job_id = job_started.json()['id']

    # If returning json indicates any ignored fields, display it in
    # verbose mode.
    if job_started.text == '':
        ignored_fields = {}
    else:
        ignored_fields = job_started.json().get('ignored_fields', {})
    has_ignored_fields = False
    for key, value in ignored_fields.items():
        if value and value != '{}':
            if not has_ignored_fields:
                debug.log('List of ignored fields on the server side:',
                          header='detail')
                has_ignored_fields = True
            debug.log('{0}: {1}'.format(key, value))

    # Get some information about the running job to print
    result = self.status(pk=job_id, detail=True)
    result['changed'] = True

    # If we were told to monitor the job once it started, then call
    # monitor from here.
    if monitor:
        return self.monitor(job_id, timeout=timeout)
    elif wait:
        return self.wait(job_id, timeout=timeout)

    return result
[ "def", "launch", "(", "self", ",", "job_template", "=", "None", ",", "monitor", "=", "False", ",", "wait", "=", "False", ",", "timeout", "=", "None", ",", "no_input", "=", "True", ",", "extra_vars", "=", "None", ",", "*", "*", "kwargs", ")", ":", "...
Launch a new job based on a job template. Creates a new job in Ansible Tower, immediately starts it, and returns back an ID in order for its status to be monitored. =====API DOCS===== Launch a new job based on a job template. :param job_template: Primary key or name of the job template to launch new job. :type job_template: str :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched job rather than exiting with a success. :type monitor: bool :param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress. :type wait: bool :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number of seconds. :type timeout: int :param no_input: Flag that if set, suppress any requests for input. :type no_input: bool :param extra_vars: yaml formatted texts that contains extra variables to pass on. :type extra_vars: array of strings :param diff_mode: Specify diff mode for job template to run. :type diff_mode: bool :param limit: Specify host limit for job template to run. :type limit: str :param tags: Specify tagged actions in the playbook to run. :type tags: str :param skip_tags: Specify tagged actions in the playbook to omit. :type skip_tags: str :param job_type: Specify job type for job template to run. :type job_type: str :param verbosity: Specify verbosity of the playbook run. :type verbosity: int :param inventory: Specify machine credential for job template to run. :type inventory: str :param credential: Specify machine credential for job template to run. :type credential: str :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait`` call if ``wait`` flag is on; Result of subsequent ``status`` call if none of the two flags are on. :rtype: dict =====API DOCS=====
[ "Launch", "a", "new", "job", "based", "on", "a", "job", "template", "." ]
python
valid
riptano/ccm
ccmlib/node.py
https://github.com/riptano/ccm/blob/275699f79d102b5039b79cc17fa6305dccf18412/ccmlib/node.py#L451-L519
def watch_log_for(self, exprs, from_mark=None, timeout=600, process=None, verbose=False, filename='system.log'):
    """
    Watch the log until one or more (regular) expression are found.
    This methods when all the expressions have been found or the method
    timeouts (a TimeoutError is then raised). On successful completion,
    a list of pair (line matched, match object) is returned.

    If ``exprs`` is a single string, only the first match is returned
    (not a list); if the node's ``process`` exits cleanly before a match,
    None is returned.
    """
    start = time.time()
    # Normalize to a list of compiled patterns; a bare string means
    # "one expression, return the single match directly".
    tofind = [exprs] if isinstance(exprs, string_types) else exprs
    tofind = [re.compile(e) for e in tofind]
    matchings = []
    reads = ""
    if len(tofind) == 0:
        return None

    log_file = os.path.join(self.get_path(), 'logs', filename)
    output_read = False
    # Wait for the log file to appear, bounded by the same timeout.
    while not os.path.exists(log_file):
        time.sleep(.5)
        if start + timeout < time.time():
            raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) + " [" + self.name + "] Timed out waiting for {} to be created.".format(log_file))
        if process and not output_read:
            process.poll()
            if process.returncode is not None:
                self.print_process_output(self.name, process, verbose)
                output_read = True
                if process.returncode != 0:
                    raise RuntimeError()  # Shouldn't reuse RuntimeError but I'm lazy

    with open(log_file) as f:
        if from_mark:
            f.seek(from_mark)

        while True:
            # First, if we have a process to check, then check it.
            # Skip on Windows - stdout/stderr is cassandra.bat
            if not common.is_win() and not output_read:
                if process:
                    process.poll()
                    if process.returncode is not None:
                        self.print_process_output(self.name, process, verbose)
                        output_read = True
                        if process.returncode != 0:
                            raise RuntimeError()  # Shouldn't reuse RuntimeError but I'm lazy

            line = f.readline()
            if line:
                reads = reads + line
                # NOTE(review): tofind is mutated (remove) while being
                # iterated; if two patterns match the same line, the one
                # after the removed element is skipped for this line.
                for e in tofind:
                    m = e.search(line)
                    if m:
                        matchings.append((line, m))
                        tofind.remove(e)
                        if len(tofind) == 0:
                            return matchings[0] if isinstance(exprs, string_types) else matchings
            else:
                # yep, it's ugly
                time.sleep(1)
                if start + timeout < time.time():
                    raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) + " [" + self.name + "] Missing: " + str([e.pattern for e in tofind]) + ":\n" + reads[:50] + ".....\nSee {} for remainder".format(filename))
                if process:
                    if common.is_win():
                        if not self.is_running():
                            return None
                    else:
                        process.poll()
                        if process.returncode == 0:
                            return None
[ "def", "watch_log_for", "(", "self", ",", "exprs", ",", "from_mark", "=", "None", ",", "timeout", "=", "600", ",", "process", "=", "None", ",", "verbose", "=", "False", ",", "filename", "=", "'system.log'", ")", ":", "start", "=", "time", ".", "time", ...
Watch the log until one or more (regular) expression are found. This methods when all the expressions have been found or the method timeouts (a TimeoutError is then raised). On successful completion, a list of pair (line matched, match object) is returned.
[ "Watch", "the", "log", "until", "one", "or", "more", "(", "regular", ")", "expression", "are", "found", ".", "This", "methods", "when", "all", "the", "expressions", "have", "been", "found", "or", "the", "method", "timeouts", "(", "a", "TimeoutError", "is",...
python
train
a1ezzz/wasp-general
wasp_general/network/clients/webdav.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/clients/webdav.py#L136-L146
def change_directory(self, path, *args, **kwargs):
    """ :meth:`.WNetworkClientProto.change_directory` method implementation """
    dav = self.dav_client()
    saved_path = self.session_path()
    try:
        # session_path(path) both resolves and switches the session path;
        # reject the switch when the target is not a directory.
        target_is_dir = dav.is_dir(self.session_path(path))
        if target_is_dir is False:
            raise ValueError('Unable to change current working directory to non-directory entry')
    except Exception:
        # Roll the session back to where it was, then propagate.
        self.session_path(saved_path)
        raise
[ "def", "change_directory", "(", "self", ",", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "client", "=", "self", ".", "dav_client", "(", ")", "previous_path", "=", "self", ".", "session_path", "(", ")", "try", ":", "if", "client", "."...
:meth:`.WNetworkClientProto.change_directory` method implementation
[ ":", "meth", ":", ".", "WNetworkClientProto", ".", "change_directory", "method", "implementation" ]
python
train
holmes-app/holmes-alf
holmesalf/wrapper.py
https://github.com/holmes-app/holmes-alf/blob/4bf891831390ecfae818cf37d8ffc3a76fe9f1ec/holmesalf/wrapper.py#L37-L45
def async_client(self):
    """Asynchronous OAuth 2.0 Bearer client"""
    # Lazily build the client on first access, then reuse it.
    if not self._async_client:
        conf = self.config
        self._async_client = AlfAsyncClient(
            token_endpoint=conf.get('OAUTH_TOKEN_ENDPOINT'),
            client_id=conf.get('OAUTH_CLIENT_ID'),
            client_secret=conf.get('OAUTH_CLIENT_SECRET'),
        )
    return self._async_client
[ "def", "async_client", "(", "self", ")", ":", "if", "not", "self", ".", "_async_client", ":", "self", ".", "_async_client", "=", "AlfAsyncClient", "(", "token_endpoint", "=", "self", ".", "config", ".", "get", "(", "'OAUTH_TOKEN_ENDPOINT'", ")", ",", "client...
Asynchronous OAuth 2.0 Bearer client
[ "Asynchronous", "OAuth", "2", ".", "0", "Bearer", "client" ]
python
train
kellerza/pyqwikswitch
pyqwikswitch/threaded.py
https://github.com/kellerza/pyqwikswitch/blob/9d4f080048221eaee93e3eefcf641919ff1af586/pyqwikswitch/threaded.py#L122-L135
def update_from_devices(self):
    """Retrieve a list of &devices and values."""
    try:
        resp = requests.get(URL_DEVICES.format(self._url))
        if resp.status_code == 200:
            self.devices.update_devices(resp.json())
            return True
        # Non-200: report and signal failure.
        _LOGGER.error("Devices returned %s", resp.status_code)
        return False
    except requests.exceptions.ConnectionError as conn_err:
        _LOGGER.error("Could not connect: %s", conn_err)
    except Exception as err:  # pylint: disable=broad-except
        _LOGGER.error(err)
[ "def", "update_from_devices", "(", "self", ")", ":", "# _LOGGER.warning(\"update from devices\")", "try", ":", "rest", "=", "requests", ".", "get", "(", "URL_DEVICES", ".", "format", "(", "self", ".", "_url", ")", ")", "if", "rest", ".", "status_code", "!=", ...
Retrieve a list of &devices and values.
[ "Retrieve", "a", "list", "of", "&devices", "and", "values", "." ]
python
train
mitsei/dlkit
dlkit/json_/grading/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/objects.py#L223-L235
def clear_input_score_start_range(self):
    """Clears the input score start.

    raise:  NoAccess - ``Metadata.isRequired()`` or
            ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score
    metadata = self.get_input_score_start_range_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise errors.NoAccess()
    self._my_map['inputScoreStartRange'] = self._input_score_start_range_default
[ "def", "clear_input_score_start_range", "(", "self", ")", ":", "# Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score", "if", "(", "self", ".", "get_input_score_start_range_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", "."...
Clears the input score start. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
[ "Clears", "the", "input", "score", "start", "." ]
python
train
apple/turicreate
src/unity/python/turicreate/toolkits/recommender/util.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1763-L1772
def _get_popularity_baseline(self):
    """
    Builds a popularity recommender over the same data set this model
    was trained with, useful as a comparison baseline.
    """
    from .popularity_recommender import PopularityRecommender
    proxy_response = self.__proxy__.get_popularity_baseline()
    return PopularityRecommender(proxy_response)
[ "def", "_get_popularity_baseline", "(", "self", ")", ":", "response", "=", "self", ".", "__proxy__", ".", "get_popularity_baseline", "(", ")", "from", ".", "popularity_recommender", "import", "PopularityRecommender", "return", "PopularityRecommender", "(", "response", ...
Returns a new popularity model matching the data set this model was trained with. Can be used for comparison purposes.
[ "Returns", "a", "new", "popularity", "model", "matching", "the", "data", "set", "this", "model", "was", "trained", "with", ".", "Can", "be", "used", "for", "comparison", "purposes", "." ]
python
train
StackStorm/pybind
pybind/nos/v7_2_0/interface/hundredgigabitethernet/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/interface/hundredgigabitethernet/__init__.py#L544-L567
def _set_protected_port(self, v, load=False):
    """
    Setter method for protected_port, mapped from YANG variable
    /interface/hundredgigabitethernet/protected_port (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_protected_port is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_protected_port() directly.

    YANG Description: Protected mode on a switchport
    """
    # Generated pyangbind setter: coerce user-supplied values through the
    # container's unified type before storing.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=protected_port.protected_port, is_container='container', presence=False, yang_name="protected-port", rest_name="protected-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Protected mode on a switchport', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error describing the expected type.
        raise ValueError({
            'error-string': """protected_port must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=protected_port.protected_port, is_container='container', presence=False, yang_name="protected-port", rest_name="protected-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Protected mode on a switchport', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
        })
    self.__protected_port = t
    # Notify the parent tree of the change, if a change hook is present.
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_protected_port", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", ...
Setter method for protected_port, mapped from YANG variable /interface/hundredgigabitethernet/protected_port (container) If this variable is read-only (config: false) in the source YANG file, then _set_protected_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_protected_port() directly. YANG Description: Protected mode on a switchport
[ "Setter", "method", "for", "protected_port", "mapped", "from", "YANG", "variable", "/", "interface", "/", "hundredgigabitethernet", "/", "protected_port", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ...
python
train
carljm/django-adminfiles
adminfiles/listeners.py
https://github.com/carljm/django-adminfiles/blob/b01dc7be266305d575c11d5ff9a37ccac04a78c2/adminfiles/listeners.py#L15-L27
def _get_field(instance, field): """ This is here to support ``MarkupField``. It's a little ugly to have that support baked-in; other option would be to have a generic way (via setting?) to override how attribute values are fetched from content model instances. """ value = getattr(instance, field) if hasattr(value, 'raw'): value = value.raw return value
[ "def", "_get_field", "(", "instance", ",", "field", ")", ":", "value", "=", "getattr", "(", "instance", ",", "field", ")", "if", "hasattr", "(", "value", ",", "'raw'", ")", ":", "value", "=", "value", ".", "raw", "return", "value" ]
This is here to support ``MarkupField``. It's a little ugly to have that support baked-in; other option would be to have a generic way (via setting?) to override how attribute values are fetched from content model instances.
[ "This", "is", "here", "to", "support", "MarkupField", ".", "It", "s", "a", "little", "ugly", "to", "have", "that", "support", "baked", "-", "in", ";", "other", "option", "would", "be", "to", "have", "a", "generic", "way", "(", "via", "setting?", ")", ...
python
train
cloudsmith-io/cloudsmith-cli
cloudsmith_cli/core/ratelimits.py
https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/core/ratelimits.py#L85-L97
def maybe_rate_limit(client, headers, atexit=False):
    """Optionally pause the process based on suggested rate interval."""
    # pylint: disable=fixme
    # pylint: disable=global-statement
    # FIXME: Yes, I know this is not great. We'll fix it later. :-)
    global LAST_CLIENT, LAST_HEADERS

    # Throttle against the *previous* call's client/headers first...
    if LAST_CLIENT and LAST_HEADERS:
        rate_limit(LAST_CLIENT, LAST_HEADERS, atexit=atexit)

    # ...then remember the current pair for the next invocation.
    LAST_CLIENT, LAST_HEADERS = copy.copy(client), copy.copy(headers)
[ "def", "maybe_rate_limit", "(", "client", ",", "headers", ",", "atexit", "=", "False", ")", ":", "# pylint: disable=fixme", "# pylint: disable=global-statement", "# FIXME: Yes, I know this is not great. We'll fix it later. :-)", "global", "LAST_CLIENT", ",", "LAST_HEADERS", "if...
Optionally pause the process based on suggested rate interval.
[ "Optionally", "pause", "the", "process", "based", "on", "suggested", "rate", "interval", "." ]
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/old.db/mock/init.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/old.db/mock/init.py#L18-L23
def generate_scheduling_block_id(num_blocks, project='test'):
    """Generate a scheduling_block id"""
    # Ids look like YYYYMMDD-<project>-sbiNNN, numbered from 0.
    date_str = strftime("%Y%m%d", gmtime())
    for index in range(num_blocks):
        yield '{}-{}-sbi{:03d}'.format(date_str, project, index)
[ "def", "generate_scheduling_block_id", "(", "num_blocks", ",", "project", "=", "'test'", ")", ":", "_date", "=", "strftime", "(", "\"%Y%m%d\"", ",", "gmtime", "(", ")", ")", "_project", "=", "project", "for", "i", "in", "range", "(", "num_blocks", ")", ":"...
Generate a scheduling_block id
[ "Generate", "a", "scheduling_block", "id" ]
python
train
baverman/supplement
supplement/remote.py
https://github.com/baverman/supplement/blob/955002fe5a5749c9f0d89002f0006ec4fcd35bc9/supplement/remote.py#L139-L158
def get_docstring(self, project_path, source, position, filename):
    """Return signature and docstring for current cursor call context

    Some examples of call context::

        func(|
        func(arg|
        func(arg,|
        func(arg, func2(|  # call context is func2

    Either value may be None when unavailable.

    :param project_path: absolute project path
    :param source: unicode or byte string code source
    :param position: character or byte cursor position
    :param filename: absolute path of file with source code
    :returns: tuple (signarure, docstring)
    """
    # Delegate to the remote provider over the RPC channel.
    request = (project_path, source, position, filename)
    return self._call('get_docstring', *request)
[ "def", "get_docstring", "(", "self", ",", "project_path", ",", "source", ",", "position", ",", "filename", ")", ":", "return", "self", ".", "_call", "(", "'get_docstring'", ",", "project_path", ",", "source", ",", "position", ",", "filename", ")" ]
Return signature and docstring for current cursor call context Some examples of call context:: func(| func(arg| func(arg,| func(arg, func2(| # call context is func2 Signature and docstring can be None :param project_path: absolute project path :param source: unicode or byte string code source :param position: character or byte cursor position :param filename: absolute path of file with source code :returns: tuple (signarure, docstring)
[ "Return", "signature", "and", "docstring", "for", "current", "cursor", "call", "context" ]
python
train
MisterY/pydatum
pydatum/datum.py
https://github.com/MisterY/pydatum/blob/4b39f43040e31a95bcf219603b6429078a9ba3c2/pydatum/datum.py#L151-L155
def set_value(self, value: datetime) -> None:
    """Set the current value.

    :param value: the new ``datetime`` value
    :raises TypeError: if ``value`` is not a ``datetime`` instance
    """
    # Validate with an explicit exception instead of ``assert`` so the
    # check is not stripped when running under ``python -O``.
    if not isinstance(value, datetime):
        raise TypeError('value must be a datetime, got %s' % type(value).__name__)
    self.value = value
[ "def", "set_value", "(", "self", ",", "value", ":", "datetime", ")", ":", "assert", "isinstance", "(", "value", ",", "datetime", ")", "self", ".", "value", "=", "value" ]
Sets the current value
[ "Sets", "the", "current", "value" ]
python
train
pypa/pipenv
pipenv/vendor/pyparsing.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pyparsing.py#L4846-L4889
def traceParseAction(f): """Decorator for debugging parse actions. When the parse action is called, this decorator will print ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``. When the parse action completes, the decorator will print ``"<<"`` followed by the returned value, or any exception that the parse action raised. Example:: wd = Word(alphas) @traceParseAction def remove_duplicate_chars(tokens): return ''.join(sorted(set(''.join(tokens)))) wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) prints:: >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) <<leaving remove_duplicate_chars (ret: 'dfjkls') ['dfjkls'] """ f = _trim_arity(f) def z(*paArgs): thisFunc = f.__name__ s,l,t = paArgs[-3:] if len(paArgs)>3: thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) try: ret = f(*paArgs) except Exception as exc: sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) raise sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) return ret try: z.__name__ = f.__name__ except AttributeError: pass return z
[ "def", "traceParseAction", "(", "f", ")", ":", "f", "=", "_trim_arity", "(", "f", ")", "def", "z", "(", "*", "paArgs", ")", ":", "thisFunc", "=", "f", ".", "__name__", "s", ",", "l", ",", "t", "=", "paArgs", "[", "-", "3", ":", "]", "if", "le...
Decorator for debugging parse actions. When the parse action is called, this decorator will print ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``. When the parse action completes, the decorator will print ``"<<"`` followed by the returned value, or any exception that the parse action raised. Example:: wd = Word(alphas) @traceParseAction def remove_duplicate_chars(tokens): return ''.join(sorted(set(''.join(tokens)))) wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) prints:: >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) <<leaving remove_duplicate_chars (ret: 'dfjkls') ['dfjkls']
[ "Decorator", "for", "debugging", "parse", "actions", "." ]
python
train
ValvePython/steam
steam/game_servers.py
https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/game_servers.py#L452-L513
def a2s_players(server_addr, timeout=2, challenge=0): """Get list of players and their info :param server_addr: (ip, port) for the server :type server_addr: tuple :param timeout: (optional) timeout in seconds :type timeout: float :param challenge: (optional) challenge number :type challenge: int :raises: :class:`RuntimeError`, :class:`socket.timeout` :returns: a list of players :rtype: :class:`list` """ ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) ss.connect(server_addr) ss.settimeout(timeout) # request challenge number header = None if challenge in (-1, 0): ss.send(_pack('<lci', -1, b'U', challenge)) try: data = ss.recv(512) _, header, challenge = _unpack_from('<lcl', data) except: ss.close() raise if header not in b'AD': # work around for CSGO sending only max players raise RuntimeError("Unexpected challenge response - %s" % repr(header)) # request player info if header == b'D': # work around for CSGO sending only max players data = StructReader(data) else: ss.send(_pack('<lci', -1, b'U', challenge)) try: data = StructReader(_handle_a2s_response(ss)) finally: ss.close() header, num_players = data.unpack('<4xcB') if header != b'D': raise RuntimeError("Invalid reponse header - %s" % repr(header)) players = [] while len(players) < num_players: player = dict() player['index'] = data.unpack('<B')[0] player['name'] = data.read_cstring() player['score'], player['duration'] = data.unpack('<lf') players.append(player) if data.rlen() / 8 == num_players: # assume the ship server for player in players: player['deaths'], player['money'] = data.unpack('<ll') return players
[ "def", "a2s_players", "(", "server_addr", ",", "timeout", "=", "2", ",", "challenge", "=", "0", ")", ":", "ss", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "ss", ".", "connect", "(", "server_addr"...
Get list of players and their info :param server_addr: (ip, port) for the server :type server_addr: tuple :param timeout: (optional) timeout in seconds :type timeout: float :param challenge: (optional) challenge number :type challenge: int :raises: :class:`RuntimeError`, :class:`socket.timeout` :returns: a list of players :rtype: :class:`list`
[ "Get", "list", "of", "players", "and", "their", "info" ]
python
train
peterdemin/pip-compile-multi
pipcompilemulti/verify.py
https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/verify.py#L15-L40
def verify_environments(): """ For each environment verify hash comments and report failures. If any failure occured, exit with code 1. """ env_confs = discover( os.path.join( OPTIONS['base_dir'], '*.' + OPTIONS['in_ext'], ) ) success = True for conf in env_confs: env = Environment(name=conf['name']) current_comment = generate_hash_comment(env.infile) existing_comment = parse_hash_comment(env.outfile) if current_comment == existing_comment: logger.info("OK - %s was generated from %s.", env.outfile, env.infile) else: logger.error("ERROR! %s was not regenerated after changes in %s.", env.outfile, env.infile) logger.error("Expecting: %s", current_comment.strip()) logger.error("Found: %s", existing_comment.strip()) success = False return success
[ "def", "verify_environments", "(", ")", ":", "env_confs", "=", "discover", "(", "os", ".", "path", ".", "join", "(", "OPTIONS", "[", "'base_dir'", "]", ",", "'*.'", "+", "OPTIONS", "[", "'in_ext'", "]", ",", ")", ")", "success", "=", "True", "for", "...
For each environment verify hash comments and report failures. If any failure occured, exit with code 1.
[ "For", "each", "environment", "verify", "hash", "comments", "and", "report", "failures", ".", "If", "any", "failure", "occured", "exit", "with", "code", "1", "." ]
python
train
driftx/Telephus
telephus/cassandra/Cassandra.py
https://github.com/driftx/Telephus/blob/860a03a0fafe71605e1a4316dfdd8d0c29094703/telephus/cassandra/Cassandra.py#L1583-L1591
def trace_next_query(self, ): """ Enables tracing for the next query in this connection and returns the UUID for that trace session The next query will be traced idependently of trace probability and the returned UUID can be used to query the trace keyspace """ self._seqid += 1 d = self._reqs[self._seqid] = defer.Deferred() self.send_trace_next_query() return d
[ "def", "trace_next_query", "(", "self", ",", ")", ":", "self", ".", "_seqid", "+=", "1", "d", "=", "self", ".", "_reqs", "[", "self", ".", "_seqid", "]", "=", "defer", ".", "Deferred", "(", ")", "self", ".", "send_trace_next_query", "(", ")", "return...
Enables tracing for the next query in this connection and returns the UUID for that trace session The next query will be traced idependently of trace probability and the returned UUID can be used to query the trace keyspace
[ "Enables", "tracing", "for", "the", "next", "query", "in", "this", "connection", "and", "returns", "the", "UUID", "for", "that", "trace", "session", "The", "next", "query", "will", "be", "traced", "idependently", "of", "trace", "probability", "and", "the", "...
python
train
Nic30/hwt
hwt/hdl/switchContainer.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/switchContainer.py#L163-L177
def _merge_with_other_stm(self, other: "IfContainer") -> None: """ Merge other statement to this statement """ merge = self._merge_statement_lists newCases = [] for (c, caseA), (_, caseB) in zip(self.cases, other.cases): newCases.append((c, merge(caseA, caseB))) self.cases = newCases if self.default is not None: self.default = merge(self.default, other.default) self._on_merge(other)
[ "def", "_merge_with_other_stm", "(", "self", ",", "other", ":", "\"IfContainer\"", ")", "->", "None", ":", "merge", "=", "self", ".", "_merge_statement_lists", "newCases", "=", "[", "]", "for", "(", "c", ",", "caseA", ")", ",", "(", "_", ",", "caseB", ...
Merge other statement to this statement
[ "Merge", "other", "statement", "to", "this", "statement" ]
python
test
MisterY/gnucash-portfolio
gnucash_portfolio/securitiesaggregate.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L225-L234
def get_income_in_period(self, start: datetime, end: datetime) -> Decimal: """ Returns all income in the given period """ accounts = self.get_income_accounts() income = Decimal(0) for acct in accounts: acc_agg = AccountAggregate(self.book, acct) acc_bal = acc_agg.get_balance_in_period(start, end) income += acc_bal return income
[ "def", "get_income_in_period", "(", "self", ",", "start", ":", "datetime", ",", "end", ":", "datetime", ")", "->", "Decimal", ":", "accounts", "=", "self", ".", "get_income_accounts", "(", ")", "income", "=", "Decimal", "(", "0", ")", "for", "acct", "in"...
Returns all income in the given period
[ "Returns", "all", "income", "in", "the", "given", "period" ]
python
train
kmedian/illmat
illmat/illmat.py
https://github.com/kmedian/illmat/blob/2755e7c7da67378a0fa38ed24ba040dbf0f645bd/illmat/illmat.py#L5-L29
def illmat(D, random_state=None): """Generate a <D x D> ill-conditioned correlation matrix with random coefficients Parameters: ----------- D : int Dimension of the matrix Return: ------- cmat : ndarray DxD matrix with +1 as diagonal elements, mirrored random numbers [-1,+1]. """ if random_state: np.random.seed(random_state) uni = np.random.uniform(size=(D, D)) tmp = np.minimum(1., np.maximum(-1., 2 * uni - 1.0)) tmp = np.triu(tmp, k=1) return np.eye(D) + tmp + tmp.T
[ "def", "illmat", "(", "D", ",", "random_state", "=", "None", ")", ":", "if", "random_state", ":", "np", ".", "random", ".", "seed", "(", "random_state", ")", "uni", "=", "np", ".", "random", ".", "uniform", "(", "size", "=", "(", "D", ",", "D", "...
Generate a <D x D> ill-conditioned correlation matrix with random coefficients Parameters: ----------- D : int Dimension of the matrix Return: ------- cmat : ndarray DxD matrix with +1 as diagonal elements, mirrored random numbers [-1,+1].
[ "Generate", "a", "<D", "x", "D", ">", "ill", "-", "conditioned", "correlation", "matrix", "with", "random", "coefficients" ]
python
train
Gandi/gandi.cli
gandi/cli/core/params.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/core/params.py#L562-L583
def handle_parse_result(self, ctx, opts, args): """ Save value for this option in configuration if key/value pair doesn't already exist. Or old value in config was deprecated it needs to be updated to the new value format but the value keeps the same "meaning" """ gandi = ctx.obj needs_update = False value, args = click.Option.handle_parse_result(self, ctx, opts, args) if value is not None: previous_value = gandi.get(global_=True, key=self.name) if isinstance(self.type, GandiChoice): if value == previous_value: needs_update = True value = self.type.convert_deprecated_value(value) if not previous_value or needs_update: gandi.configure(global_=True, key=self.name, val=value) opts[self.name] = value value, args = click.Option.handle_parse_result(self, ctx, opts, args) return value, args
[ "def", "handle_parse_result", "(", "self", ",", "ctx", ",", "opts", ",", "args", ")", ":", "gandi", "=", "ctx", ".", "obj", "needs_update", "=", "False", "value", ",", "args", "=", "click", ".", "Option", ".", "handle_parse_result", "(", "self", ",", "...
Save value for this option in configuration if key/value pair doesn't already exist. Or old value in config was deprecated it needs to be updated to the new value format but the value keeps the same "meaning"
[ "Save", "value", "for", "this", "option", "in", "configuration", "if", "key", "/", "value", "pair", "doesn", "t", "already", "exist", ".", "Or", "old", "value", "in", "config", "was", "deprecated", "it", "needs", "to", "be", "updated", "to", "the", "new"...
python
train
Kronuz/pyScss
scss/extension/compass/images.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/extension/compass/images.py#L210-L217
def image_url(path, only_path=False, cache_buster=True, dst_color=None, src_color=None, spacing=None, collapse_x=None, collapse_y=None): """ Generates a path to an asset found relative to the project's images directory. Passing a true value as the second argument will cause the only the path to be returned instead of a `url()` function """ return _image_url(path, only_path, cache_buster, dst_color, src_color, False, None, spacing, collapse_x, collapse_y)
[ "def", "image_url", "(", "path", ",", "only_path", "=", "False", ",", "cache_buster", "=", "True", ",", "dst_color", "=", "None", ",", "src_color", "=", "None", ",", "spacing", "=", "None", ",", "collapse_x", "=", "None", ",", "collapse_y", "=", "None", ...
Generates a path to an asset found relative to the project's images directory. Passing a true value as the second argument will cause the only the path to be returned instead of a `url()` function
[ "Generates", "a", "path", "to", "an", "asset", "found", "relative", "to", "the", "project", "s", "images", "directory", ".", "Passing", "a", "true", "value", "as", "the", "second", "argument", "will", "cause", "the", "only", "the", "path", "to", "be", "r...
python
train
f3at/feat
src/feat/common/run.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/common/run.py#L247-L258
def _ensure_dir(directory, description): """ Ensure the given directory exists, creating it if not. @raise errors.FatalError: if the directory could not be created. """ if not os.path.exists(directory): try: os.makedirs(directory) except OSError, e: sys.stderr.write("could not create %s directory %s: %s" % ( description, directory, str(e)))
[ "def", "_ensure_dir", "(", "directory", ",", "description", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "try", ":", "os", ".", "makedirs", "(", "directory", ")", "except", "OSError", ",", "e", ":", "sys", ".",...
Ensure the given directory exists, creating it if not. @raise errors.FatalError: if the directory could not be created.
[ "Ensure", "the", "given", "directory", "exists", "creating", "it", "if", "not", "." ]
python
train
rcarmo/pngcanvas
pngcanvas.py
https://github.com/rcarmo/pngcanvas/blob/e2eaa0d5ba353005b3b658f6ee453c1956340670/pngcanvas.py#L141-L144
def rectangle(self, x0, y0, x1, y1): """Draw a rectangle""" x0, y0, x1, y1 = self.rect_helper(x0, y0, x1, y1) self.polyline([[x0, y0], [x1, y0], [x1, y1], [x0, y1], [x0, y0]])
[ "def", "rectangle", "(", "self", ",", "x0", ",", "y0", ",", "x1", ",", "y1", ")", ":", "x0", ",", "y0", ",", "x1", ",", "y1", "=", "self", ".", "rect_helper", "(", "x0", ",", "y0", ",", "x1", ",", "y1", ")", "self", ".", "polyline", "(", "[...
Draw a rectangle
[ "Draw", "a", "rectangle" ]
python
train
polyaxon/polyaxon-cli
polyaxon_cli/cli/superuser.py
https://github.com/polyaxon/polyaxon-cli/blob/a7f5eed74d4d909cad79059f3c21c58606881449/polyaxon_cli/cli/superuser.py#L48-L66
def revoke(username): """Revoke superuser role to a user. Example: \b ```bash $ polyaxon superuser revoke david ``` """ try: PolyaxonClient().user.revoke_superuser(username) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not revoke superuser role from user `{}`.'.format(username)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success( "Superuser role was revoked successfully from user `{}`.".format(username))
[ "def", "revoke", "(", "username", ")", ":", "try", ":", "PolyaxonClient", "(", ")", ".", "user", ".", "revoke_superuser", "(", "username", ")", "except", "(", "PolyaxonHTTPError", ",", "PolyaxonShouldExitError", ",", "PolyaxonClientException", ")", "as", "e", ...
Revoke superuser role to a user. Example: \b ```bash $ polyaxon superuser revoke david ```
[ "Revoke", "superuser", "role", "to", "a", "user", "." ]
python
valid
edx/edx-oauth2-provider
edx_oauth2_provider/views.py
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/views.py#L155-L171
def encode_id_token(self, id_token): """ Return encoded ID token. """ # Encode the ID token using the `client_secret`. # # TODO: Using the `client_secret` is not ideal, since it is transmitted # over the wire in some authentication flows. A better alternative is # to use the public key of the issuer, which also allows the ID token to # be shared among clients. Doing so however adds some operational # costs. We should consider this for the future. secret = id_token.access_token.client.client_secret return id_token.encode(secret)
[ "def", "encode_id_token", "(", "self", ",", "id_token", ")", ":", "# Encode the ID token using the `client_secret`.", "#", "# TODO: Using the `client_secret` is not ideal, since it is transmitted", "# over the wire in some authentication flows. A better alternative is", "# to use the public...
Return encoded ID token.
[ "Return", "encoded", "ID", "token", "." ]
python
train
TheHive-Project/Cortex-Analyzers
analyzers/SoltraEdge/soltra.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/SoltraEdge/soltra.py#L55-L68
def api_related(self, query): ''' Find related objects through SoltraEdge API ''' url = "{0}/{1}/related/?format=json".format(self.base_url, query) response = requests.get(url, headers=self.headers, verify=self.verify_ssl) if response.status_code == 200: return response.json() else: self.error('Received status code: {0} from Soltra Server. Content:\n{1}'.format( response.status_code, response.text) )
[ "def", "api_related", "(", "self", ",", "query", ")", ":", "url", "=", "\"{0}/{1}/related/?format=json\"", ".", "format", "(", "self", ".", "base_url", ",", "query", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", "...
Find related objects through SoltraEdge API
[ "Find", "related", "objects", "through", "SoltraEdge", "API" ]
python
train
ahopkins/sanic-jwt
sanic_jwt/authentication.py
https://github.com/ahopkins/sanic-jwt/blob/fca7750499c8cedde823d778512f613777fb5282/sanic_jwt/authentication.py#L117-L142
def _check_authentication(self, request, request_args, request_kwargs): """ Checks a request object to determine if that request contains a valid, and authenticated JWT. It returns a tuple: 1. Boolean whether the request is authenticated with a valid JWT 2. HTTP status code 3. Reasons (if any) for a potential authentication failure """ try: is_valid, status, reasons = self._verify( request, request_args=request_args, request_kwargs=request_kwargs, ) except Exception as e: logger.debug(e.args) if self.config.debug(): raise e args = e.args if isinstance(e, SanicJWTException) else [] raise exceptions.Unauthorized(*args) return is_valid, status, reasons
[ "def", "_check_authentication", "(", "self", ",", "request", ",", "request_args", ",", "request_kwargs", ")", ":", "try", ":", "is_valid", ",", "status", ",", "reasons", "=", "self", ".", "_verify", "(", "request", ",", "request_args", "=", "request_args", "...
Checks a request object to determine if that request contains a valid, and authenticated JWT. It returns a tuple: 1. Boolean whether the request is authenticated with a valid JWT 2. HTTP status code 3. Reasons (if any) for a potential authentication failure
[ "Checks", "a", "request", "object", "to", "determine", "if", "that", "request", "contains", "a", "valid", "and", "authenticated", "JWT", "." ]
python
train
ungarj/mapchete
mapchete/config.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/config.py#L557-L560
def process_file(self): """Deprecated.""" warnings.warn(DeprecationWarning("'self.process_file' is deprecated")) return os.path.join(self._raw["config_dir"], self._raw["process"])
[ "def", "process_file", "(", "self", ")", ":", "warnings", ".", "warn", "(", "DeprecationWarning", "(", "\"'self.process_file' is deprecated\"", ")", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "_raw", "[", "\"config_dir\"", "]", ",", "se...
Deprecated.
[ "Deprecated", "." ]
python
valid
atlassian-api/atlassian-python-api
atlassian/confluence.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/confluence.py#L397-L405
def remove_content_history(self, page_id, version_number): """ Remove content history. It works as experimental method :param page_id: :param version_number: version number :return: """ url = 'rest/experimental/content/{id}/version/{versionNumber}'.format(id=page_id, versionNumber=version_number) self.delete(url)
[ "def", "remove_content_history", "(", "self", ",", "page_id", ",", "version_number", ")", ":", "url", "=", "'rest/experimental/content/{id}/version/{versionNumber}'", ".", "format", "(", "id", "=", "page_id", ",", "versionNumber", "=", "version_number", ")", "self", ...
Remove content history. It works as experimental method :param page_id: :param version_number: version number :return:
[ "Remove", "content", "history", ".", "It", "works", "as", "experimental", "method", ":", "param", "page_id", ":", ":", "param", "version_number", ":", "version", "number", ":", "return", ":" ]
python
train
Azure/blobxfer
blobxfer/operations/upload.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/upload.py#L131-L143
def termination_check_md5(self): # type: (Uploader) -> bool """Check if terminated from MD5 context :param Uploader self: this :rtype: bool :return: if terminated from MD5 context """ with self._md5_meta_lock: with self._upload_lock: return (self._upload_terminate or (self._all_files_processed and len(self._md5_map) == 0 and len(self._upload_set) == 0))
[ "def", "termination_check_md5", "(", "self", ")", ":", "# type: (Uploader) -> bool", "with", "self", ".", "_md5_meta_lock", ":", "with", "self", ".", "_upload_lock", ":", "return", "(", "self", ".", "_upload_terminate", "or", "(", "self", ".", "_all_files_processe...
Check if terminated from MD5 context :param Uploader self: this :rtype: bool :return: if terminated from MD5 context
[ "Check", "if", "terminated", "from", "MD5", "context", ":", "param", "Uploader", "self", ":", "this", ":", "rtype", ":", "bool", ":", "return", ":", "if", "terminated", "from", "MD5", "context" ]
python
train
ashleysommer/sanicpluginsframework
spf/plugin.py
https://github.com/ashleysommer/sanicpluginsframework/blob/2cb1656d9334f04c30c738074784b0450c1b893e/spf/plugin.py#L107-L130
def route(self, uri, *args, **kwargs): """Create a plugin route from a decorated function. :param uri: endpoint at which the route will be accessible. :type uri: str :param args: captures all of the positional arguments passed in :type args: tuple(Any) :param kwargs: captures the keyword arguments passed in :type kwargs: dict(Any) :return: The exception function to use as the decorator :rtype: fn """ if len(args) == 0 and callable(uri): # pragma: no cover raise RuntimeError("Cannot use the @route decorator without " "arguments.") kwargs.setdefault('methods', frozenset({'GET'})) kwargs.setdefault('host', None) kwargs.setdefault('strict_slashes', False) kwargs.setdefault('stream', False) kwargs.setdefault('name', None) def wrapper(handler_f): self._routes.append(FutureRoute(handler_f, uri, args, kwargs)) return handler_f return wrapper
[ "def", "route", "(", "self", ",", "uri", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", "==", "0", "and", "callable", "(", "uri", ")", ":", "# pragma: no cover", "raise", "RuntimeError", "(", "\"Cannot use the @rout...
Create a plugin route from a decorated function. :param uri: endpoint at which the route will be accessible. :type uri: str :param args: captures all of the positional arguments passed in :type args: tuple(Any) :param kwargs: captures the keyword arguments passed in :type kwargs: dict(Any) :return: The exception function to use as the decorator :rtype: fn
[ "Create", "a", "plugin", "route", "from", "a", "decorated", "function", ".", ":", "param", "uri", ":", "endpoint", "at", "which", "the", "route", "will", "be", "accessible", ".", ":", "type", "uri", ":", "str", ":", "param", "args", ":", "captures", "a...
python
train
twisted/mantissa
xmantissa/liveform.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/liveform.py#L1251-L1259
def options(self, request, tag): """ Render each of the options of the wrapped L{ChoiceParameter} instance. """ option = tag.patternGenerator('option') return tag[[ OptionView(index, o, option()) for (index, o) in enumerate(self.parameter.choices)]]
[ "def", "options", "(", "self", ",", "request", ",", "tag", ")", ":", "option", "=", "tag", ".", "patternGenerator", "(", "'option'", ")", "return", "tag", "[", "[", "OptionView", "(", "index", ",", "o", ",", "option", "(", ")", ")", "for", "(", "in...
Render each of the options of the wrapped L{ChoiceParameter} instance.
[ "Render", "each", "of", "the", "options", "of", "the", "wrapped", "L", "{", "ChoiceParameter", "}", "instance", "." ]
python
train
mapbox/mapbox-cli-py
mapboxcli/scripts/geocoding.py
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/geocoding.py#L24-L31
def coords_from_query(query): """Transform a query line into a (lng, lat) pair of coordinates.""" try: coords = json.loads(query) except ValueError: vals = re.split(r'[,\s]+', query.strip()) coords = [float(v) for v in vals] return tuple(coords[:2])
[ "def", "coords_from_query", "(", "query", ")", ":", "try", ":", "coords", "=", "json", ".", "loads", "(", "query", ")", "except", "ValueError", ":", "vals", "=", "re", ".", "split", "(", "r'[,\\s]+'", ",", "query", ".", "strip", "(", ")", ")", "coord...
Transform a query line into a (lng, lat) pair of coordinates.
[ "Transform", "a", "query", "line", "into", "a", "(", "lng", "lat", ")", "pair", "of", "coordinates", "." ]
python
train
scanny/python-pptx
pptx/shapes/shapetree.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/shapetree.py#L843-L855
def new_movie_pic(cls, shapes, shape_id, movie_file, x, y, cx, cy, poster_frame_image, mime_type): """Return a new `p:pic` element containing video in *movie_file*. If *mime_type* is None, 'video/unknown' is used. If *poster_frame_file* is None, the default "media loudspeaker" image is used. """ return cls( shapes, shape_id, movie_file, x, y, cx, cy, poster_frame_image, mime_type )._pic return
[ "def", "new_movie_pic", "(", "cls", ",", "shapes", ",", "shape_id", ",", "movie_file", ",", "x", ",", "y", ",", "cx", ",", "cy", ",", "poster_frame_image", ",", "mime_type", ")", ":", "return", "cls", "(", "shapes", ",", "shape_id", ",", "movie_file", ...
Return a new `p:pic` element containing video in *movie_file*. If *mime_type* is None, 'video/unknown' is used. If *poster_frame_file* is None, the default "media loudspeaker" image is used.
[ "Return", "a", "new", "p", ":", "pic", "element", "containing", "video", "in", "*", "movie_file", "*", "." ]
python
train
SpriteLink/NIPAP
nipap/nipap/backend.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap/nipap/backend.py#L1625-L1695
def smart_search_vrf(self, auth, query_str, search_options=None, extra_query=None): """ Perform a smart search on VRF list. * `auth` [BaseAuth] AAA options. * `query_str` [string] Search string * `search_options` [options_dict] Search options. See :func:`search_vrf`. * `extra_query` [dict_to_sql] Extra search terms, will be AND:ed together with what is extracted from the query string. Return a dict with three elements: * :attr:`interpretation` - How the query string was interpreted. * :attr:`search_options` - Various search_options. * :attr:`result` - The search result. The :attr:`interpretation` is given as a list of dicts, each explaining how a part of the search key was interpreted (ie. what VRF attribute the search operation was performed on). The :attr:`result` is a list of dicts containing the search result. The smart search function tries to convert the query from a text string to a `query` dict which is passed to the :func:`search_vrf` function. If multiple search keys are detected, they are combined with a logical AND. It will basically just take each search term and try to match it against the name or description column with regex match or the VRF column with an exact match. See the :func:`search_vrf` function for an explanation of the `search_options` argument. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_vrf` for full understanding. 
""" if search_options is None: search_options = {} self._logger.debug("smart_search_vrf query string: %s" % query_str) success, query = self._parse_vrf_query(query_str) if not success: return { 'interpretation': query, 'search_options': search_options, 'result': [], 'error': True, 'error_message': 'query interpretation failed' } if extra_query is not None: query = { 'operator': 'and', 'val1': query, 'val2': extra_query } self._logger.debug("smart_search_vrf; query expanded to: %s" % unicode(query)) search_result = self.search_vrf(auth, query, search_options) search_result['interpretation'] = query search_result['error'] = False return search_result
[ "def", "smart_search_vrf", "(", "self", ",", "auth", ",", "query_str", ",", "search_options", "=", "None", ",", "extra_query", "=", "None", ")", ":", "if", "search_options", "is", "None", ":", "search_options", "=", "{", "}", "self", ".", "_logger", ".", ...
Perform a smart search on VRF list. * `auth` [BaseAuth] AAA options. * `query_str` [string] Search string * `search_options` [options_dict] Search options. See :func:`search_vrf`. * `extra_query` [dict_to_sql] Extra search terms, will be AND:ed together with what is extracted from the query string. Return a dict with three elements: * :attr:`interpretation` - How the query string was interpreted. * :attr:`search_options` - Various search_options. * :attr:`result` - The search result. The :attr:`interpretation` is given as a list of dicts, each explaining how a part of the search key was interpreted (ie. what VRF attribute the search operation was performed on). The :attr:`result` is a list of dicts containing the search result. The smart search function tries to convert the query from a text string to a `query` dict which is passed to the :func:`search_vrf` function. If multiple search keys are detected, they are combined with a logical AND. It will basically just take each search term and try to match it against the name or description column with regex match or the VRF column with an exact match. See the :func:`search_vrf` function for an explanation of the `search_options` argument. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_vrf` for full understanding.
[ "Perform", "a", "smart", "search", "on", "VRF", "list", "." ]
python
train
bokeh/bokeh
bokeh/colors/color.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/colors/color.py#L81-L94
def darken(self, amount): ''' Darken (reduce the luminance) of this color. Args: amount (float) : Amount to reduce the luminance by (clamped above zero) Returns: Color ''' hsl = self.to_hsl() hsl.l = self.clamp(hsl.l - amount) return self.from_hsl(hsl)
[ "def", "darken", "(", "self", ",", "amount", ")", ":", "hsl", "=", "self", ".", "to_hsl", "(", ")", "hsl", ".", "l", "=", "self", ".", "clamp", "(", "hsl", ".", "l", "-", "amount", ")", "return", "self", ".", "from_hsl", "(", "hsl", ")" ]
Darken (reduce the luminance) of this color. Args: amount (float) : Amount to reduce the luminance by (clamped above zero) Returns: Color
[ "Darken", "(", "reduce", "the", "luminance", ")", "of", "this", "color", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xchart/xchartrenderer.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchartrenderer.py#L320-L369
def drawAxis(self, painter, rect, axis): """ Draws the axis for the given painter. :param painter | <QPainter> rect | <QRect> """ if not axis: return # draw the axis lines painter.save() pen = QPen(self.axisColor()) pen.setWidth(3) painter.setPen(pen) # draw the vertical line if axis.orientation() == Qt.Vertical: line = QLineF(rect.right(), rect.top(), rect.right(), rect.bottom()) painter.drawLine(line) painter.setFont(axis.labelFont()) for y, height, label in self._buildData.get('grid_h_labels', []): painter.drawText(0, y - height / 2.0, rect.width() - 3, height, Qt.AlignRight | Qt.AlignVCenter, label) painter.translate(0, rect.center().y()) painter.rotate(-90) painter.setFont(axis.titleFont()) painter.drawText(-rect.height()/2, 0, rect.height(), rect.width(), Qt.AlignHCenter | Qt.AlignTop, axis.title()) # draw the horizontal line else: line = QLineF(rect.left(), rect.top(), rect.right(), rect.top()) painter.setFont(axis.titleFont()) painter.drawText(rect, Qt.AlignHCenter | Qt.AlignBottom, axis.title()) painter.drawLine(line) painter.setFont(axis.labelFont()) for x, width, label in self._buildData.get('grid_v_labels', []): painter.drawText(x - width / 2.0, 3, width, rect.height() - 6, Qt.AlignHCenter | Qt.AlignTop, label) painter.restore()
[ "def", "drawAxis", "(", "self", ",", "painter", ",", "rect", ",", "axis", ")", ":", "if", "not", "axis", ":", "return", "# draw the axis lines\r", "painter", ".", "save", "(", ")", "pen", "=", "QPen", "(", "self", ".", "axisColor", "(", ")", ")", "pe...
Draws the axis for the given painter. :param painter | <QPainter> rect | <QRect>
[ "Draws", "the", "axis", "for", "the", "given", "painter", ".", ":", "param", "painter", "|", "<QPainter", ">", "rect", "|", "<QRect", ">" ]
python
train
secure-systems-lab/securesystemslib
securesystemslib/pyca_crypto_keys.py
https://github.com/secure-systems-lab/securesystemslib/blob/beb3109d5bb462e5a60eed88fb40ed1167bd354e/securesystemslib/pyca_crypto_keys.py#L360-L452
def verify_rsa_signature(signature, signature_scheme, public_key, data): """ <Purpose> Determine whether the corresponding private key of 'public_key' produced 'signature'. verify_signature() will use the public key, signature scheme, and 'data' to complete the verification. >>> public, private = generate_rsa_public_and_private(2048) >>> data = b'The quick brown fox jumps over the lazy dog' >>> scheme = 'rsassa-pss-sha256' >>> signature, scheme = create_rsa_signature(private, data, scheme) >>> verify_rsa_signature(signature, scheme, public, data) True >>> verify_rsa_signature(signature, scheme, public, b'bad_data') False <Arguments> signature: A signature, as a string. This is the signature returned by create_rsa_signature(). signature_scheme: A string that indicates the signature scheme used to generate 'signature'. 'rsassa-pss-sha256' is currently supported. public_key: The RSA public key, a string in PEM format. data: Data used by securesystemslib.keys.create_signature() to generate 'signature'. 'data' (a string) is needed here to verify 'signature'. <Exceptions> securesystemslib.exceptions.FormatError, if 'signature', 'signature_scheme', 'public_key', or 'data' are improperly formatted. securesystemslib.exceptions.UnsupportedAlgorithmError, if the signature scheme used by 'signature' is not one supported by securesystemslib.keys.create_signature(). securesystemslib.exceptions.CryptoError, if the private key cannot be decoded or its key type is unsupported. <Side Effects> pyca/cryptography's RSAPublicKey.verifier() called to do the actual verification. <Returns> Boolean. True if the signature is valid, False otherwise. """ # Does 'public_key' have the correct format? # This check will ensure 'public_key' conforms to # 'securesystemslib.formats.PEMRSA_SCHEMA'. Raise # 'securesystemslib.exceptions.FormatError' if the check fails. securesystemslib.formats.PEMRSA_SCHEMA.check_match(public_key) # Does 'signature_scheme' have the correct format? 
securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(signature_scheme) # Does 'signature' have the correct format? securesystemslib.formats.PYCACRYPTOSIGNATURE_SCHEMA.check_match(signature) # What about 'data'? securesystemslib.formats.DATA_SCHEMA.check_match(data) # Verify whether the private key of 'public_key' produced 'signature'. # Before returning the 'valid_signature' Boolean result, ensure 'RSASSA-PSS' # was used as the signature scheme. valid_signature = False # Verify the RSASSA-PSS signature with pyca/cryptography. try: public_key_object = serialization.load_pem_public_key(public_key.encode('utf-8'), backend=default_backend()) # verify() raises 'cryptography.exceptions.InvalidSignature' if the # signature is invalid. 'salt_length' is set to the digest size of the # hashing algorithm. try: public_key_object.verify(signature, data, padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=hashes.SHA256().digest_size), hashes.SHA256()) return True except cryptography.exceptions.InvalidSignature: return False # Raised by load_pem_public_key(). except (ValueError, cryptography.exceptions.UnsupportedAlgorithm) as e: raise securesystemslib.exceptions.CryptoError('The PEM could not be' ' decoded successfully, or contained an unsupported key type: ' + str(e))
[ "def", "verify_rsa_signature", "(", "signature", ",", "signature_scheme", ",", "public_key", ",", "data", ")", ":", "# Does 'public_key' have the correct format?", "# This check will ensure 'public_key' conforms to", "# 'securesystemslib.formats.PEMRSA_SCHEMA'. Raise", "# 'securesyste...
<Purpose> Determine whether the corresponding private key of 'public_key' produced 'signature'. verify_signature() will use the public key, signature scheme, and 'data' to complete the verification. >>> public, private = generate_rsa_public_and_private(2048) >>> data = b'The quick brown fox jumps over the lazy dog' >>> scheme = 'rsassa-pss-sha256' >>> signature, scheme = create_rsa_signature(private, data, scheme) >>> verify_rsa_signature(signature, scheme, public, data) True >>> verify_rsa_signature(signature, scheme, public, b'bad_data') False <Arguments> signature: A signature, as a string. This is the signature returned by create_rsa_signature(). signature_scheme: A string that indicates the signature scheme used to generate 'signature'. 'rsassa-pss-sha256' is currently supported. public_key: The RSA public key, a string in PEM format. data: Data used by securesystemslib.keys.create_signature() to generate 'signature'. 'data' (a string) is needed here to verify 'signature'. <Exceptions> securesystemslib.exceptions.FormatError, if 'signature', 'signature_scheme', 'public_key', or 'data' are improperly formatted. securesystemslib.exceptions.UnsupportedAlgorithmError, if the signature scheme used by 'signature' is not one supported by securesystemslib.keys.create_signature(). securesystemslib.exceptions.CryptoError, if the private key cannot be decoded or its key type is unsupported. <Side Effects> pyca/cryptography's RSAPublicKey.verifier() called to do the actual verification. <Returns> Boolean. True if the signature is valid, False otherwise.
[ "<Purpose", ">", "Determine", "whether", "the", "corresponding", "private", "key", "of", "public_key", "produced", "signature", ".", "verify_signature", "()", "will", "use", "the", "public", "key", "signature", "scheme", "and", "data", "to", "complete", "the", "...
python
train
stormpath/stormpath-django
django_stormpath/backends.py
https://github.com/stormpath/stormpath-django/blob/af60eb5da2115d94ac313613c5d4e6b9f3d16157/django_stormpath/backends.py#L86-L106
def authenticate(self, username=None, password=None, **kwargs): """The authenticate method takes credentials as keyword arguments, usually username/email and password. Returns a user model if the Stormpath authentication was successful or None otherwise. It expects three variable to be defined in Django settings: \n STORMPATH_ID = "apiKeyId" \n STORMPATH_SECRET = "apiKeySecret" \n STORMPATH_APPLICATION = "https://api.stormpath.com/v1/applications/APP_UID" """ if username is None: UserModel = get_user_model() username = kwargs.get(UserModel.USERNAME_FIELD) account = self._stormpath_authenticate(username, password) if account is None: return None return self._create_or_get_user(account)
[ "def", "authenticate", "(", "self", ",", "username", "=", "None", ",", "password", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "username", "is", "None", ":", "UserModel", "=", "get_user_model", "(", ")", "username", "=", "kwargs", ".", "get",...
The authenticate method takes credentials as keyword arguments, usually username/email and password. Returns a user model if the Stormpath authentication was successful or None otherwise. It expects three variable to be defined in Django settings: \n STORMPATH_ID = "apiKeyId" \n STORMPATH_SECRET = "apiKeySecret" \n STORMPATH_APPLICATION = "https://api.stormpath.com/v1/applications/APP_UID"
[ "The", "authenticate", "method", "takes", "credentials", "as", "keyword", "arguments", "usually", "username", "/", "email", "and", "password", "." ]
python
train
ahmontero/dop
dop/client.py
https://github.com/ahmontero/dop/blob/40354ac6feefe92a7555fe2d1834138c9a03e518/dop/client.py#L532-L558
def transfer_image(self, image_id_or_slug, region_id): """ This method allows you to transfer an image to a specified region. Required parameters image_id: Numeric, this is the id of the image you would like to transfer. region_id Numeric, this is the id of the region to which you would like to transfer. """ if not image_id_or_slug: msg = 'image_id_or_slug is required to transfer an image!' raise DOPException(msg) if not region_id: raise DOPException('region_id is required to transfer an image!') params = {'region_id': region_id} json = self.request('/images/%s/transfer' % image_id_or_slug, method='GET', params=params) status = json.get('status') if status == 'OK': return json.get('event_id') else: message = json.get('message') raise DOPException('[%s]: %s' % (status, message))
[ "def", "transfer_image", "(", "self", ",", "image_id_or_slug", ",", "region_id", ")", ":", "if", "not", "image_id_or_slug", ":", "msg", "=", "'image_id_or_slug is required to transfer an image!'", "raise", "DOPException", "(", "msg", ")", "if", "not", "region_id", "...
This method allows you to transfer an image to a specified region. Required parameters image_id: Numeric, this is the id of the image you would like to transfer. region_id Numeric, this is the id of the region to which you would like to transfer.
[ "This", "method", "allows", "you", "to", "transfer", "an", "image", "to", "a", "specified", "region", "." ]
python
train
saltstack/salt
salt/states/redismod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/redismod.py#L84-L117
def absent(name, keys=None, **connection_args): ''' Ensure key absent from redis name Key to ensure absent from redis keys list of keys to ensure absent, name will be ignored if this is used ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'Key(s) specified already absent'} if keys: if not isinstance(keys, list): ret['result'] = False ret['comment'] = '`keys` not formed as a list type' return ret delete_list = [key for key in keys if __salt__['redis.exists'](key, **connection_args)] if not delete_list: return ret __salt__['redis.delete'](*delete_list, **connection_args) ret['changes']['deleted'] = delete_list ret['comment'] = 'Keys deleted' return ret if __salt__['redis.exists'](name, **connection_args): __salt__['redis.delete'](name, **connection_args) ret['comment'] = 'Key deleted' ret['changes']['deleted'] = [name] return ret
[ "def", "absent", "(", "name", ",", "keys", "=", "None", ",", "*", "*", "connection_args", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "'Key(s) specified already...
Ensure key absent from redis name Key to ensure absent from redis keys list of keys to ensure absent, name will be ignored if this is used
[ "Ensure", "key", "absent", "from", "redis" ]
python
train
ska-sa/montblanc
montblanc/util/__init__.py
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L324-L352
def shape_from_str_tuple(sshape, variables, ignore=None): """ Substitutes string values in the supplied shape parameter with integer variables stored in a dictionary Parameters ---------- sshape : tuple/string composed of integers and strings. The strings should related to integral properties registered with this Solver object variables : dictionary Keys with associated integer values. Used to replace string values within the tuple ignore : list A list of tuple strings to ignore >>> print self.shape_from_str_tuple((4,'na','ntime'),ignore=['ntime']) (4, 3) """ if ignore is None: ignore = [] if not isinstance(sshape, tuple) and not isinstance(sshape, list): raise TypeError, 'sshape argument must be a tuple or list' if not isinstance(ignore, list): raise TypeError, 'ignore argument must be a list' return tuple([int(eval_expr(v,variables)) if isinstance(v,str) else int(v) for v in sshape if v not in ignore])
[ "def", "shape_from_str_tuple", "(", "sshape", ",", "variables", ",", "ignore", "=", "None", ")", ":", "if", "ignore", "is", "None", ":", "ignore", "=", "[", "]", "if", "not", "isinstance", "(", "sshape", ",", "tuple", ")", "and", "not", "isinstance", "...
Substitutes string values in the supplied shape parameter with integer variables stored in a dictionary Parameters ---------- sshape : tuple/string composed of integers and strings. The strings should related to integral properties registered with this Solver object variables : dictionary Keys with associated integer values. Used to replace string values within the tuple ignore : list A list of tuple strings to ignore >>> print self.shape_from_str_tuple((4,'na','ntime'),ignore=['ntime']) (4, 3)
[ "Substitutes", "string", "values", "in", "the", "supplied", "shape", "parameter", "with", "integer", "variables", "stored", "in", "a", "dictionary" ]
python
train
MediaFire/mediafire-python-open-sdk
examples/mediafire-cli.py
https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/examples/mediafire-cli.py#L145-L273
def main(): # pylint: disable=too-many-statements """Main entry point""" parser = argparse.ArgumentParser(prog='mediafire-cli', description=__doc__) parser.add_argument('--debug', dest='debug', action='store_true', default=False, help='Enable debug output') parser.add_argument('--email', dest='email', required=False, default=os.environ.get('MEDIAFIRE_EMAIL', None)) parser.add_argument('--password', dest='password', required=False, default=os.environ.get('MEDIAFIRE_PASSWORD', None)) actions = parser.add_subparsers(title='Actions', dest='action') # http://bugs.python.org/issue9253#msg186387 actions.required = True # ls subparser = actions.add_parser('ls', help=do_ls.__doc__) subparser.add_argument('uri', nargs='?', help='MediaFire URI', default='mf:///') # file-upload subparser = actions.add_parser('file-upload', help=do_file_upload.__doc__) subparser.add_argument('paths', nargs='+', help='Path[s] to upload') subparser.add_argument('dest_uri', help='Destination MediaFire URI') # file-download subparser = actions.add_parser('file-download', help=do_file_download.__doc__) subparser.add_argument('uris', nargs='+', help='MediaFire File URI[s] to download') subparser.add_argument('dest_path', help='Destination path') # file-show subparser = actions.add_parser('file-show', help=do_file_show.__doc__) subparser.add_argument('uris', nargs='+', help='MediaFire File URI[s] to print out') # folder-create subparser = actions.add_parser('folder-create', help=do_folder_create.__doc__) subparser.add_argument('uris', nargs='+', help='MediaFire folder path URI[s]') # resource-delete subparser = actions.add_parser('resource-delete', help=do_resource_delete.__doc__) subparser.add_argument('uris', nargs='+', help='MediaFire resource URI[s]') subparser.add_argument('--purge', help="Purge, don't send to trash", dest="purge", action="store_true", default=False) # file-update-metadata subparser = actions.add_parser('file-update-metadata', help=do_file_update_metadata.__doc__) 
subparser.add_argument('uri', help='MediaFire file URI') subparser.add_argument('--filename', help='Set file name', default=None, dest='filename') subparser.add_argument('--privacy', help='Set file privacy', choices=['public', 'private'], default=None, dest='privacy') subparser.add_argument('--description', help='Set file description', dest='description', default=None) subparser.add_argument('--mtime', help="Set file modification time", dest='mtime', default=None) # folder-update-metadata subparser = actions.add_parser('folder-update-metadata', help=do_folder_update_metadata.__doc__) subparser.add_argument('uri', help='MediaFire folder URI') subparser.add_argument('--foldername', help='Set folder name', default=None, dest='foldername') subparser.add_argument('--privacy', help='Set folder privacy', choices=['public', 'private'], default=None, dest='privacy') subparser.add_argument('--recursive', help='Set privacy recursively', action='store_true', default=None, dest='recursive') subparser.add_argument('--description', help='Set folder description', dest='description', default=None) subparser.add_argument('--mtime', help='Set folder mtime', default=None, dest='mtime') # debug-get-resource subparser = actions.add_parser('debug-get-resource', help=do_debug_get_resource.__doc__) subparser.add_argument('uri', help='MediaFire resource URI', default='mediafire:/', nargs='?') args = parser.parse_args() if args.debug: logger = logging.getLogger() logger.setLevel(logging.DEBUG) logging.getLogger("mediafire.client").setLevel(logging.DEBUG) client = MediaFireClient() if args.email and args.password: client.login(args.email, args.password, app_id=APP_ID) router = { "file-upload": do_file_upload, "file-download": do_file_download, "file-show": do_file_show, "ls": do_ls, "folder-create": do_folder_create, "resource-delete": do_resource_delete, "file-update-metadata": do_file_update_metadata, "folder-update-metadata": do_folder_update_metadata, "debug-get-resource": 
do_debug_get_resource } if args.action in router: result = router[args.action](client, args) if not result: sys.exit(1) else: print('Unsupported action: {}'.format(args.action)) sys.exit(1)
[ "def", "main", "(", ")", ":", "# pylint: disable=too-many-statements", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'mediafire-cli'", ",", "description", "=", "__doc__", ")", "parser", ".", "add_argument", "(", "'--debug'", ",", "dest", "...
Main entry point
[ "Main", "entry", "point" ]
python
train
log2timeline/plaso
plaso/parsers/plist_plugins/install_history.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/plist_plugins/install_history.py#L25-L55
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs): """Extracts relevant install history entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. top_level (dict[str, object]): plist top-level key. """ for entry in top_level: datetime_value = entry.get('date', None) package_identifiers = entry.get('packageIdentifiers', []) if not datetime_value or not package_identifiers: continue display_name = entry.get('displayName', '<UNKNOWN>') display_version = entry.get('displayVersion', '<DISPLAY_VERSION>') process_name = entry.get('processName', '<PROCESS_NAME>') package_identifiers = ', '.join(package_identifiers) event_data = plist_event.PlistTimeEventData() event_data.desc = ( 'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: ' '{3:s}.').format( display_name, display_version, process_name, package_identifiers) event_data.key = '' event_data.root = '/item' event = time_events.PythonDatetimeEvent( datetime_value, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "GetEntries", "(", "self", ",", "parser_mediator", ",", "top_level", "=", "None", ",", "*", "*", "unused_kwargs", ")", ":", "for", "entry", "in", "top_level", ":", "datetime_value", "=", "entry", ".", "get", "(", "'date'", ",", "None", ")", "packa...
Extracts relevant install history entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. top_level (dict[str, object]): plist top-level key.
[ "Extracts", "relevant", "install", "history", "entries", "." ]
python
train
spdx/tools-python
spdx/parsers/tagvalue.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/tagvalue.py#L451-L456
def p_file_cr_value_1(self, p): """file_cr_value : TEXT""" if six.PY2: p[0] = p[1].decode(encoding='utf-8') else: p[0] = p[1]
[ "def", "p_file_cr_value_1", "(", "self", ",", "p", ")", ":", "if", "six", ".", "PY2", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", ".", "decode", "(", "encoding", "=", "'utf-8'", ")", "else", ":", "p", "[", "0", "]", "=", "p", "[", "1"...
file_cr_value : TEXT
[ "file_cr_value", ":", "TEXT" ]
python
valid
geoadmin/lib-gatilegrid
gatilegrid/tilegrids.py
https://github.com/geoadmin/lib-gatilegrid/blob/28e39cba22451f6ef0ddcb93cbc0838f06815505/gatilegrid/tilegrids.py#L206-L220
def tileBounds(self, zoom, tileCol, tileRow): "Returns the bounds of a tile in LV03 (EPSG:21781)" assert zoom in range(0, len(self.RESOLUTIONS)) # 0,0 at top left: y axis down and x axis right tileSize = self.tileSize(zoom) minX = self.MINX + tileCol * tileSize maxX = self.MINX + (tileCol + 1) * tileSize if self.originCorner == 'bottom-left': minY = self.MINY + tileRow * tileSize maxY = self.MINY + (tileRow + 1) * tileSize elif self.originCorner == 'top-left': minY = self.MAXY - (tileRow + 1) * tileSize maxY = self.MAXY - tileRow * tileSize return [minX, minY, maxX, maxY]
[ "def", "tileBounds", "(", "self", ",", "zoom", ",", "tileCol", ",", "tileRow", ")", ":", "assert", "zoom", "in", "range", "(", "0", ",", "len", "(", "self", ".", "RESOLUTIONS", ")", ")", "# 0,0 at top left: y axis down and x axis right", "tileSize", "=", "se...
Returns the bounds of a tile in LV03 (EPSG:21781)
[ "Returns", "the", "bounds", "of", "a", "tile", "in", "LV03", "(", "EPSG", ":", "21781", ")" ]
python
train
fedora-python/pyp2rpm
pyp2rpm/module_runners.py
https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/module_runners.py#L35-L42
def run(self): """Executes the code of the specified module.""" with utils.ChangeDir(self.dirname): sys.path.insert(0, self.dirname) sys.argv[1:] = self.args runpy.run_module(self.not_suffixed(self.filename), run_name='__main__', alter_sys=True)
[ "def", "run", "(", "self", ")", ":", "with", "utils", ".", "ChangeDir", "(", "self", ".", "dirname", ")", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "self", ".", "dirname", ")", "sys", ".", "argv", "[", "1", ":", "]", "=", "self", ...
Executes the code of the specified module.
[ "Executes", "the", "code", "of", "the", "specified", "module", "." ]
python
train
andycasey/sick
sick/clis/run.py
https://github.com/andycasey/sick/blob/6c37686182794c4cafea45abf7062b30b789b1a2/sick/clis/run.py#L202-L224
def _announce_theta(theta): """ Announce theta values to the log. """ c = 299792.458 # km/s is_a_redshift = lambda p: p == "z" or p[:2] == "z_" for parameter, value in theta.items(): try: value[0] except (IndexError, TypeError): message = "\t{0}: {1:.3f}".format(parameter, value) if is_a_redshift(parameter): message += " [{0:.1f} km/s]".format(value * c) else: # (MAP, u_pos, u_neg) message = "\t{0}: {1:.3f} ({2:+.3f}, {3:+.3f})".format(parameter, value[0], value[1], value[2]) if is_a_redshift(parameter): message += " [{0:.1f} ({1:+.1f}, {2:+.1f}) km/s]".format( value[0] * c, value[1] * c, value[2] * c) logger.info(message)
[ "def", "_announce_theta", "(", "theta", ")", ":", "c", "=", "299792.458", "# km/s", "is_a_redshift", "=", "lambda", "p", ":", "p", "==", "\"z\"", "or", "p", "[", ":", "2", "]", "==", "\"z_\"", "for", "parameter", ",", "value", "in", "theta", ".", "it...
Announce theta values to the log.
[ "Announce", "theta", "values", "to", "the", "log", "." ]
python
train
saltstack/salt
salt/utils/openstack/neutron.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/neutron.py#L450-L459
def create_router(self, name, ext_network=None, admin_state_up=True): ''' Creates a new router ''' body = {'name': name, 'admin_state_up': admin_state_up} if ext_network: net_id = self._find_network_id(ext_network) body['external_gateway_info'] = {'network_id': net_id} return self.network_conn.create_router(body={'router': body})
[ "def", "create_router", "(", "self", ",", "name", ",", "ext_network", "=", "None", ",", "admin_state_up", "=", "True", ")", ":", "body", "=", "{", "'name'", ":", "name", ",", "'admin_state_up'", ":", "admin_state_up", "}", "if", "ext_network", ":", "net_id...
Creates a new router
[ "Creates", "a", "new", "router" ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/job.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/job.py#L1859-L1875
def destination_uri_file_counts(self): """Return file counts from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.extract.destinationUriFileCounts Returns: a list of integer counts, each representing the number of files per destination URI or URI pattern specified in the extract configuration. These values will be in the same order as the URIs specified in the 'destinationUris' field. Returns None if job is not yet complete. """ counts = self._job_statistics().get("destinationUriFileCounts") if counts is not None: return [int(count) for count in counts] return None
[ "def", "destination_uri_file_counts", "(", "self", ")", ":", "counts", "=", "self", ".", "_job_statistics", "(", ")", ".", "get", "(", "\"destinationUriFileCounts\"", ")", "if", "counts", "is", "not", "None", ":", "return", "[", "int", "(", "count", ")", "...
Return file counts from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.extract.destinationUriFileCounts Returns: a list of integer counts, each representing the number of files per destination URI or URI pattern specified in the extract configuration. These values will be in the same order as the URIs specified in the 'destinationUris' field. Returns None if job is not yet complete.
[ "Return", "file", "counts", "from", "job", "statistics", "if", "present", "." ]
python
train
f3at/feat
src/feat/extern/log/log.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/extern/log/log.py#L557-L574
def addLogHandler(func): """ Add a custom log handler. @param func: a function object with prototype (level, object, category, message) where level is either ERROR, WARN, INFO, DEBUG, or LOG, and the rest of the arguments are strings or None. Use getLevelName(level) to get a printable name for the log level. @type func: a callable function @raises TypeError: if func is not a callable """ if not callable(func): raise TypeError("func must be callable") if func not in _log_handlers: _log_handlers.append(func)
[ "def", "addLogHandler", "(", "func", ")", ":", "if", "not", "callable", "(", "func", ")", ":", "raise", "TypeError", "(", "\"func must be callable\"", ")", "if", "func", "not", "in", "_log_handlers", ":", "_log_handlers", ".", "append", "(", "func", ")" ]
Add a custom log handler. @param func: a function object with prototype (level, object, category, message) where level is either ERROR, WARN, INFO, DEBUG, or LOG, and the rest of the arguments are strings or None. Use getLevelName(level) to get a printable name for the log level. @type func: a callable function @raises TypeError: if func is not a callable
[ "Add", "a", "custom", "log", "handler", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/pipeline/run_info.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/run_info.py#L402-L413
def _clean_characters(x): """Clean problem characters in sample lane or descriptions. """ if not isinstance(x, six.string_types): x = str(x) else: if not all(ord(char) < 128 for char in x): msg = "Found unicode character in input YAML (%s)" % (x) raise ValueError(repr(msg)) for problem in [" ", ".", "/", "\\", "[", "]", "&", ";", "#", "+", ":", ")", "("]: x = x.replace(problem, "_") return x
[ "def", "_clean_characters", "(", "x", ")", ":", "if", "not", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", ":", "x", "=", "str", "(", "x", ")", "else", ":", "if", "not", "all", "(", "ord", "(", "char", ")", "<", "128", "for", "c...
Clean problem characters in sample lane or descriptions.
[ "Clean", "problem", "characters", "in", "sample", "lane", "or", "descriptions", "." ]
python
train
adamchainz/django-perf-rec
django_perf_rec/utils.py
https://github.com/adamchainz/django-perf-rec/blob/76a1874820b55bcbc2f95a85bbda3cb056584e2c/django_perf_rec/utils.py#L82-L98
def sorted_names(names): """ Sort a list of names but keep the word 'default' first if it's there. """ names = list(names) have_default = False if 'default' in names: names.remove('default') have_default = True sorted_names = sorted(names) if have_default: sorted_names = ['default'] + sorted_names return sorted_names
[ "def", "sorted_names", "(", "names", ")", ":", "names", "=", "list", "(", "names", ")", "have_default", "=", "False", "if", "'default'", "in", "names", ":", "names", ".", "remove", "(", "'default'", ")", "have_default", "=", "True", "sorted_names", "=", ...
Sort a list of names but keep the word 'default' first if it's there.
[ "Sort", "a", "list", "of", "names", "but", "keep", "the", "word", "default", "first", "if", "it", "s", "there", "." ]
python
train
codelv/enaml-native
src/enamlnative/android/android_spinner.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_spinner.py#L53-L63
def create_widget(self): """ Create the underlying label widget. """ d = self.declaration mode = 1 if d.mode == 'dropdown' else 0 self.widget = Spinner(self.get_context(), mode) # Create the adapter simple_spinner_item = 0x01090008 self.adapter = ArrayAdapter(self.get_context(), '@layout/simple_spinner_dropdown_item')
[ "def", "create_widget", "(", "self", ")", ":", "d", "=", "self", ".", "declaration", "mode", "=", "1", "if", "d", ".", "mode", "==", "'dropdown'", "else", "0", "self", ".", "widget", "=", "Spinner", "(", "self", ".", "get_context", "(", ")", ",", "...
Create the underlying label widget.
[ "Create", "the", "underlying", "label", "widget", "." ]
python
train
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L427-L451
def Page_deleteCookie(self, cookieName, url): """ Function path: Page.deleteCookie Domain: Page Method name: deleteCookie WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'cookieName' (type: string) -> Name of the cookie to remove. 'url' (type: string) -> URL to match cooke domain and path. No return value. Description: Deletes browser cookie with given name, domain and path. """ assert isinstance(cookieName, (str,) ), "Argument 'cookieName' must be of type '['str']'. Received type: '%s'" % type( cookieName) assert isinstance(url, (str,) ), "Argument 'url' must be of type '['str']'. Received type: '%s'" % type( url) subdom_funcs = self.synchronous_command('Page.deleteCookie', cookieName= cookieName, url=url) return subdom_funcs
[ "def", "Page_deleteCookie", "(", "self", ",", "cookieName", ",", "url", ")", ":", "assert", "isinstance", "(", "cookieName", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'cookieName' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "cookieNam...
Function path: Page.deleteCookie Domain: Page Method name: deleteCookie WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'cookieName' (type: string) -> Name of the cookie to remove. 'url' (type: string) -> URL to match cooke domain and path. No return value. Description: Deletes browser cookie with given name, domain and path.
[ "Function", "path", ":", "Page", ".", "deleteCookie", "Domain", ":", "Page", "Method", "name", ":", "deleteCookie", "WARNING", ":", "This", "function", "is", "marked", "Experimental", "!", "Parameters", ":", "Required", "arguments", ":", "cookieName", "(", "ty...
python
train
geertj/gruvi
lib/gruvi/http.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/http.py#L303-L327
def parse_url(url, default_scheme='http', is_connect=False): """Parse an URL and return its components. The *default_scheme* argument specifies the scheme in case URL is an otherwise valid absolute URL but with a missing scheme. The *is_connect* argument must be set to ``True`` if the URL was requested with the HTTP CONNECT method. These URLs have a different form and need to be parsed differently. The result is a :class:`ParsedUrl` containing the URL components. """ # If this is not in origin-form, authority-form or asterisk-form and no # scheme is present, assume it's in absolute-form with a missing scheme. # See RFC7230 section 5.3. if url[:1] not in '*/' and not is_connect and '://' not in url: url = '{}://{}'.format(default_scheme, url) burl = s2b(url) parser = ffi.new('struct http_parser_url *') lib.http_parser_url_init(parser) res = lib.http_parser_parse_url(ffi.from_buffer(burl), len(burl), is_connect, parser) if res != 0: raise ValueError('invalid URL') parsed = ParsedUrl.from_parser(parser, url) return parsed
[ "def", "parse_url", "(", "url", ",", "default_scheme", "=", "'http'", ",", "is_connect", "=", "False", ")", ":", "# If this is not in origin-form, authority-form or asterisk-form and no", "# scheme is present, assume it's in absolute-form with a missing scheme.", "# See RFC7230 secti...
Parse an URL and return its components. The *default_scheme* argument specifies the scheme in case URL is an otherwise valid absolute URL but with a missing scheme. The *is_connect* argument must be set to ``True`` if the URL was requested with the HTTP CONNECT method. These URLs have a different form and need to be parsed differently. The result is a :class:`ParsedUrl` containing the URL components.
[ "Parse", "an", "URL", "and", "return", "its", "components", "." ]
python
train
marcomusy/vtkplotter
vtkplotter/colors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/colors.py#L386-L413
def makeLUTfromCTF(sclist, N=None): """ Use a Color Transfer Function to generate colors in a vtk lookup table. See `here <http://www.vtk.org/doc/nightly/html/classvtkColorTransferFunction.html>`_. :param list sclist: a list in the form ``[(scalar1, [r,g,b]), (scalar2, 'blue'), ...]``. :return: the lookup table object ``vtkLookupTable``. This can be fed into ``colorMap``. """ ctf = vtk.vtkColorTransferFunction() ctf.SetColorSpaceToDiverging() for sc in sclist: scalar, col = sc r, g, b = getColor(col) ctf.AddRGBPoint(scalar, r, g, b) if N is None: N = len(sclist) lut = vtk.vtkLookupTable() lut.SetNumberOfTableValues(N) lut.Build() for i in range(N): rgb = list(ctf.GetColor(float(i) / N)) + [1] lut.SetTableValue(i, rgb) return lut
[ "def", "makeLUTfromCTF", "(", "sclist", ",", "N", "=", "None", ")", ":", "ctf", "=", "vtk", ".", "vtkColorTransferFunction", "(", ")", "ctf", ".", "SetColorSpaceToDiverging", "(", ")", "for", "sc", "in", "sclist", ":", "scalar", ",", "col", "=", "sc", ...
Use a Color Transfer Function to generate colors in a vtk lookup table. See `here <http://www.vtk.org/doc/nightly/html/classvtkColorTransferFunction.html>`_. :param list sclist: a list in the form ``[(scalar1, [r,g,b]), (scalar2, 'blue'), ...]``. :return: the lookup table object ``vtkLookupTable``. This can be fed into ``colorMap``.
[ "Use", "a", "Color", "Transfer", "Function", "to", "generate", "colors", "in", "a", "vtk", "lookup", "table", ".", "See", "here", "<http", ":", "//", "www", ".", "vtk", ".", "org", "/", "doc", "/", "nightly", "/", "html", "/", "classvtkColorTransferFunct...
python
train
pyusb/pyusb
usb/legacy.py
https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/legacy.py#L220-L223
def releaseInterface(self): r"""Release an interface previously claimed with claimInterface.""" util.release_interface(self.dev, self.__claimed_interface) self.__claimed_interface = -1
[ "def", "releaseInterface", "(", "self", ")", ":", "util", ".", "release_interface", "(", "self", ".", "dev", ",", "self", ".", "__claimed_interface", ")", "self", ".", "__claimed_interface", "=", "-", "1" ]
r"""Release an interface previously claimed with claimInterface.
[ "r", "Release", "an", "interface", "previously", "claimed", "with", "claimInterface", "." ]
python
train
vmware/pyvmomi
pyVmomi/DynamicTypeManagerHelper.py
https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/pyVmomi/DynamicTypeManagerHelper.py#L227-L242
def _ConvertManagedType(self, managedType): """ Convert vmodl.reflect.DynamicTypeManager.ManagedTypeInfo to pyVmomi managed type definition """ if managedType: vmodlName = managedType.name wsdlName = managedType.wsdlName version = managedType.version parent = managedType.base[0] props = self._Filter(self._ConvertManagedPropertyType, managedType.property) methods = self._Filter(self._ConvertMethodType, managedType.method) moType = (vmodlName, wsdlName, parent, version, props, methods) else: moType = None return moType
[ "def", "_ConvertManagedType", "(", "self", ",", "managedType", ")", ":", "if", "managedType", ":", "vmodlName", "=", "managedType", ".", "name", "wsdlName", "=", "managedType", ".", "wsdlName", "version", "=", "managedType", ".", "version", "parent", "=", "man...
Convert vmodl.reflect.DynamicTypeManager.ManagedTypeInfo to pyVmomi managed type definition
[ "Convert", "vmodl", ".", "reflect", ".", "DynamicTypeManager", ".", "ManagedTypeInfo", "to", "pyVmomi", "managed", "type", "definition" ]
python
train
thumbor/thumbor
thumbor/url_composer.py
https://github.com/thumbor/thumbor/blob/558ccdd6e3bc29e1c9ee3687372c4b3eb05ac607/thumbor/url_composer.py#L159-L183
def main(arguments=None): '''Converts a given url with the specified arguments.''' parsed_options, arguments = get_options(arguments) image_url = arguments[0] image_url = quote(image_url) try: config = Config.load(None) except Exception: config = None if not parsed_options.key and not config: sys.stdout.write('Error: The -k or --key argument is mandatory. For more information type thumbor-url -h\n') return security_key, thumbor_params = get_thumbor_params(image_url, parsed_options, config) crypto = CryptoURL(key=security_key) url = crypto.generate(**thumbor_params) sys.stdout.write('URL:\n') sys.stdout.write('%s\n' % url) return url
[ "def", "main", "(", "arguments", "=", "None", ")", ":", "parsed_options", ",", "arguments", "=", "get_options", "(", "arguments", ")", "image_url", "=", "arguments", "[", "0", "]", "image_url", "=", "quote", "(", "image_url", ")", "try", ":", "config", "...
Converts a given url with the specified arguments.
[ "Converts", "a", "given", "url", "with", "the", "specified", "arguments", "." ]
python
train
LonamiWebs/Telethon
telethon_generator/generators/docs.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_generator/generators/docs.py#L28-L33
def get_import_code(tlobject): """``TLObject -> from ... import ...``.""" kind = 'functions' if tlobject.is_function else 'types' ns = '.' + tlobject.namespace if tlobject.namespace else '' return 'from telethon.tl.{}{} import {}'\ .format(kind, ns, tlobject.class_name)
[ "def", "get_import_code", "(", "tlobject", ")", ":", "kind", "=", "'functions'", "if", "tlobject", ".", "is_function", "else", "'types'", "ns", "=", "'.'", "+", "tlobject", ".", "namespace", "if", "tlobject", ".", "namespace", "else", "''", "return", "'from ...
``TLObject -> from ... import ...``.
[ "TLObject", "-", ">", "from", "...", "import", "...", "." ]
python
train
cox-labs/perseuspy
perseuspy/io/perseus/matrix.py
https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/matrix.py#L105-L147
def to_perseus(df, path_or_file, main_columns=None, separator=separator, convert_bool_to_category=True, numerical_annotation_rows = set([])): """ Save pd.DataFrame to Perseus text format. :param df: pd.DataFrame. :param path_or_file: File name or file-like object. :param main_columns: Main columns. Will be infered if set to None. All numeric columns up-until the first non-numeric column are considered main columns. :param separator: For separating fields, default='\t'. :param covert_bool_to_category: Convert bool columns of True/False to category columns '+'/'', default=True. :param numerical_annotation_rows: Set of column names to be interpreted as numerical annotation rows, default=set([]). """ _df = df.copy() if not _df.columns.name: _df.columns.name = 'Column Name' column_names = _df.columns.get_level_values('Column Name') annotations = {} main_columns = _infer_main_columns(_df) if main_columns is None else main_columns annotations['Type'] = ['E' if column_names[i] in main_columns else dtype_to_perseus(dtype) for i, dtype in enumerate(_df.dtypes)] # detect multi-numeric columns for i, column in enumerate(_df.columns): valid_values = [value for value in _df[column] if value is not None] if len(valid_values) > 0 and all(type(value) is list for value in valid_values): annotations['Type'][i] = 'M' _df[column] = _df[column].apply(lambda xs: ';'.join(str(x) for x in xs)) if convert_bool_to_category: for i, column in enumerate(_df.columns): if _df.dtypes[i] is np.dtype('bool'): values = _df[column].values _df[column][values] = '+' _df[column][~values] = '' annotation_row_names = set(_df.columns.names) - {'Column Name'} for name in annotation_row_names: annotation_type = 'N' if name in numerical_annotation_rows else 'C' annotations['{}:{}'.format(annotation_type, name)] = _df.columns.get_level_values(name) with PathOrFile(path_or_file, 'w') as f: f.write(separator.join(column_names) + '\n') for name, values in annotations.items(): 
f.write('#!{{{name}}}{values}\n'.format(name=name, values=separator.join([str(x) for x in values]))) _df.to_csv(f, header=None, index=False, sep=separator)
[ "def", "to_perseus", "(", "df", ",", "path_or_file", ",", "main_columns", "=", "None", ",", "separator", "=", "separator", ",", "convert_bool_to_category", "=", "True", ",", "numerical_annotation_rows", "=", "set", "(", "[", "]", ")", ")", ":", "_df", "=", ...
Save pd.DataFrame to Perseus text format. :param df: pd.DataFrame. :param path_or_file: File name or file-like object. :param main_columns: Main columns. Will be infered if set to None. All numeric columns up-until the first non-numeric column are considered main columns. :param separator: For separating fields, default='\t'. :param covert_bool_to_category: Convert bool columns of True/False to category columns '+'/'', default=True. :param numerical_annotation_rows: Set of column names to be interpreted as numerical annotation rows, default=set([]).
[ "Save", "pd", ".", "DataFrame", "to", "Perseus", "text", "format", "." ]
python
train
timofurrer/observable
observable/core.py
https://github.com/timofurrer/observable/blob/a6a764efaf9408a334bdb1ddf4327d9dbc4b8eaa/observable/core.py#L53-L57
def is_registered(self, event: str, handler: T.Callable) -> bool: """Returns whether the given handler is registered for the given event.""" return handler in self._events.get(event, [])
[ "def", "is_registered", "(", "self", ",", "event", ":", "str", ",", "handler", ":", "T", ".", "Callable", ")", "->", "bool", ":", "return", "handler", "in", "self", ".", "_events", ".", "get", "(", "event", ",", "[", "]", ")" ]
Returns whether the given handler is registered for the given event.
[ "Returns", "whether", "the", "given", "handler", "is", "registered", "for", "the", "given", "event", "." ]
python
train
gesellkammer/sndfileio
sndfileio/resampling.py
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/resampling.py#L191-L213
def _resample_obspy(samples, sr, newsr, window='hanning', lowpass=True): # type: (np.ndarray, int, int, str, bool) -> np.ndarray """ Resample using Fourier method. The same as resample_scipy but with low-pass filtering for upsampling """ from scipy.signal import resample from math import ceil factor = sr/float(newsr) if newsr < sr and lowpass: # be sure filter still behaves good if factor > 16: logger.info("Automatic filter design is unstable for resampling " "factors (current sampling rate/new sampling rate) " "above 16. Manual resampling is necessary.") freq = min(sr, newsr) * 0.5 / float(factor) logger.debug(f"resample_obspy: lowpass {freq}") samples = lowpass_cheby2(samples, freq=freq, sr=sr, maxorder=12) num = int(ceil(len(samples) / factor)) return _applyMultichan(samples, lambda S: resample(S, num, window=window))
[ "def", "_resample_obspy", "(", "samples", ",", "sr", ",", "newsr", ",", "window", "=", "'hanning'", ",", "lowpass", "=", "True", ")", ":", "# type: (np.ndarray, int, int, str, bool) -> np.ndarray", "from", "scipy", ".", "signal", "import", "resample", "from", "mat...
Resample using Fourier method. The same as resample_scipy but with low-pass filtering for upsampling
[ "Resample", "using", "Fourier", "method", ".", "The", "same", "as", "resample_scipy", "but", "with", "low", "-", "pass", "filtering", "for", "upsampling" ]
python
train
annoviko/pyclustering
pyclustering/nnet/examples/syncpr_examples.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/examples/syncpr_examples.py#L106-L116
def small_ftk_image_recognition(): """! @brief Trains network using letters 'F', 'T', 'K' and recognize each of them with and without noise. """ images = []; images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_F; images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_T; images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_K; template_recognition_image(images, 100, 10, 0.2);
[ "def", "small_ftk_image_recognition", "(", ")", ":", "images", "=", "[", "]", "images", "+=", "IMAGE_SYMBOL_SAMPLES", ".", "LIST_IMAGES_SYMBOL_F", "images", "+=", "IMAGE_SYMBOL_SAMPLES", ".", "LIST_IMAGES_SYMBOL_T", "images", "+=", "IMAGE_SYMBOL_SAMPLES", ".", "LIST_IMA...
! @brief Trains network using letters 'F', 'T', 'K' and recognize each of them with and without noise.
[ "!" ]
python
valid
lsbardel/python-stdnet
stdnet/odm/struct.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L588-L592
def pop_back(self): '''Remove the last element from the :class:`Sequence`.''' backend = self.backend return backend.execute(backend.structure(self).pop_back(), self.value_pickler.loads)
[ "def", "pop_back", "(", "self", ")", ":", "backend", "=", "self", ".", "backend", "return", "backend", ".", "execute", "(", "backend", ".", "structure", "(", "self", ")", ".", "pop_back", "(", ")", ",", "self", ".", "value_pickler", ".", "loads", ")" ]
Remove the last element from the :class:`Sequence`.
[ "Remove", "the", "last", "element", "from", "the", ":", "class", ":", "Sequence", "." ]
python
train
eykd/paved
paved/pycheck.py
https://github.com/eykd/paved/blob/f04f8a4248c571f3d5ce882b325884a3e5d80203/paved/pycheck.py#L37-L71
def sloccount(): '''Print "Source Lines of Code" and export to file. Export is hudson_ plugin_ compatible: sloccount.sc requirements: - sloccount_ should be installed. - tee and pipes are used options.paved.pycheck.sloccount.param .. _sloccount: http://www.dwheeler.com/sloccount/ .. _hudson: http://hudson-ci.org/ .. _plugin: http://wiki.hudson-ci.org/display/HUDSON/SLOCCount+Plugin ''' # filter out subpackages setup = options.get('setup') packages = options.get('packages') if setup else None if packages: dirs = [x for x in packages if '.' not in x] else: dirs = ['.'] # sloccount has strange behaviour with directories, # can cause exception in hudson sloccount plugin. # Better to call it with file list ls=[] for d in dirs: ls += list(path(d).walkfiles()) #ls=list(set(ls)) files=' '.join(ls) param=options.paved.pycheck.sloccount.param sh('sloccount {param} {files} | tee sloccount.sc'.format(param=param, files=files))
[ "def", "sloccount", "(", ")", ":", "# filter out subpackages", "setup", "=", "options", ".", "get", "(", "'setup'", ")", "packages", "=", "options", ".", "get", "(", "'packages'", ")", "if", "setup", "else", "None", "if", "packages", ":", "dirs", "=", "...
Print "Source Lines of Code" and export to file. Export is hudson_ plugin_ compatible: sloccount.sc requirements: - sloccount_ should be installed. - tee and pipes are used options.paved.pycheck.sloccount.param .. _sloccount: http://www.dwheeler.com/sloccount/ .. _hudson: http://hudson-ci.org/ .. _plugin: http://wiki.hudson-ci.org/display/HUDSON/SLOCCount+Plugin
[ "Print", "Source", "Lines", "of", "Code", "and", "export", "to", "file", "." ]
python
valid
spacetelescope/stsci.tools
lib/stsci/tools/iterfile.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/iterfile.py#L111-L153
def parseFilename(filename): """ Parse out filename from any specified extensions. Returns rootname and string version of extension name. Modified from 'pydrizzle.fileutil' to allow this module to be independent of PyDrizzle/MultiDrizzle. """ # Parse out any extension specified in filename _indx = filename.find('[') if _indx > 0: # Read extension name provided _fname = filename[:_indx] extn = filename[_indx+1:-1] # An extension was provided, so parse it out... if repr(extn).find(',') > 1: _extns = extn.split(',') # Two values given for extension: # for example, 'sci,1' or 'dq,1' _extn = [_extns[0],int(_extns[1])] elif repr(extn).find('/') > 1: # We are working with GEIS group syntax _indx = str(extn[:extn.find('/')]) _extn = [int(_indx)] elif isinstance(extn, str): # Only one extension value specified... if extn.isdigit(): # We only have an extension number specified as a string... _nextn = int(extn) else: # We only have EXTNAME specified... _nextn = extn _extn = [_nextn] else: # Only integer extension number given, or default of 0 is used. _extn = [int(extn)] else: _fname = filename _extn = None return _fname,_extn
[ "def", "parseFilename", "(", "filename", ")", ":", "# Parse out any extension specified in filename", "_indx", "=", "filename", ".", "find", "(", "'['", ")", "if", "_indx", ">", "0", ":", "# Read extension name provided", "_fname", "=", "filename", "[", ":", "_ind...
Parse out filename from any specified extensions. Returns rootname and string version of extension name. Modified from 'pydrizzle.fileutil' to allow this module to be independent of PyDrizzle/MultiDrizzle.
[ "Parse", "out", "filename", "from", "any", "specified", "extensions", ".", "Returns", "rootname", "and", "string", "version", "of", "extension", "name", "." ]
python
train
cackharot/suds-py3
suds/wsse.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/wsse.py#L160-L181
def xml(self): """ Get xml representation of the object. @return: The root node. @rtype: L{Element} """ root = Element('UsernameToken', ns=wssens) u = Element('Username', ns=wssens) u.setText(self.username) root.append(u) p = Element('Password', ns=wssens) p.setText(self.password) root.append(p) if self.nonce is not None: n = Element('Nonce', ns=wssens) n.setText(self.nonce) root.append(n) if self.created is not None: n = Element('Created', ns=wsuns) n.setText(str(DateTime(self.created))) root.append(n) return root
[ "def", "xml", "(", "self", ")", ":", "root", "=", "Element", "(", "'UsernameToken'", ",", "ns", "=", "wssens", ")", "u", "=", "Element", "(", "'Username'", ",", "ns", "=", "wssens", ")", "u", ".", "setText", "(", "self", ".", "username", ")", "root...
Get xml representation of the object. @return: The root node. @rtype: L{Element}
[ "Get", "xml", "representation", "of", "the", "object", "." ]
python
train
AguaClara/aguaclara
aguaclara/core/drills.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/core/drills.py#L8-L18
def get_drill_bits_d_imperial(): """Return array of possible drill diameters in imperial.""" step_32nd = np.arange(0.03125, 0.25, 0.03125) step_8th = np.arange(0.25, 1.0, 0.125) step_4th = np.arange(1.0, 2.0, 0.25) maximum = [2.0] return np.concatenate((step_32nd, step_8th, step_4th, maximum)) * u.inch
[ "def", "get_drill_bits_d_imperial", "(", ")", ":", "step_32nd", "=", "np", ".", "arange", "(", "0.03125", ",", "0.25", ",", "0.03125", ")", "step_8th", "=", "np", ".", "arange", "(", "0.25", ",", "1.0", ",", "0.125", ")", "step_4th", "=", "np", ".", ...
Return array of possible drill diameters in imperial.
[ "Return", "array", "of", "possible", "drill", "diameters", "in", "imperial", "." ]
python
train
sendgrid/sendgrid-python
sendgrid/helpers/mail/attachment.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/attachment.py#L196-L218
def get(self): """ Get a JSON-ready representation of this Attachment. :returns: This Attachment, ready for use in a request body. :rtype: dict """ attachment = {} if self.file_content is not None: attachment["content"] = self.file_content.get() if self.file_type is not None: attachment["type"] = self.file_type.get() if self.file_name is not None: attachment["filename"] = self.file_name.get() if self.disposition is not None: attachment["disposition"] = self.disposition.get() if self.content_id is not None: attachment["content_id"] = self.content_id.get() return attachment
[ "def", "get", "(", "self", ")", ":", "attachment", "=", "{", "}", "if", "self", ".", "file_content", "is", "not", "None", ":", "attachment", "[", "\"content\"", "]", "=", "self", ".", "file_content", ".", "get", "(", ")", "if", "self", ".", "file_typ...
Get a JSON-ready representation of this Attachment. :returns: This Attachment, ready for use in a request body. :rtype: dict
[ "Get", "a", "JSON", "-", "ready", "representation", "of", "this", "Attachment", "." ]
python
train
facetoe/zenpy
zenpy/lib/api_objects/__init__.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api_objects/__init__.py#L56-L67
def to_dict(self, serialize=False): """ This method returns the object as a Python dict. If serialize is passed, only those attributes that have been modified will be included in the result. :param serialize: :return: """ if serialize: encode_method = json_encode_for_zendesk else: encode_method = json_encode_for_printing return json.loads(json.dumps(self._to_dict(serialize=serialize), default=encode_method))
[ "def", "to_dict", "(", "self", ",", "serialize", "=", "False", ")", ":", "if", "serialize", ":", "encode_method", "=", "json_encode_for_zendesk", "else", ":", "encode_method", "=", "json_encode_for_printing", "return", "json", ".", "loads", "(", "json", ".", "...
This method returns the object as a Python dict. If serialize is passed, only those attributes that have been modified will be included in the result. :param serialize: :return:
[ "This", "method", "returns", "the", "object", "as", "a", "Python", "dict", ".", "If", "serialize", "is", "passed", "only", "those", "attributes", "that", "have", "been", "modified", "will", "be", "included", "in", "the", "result", ".", ":", "param", "seria...
python
train
colab/colab
colab/accounts/utils/validators.py
https://github.com/colab/colab/blob/2ad099231e620bec647363b27d38006eca71e13b/colab/accounts/utils/validators.py#L6-L28
def validate_social_account(account, url): """Verifies if a social account is valid. Examples: >>> validate_social_account('seocam', 'http://twitter.com') True >>> validate_social_account('seocam-fake-should-fail', 'http://twitter.com') False """ request = urllib2.Request(urlparse.urljoin(url, account)) request.get_method = lambda: 'HEAD' try: response = urllib2.urlopen(request) except urllib2.HTTPError: return False return response.code == 200
[ "def", "validate_social_account", "(", "account", ",", "url", ")", ":", "request", "=", "urllib2", ".", "Request", "(", "urlparse", ".", "urljoin", "(", "url", ",", "account", ")", ")", "request", ".", "get_method", "=", "lambda", ":", "'HEAD'", "try", "...
Verifies if a social account is valid. Examples: >>> validate_social_account('seocam', 'http://twitter.com') True >>> validate_social_account('seocam-fake-should-fail', 'http://twitter.com') False
[ "Verifies", "if", "a", "social", "account", "is", "valid", "." ]
python
train
Groundworkstech/pybfd
setup.py
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/setup.py#L117-L142
def find_binutils_libs(self, libdir, lib_ext): """Find Binutils libraries.""" bfd_expr = re.compile("(lib(?:bfd)|(?:opcodes))(.*?)\%s" % lib_ext ) libs = {} for root, dirs, files in os.walk(libdir): for f in files: m = bfd_expr.search(f) if m: lib, version = m.groups() fp = os.path.join(root, f) if version in libs: libs[ version ].append( fp ) else: libs[ version ] = [fp,] # first, search for multiarch files. # check if we found more than one version of the multiarch libs. multiarch_libs = dict( [(v,_l) for v, _l in libs.items() \ if v.find("multiarch") != -1 ] ) if len(multiarch_libs) > 1: print "[W] Multiple binutils versions detected. Trying to build with default..." return multiarch_libs.values()[0] if len(multiarch_libs) == 1: return multiarch_libs.values()[0] # or use the default libs, or .. none return libs.get("",[])
[ "def", "find_binutils_libs", "(", "self", ",", "libdir", ",", "lib_ext", ")", ":", "bfd_expr", "=", "re", ".", "compile", "(", "\"(lib(?:bfd)|(?:opcodes))(.*?)\\%s\"", "%", "lib_ext", ")", "libs", "=", "{", "}", "for", "root", ",", "dirs", ",", "files", "i...
Find Binutils libraries.
[ "Find", "Binutils", "libraries", "." ]
python
train
MrYsLab/pymata-aio
pymata_aio/pymata_core.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_core.py#L1683-L1724
async def _report_firmware(self, sysex_data): """ This is a private message handler method. This method handles the sysex 'report firmware' command sent by Firmata (0x79). It assembles the firmware version by concatenating the major and minor version number components and the firmware identifier into a string. e.g. "2.3 StandardFirmata.ino" :param sysex_data: Sysex data sent from Firmata :returns: None """ # first byte after command is major number major = sysex_data[1] version_string = str(major) # next byte is minor number minor = sysex_data[2] # append a dot to major number version_string += '.' # append minor number version_string += str(minor) # add a space after the major and minor numbers version_string += ' ' # slice the identifier - from the first byte after the minor # number up until, but not including the END_SYSEX byte name = sysex_data[3:-1] firmware_name_iterator = iter(name) # convert each element from two 7-bit bytes into characters, then add each # character to the version string for e in firmware_name_iterator: version_string += chr(e + (next(firmware_name_iterator) << 7)) # store the value self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = version_string
[ "async", "def", "_report_firmware", "(", "self", ",", "sysex_data", ")", ":", "# first byte after command is major number", "major", "=", "sysex_data", "[", "1", "]", "version_string", "=", "str", "(", "major", ")", "# next byte is minor number", "minor", "=", "syse...
This is a private message handler method. This method handles the sysex 'report firmware' command sent by Firmata (0x79). It assembles the firmware version by concatenating the major and minor version number components and the firmware identifier into a string. e.g. "2.3 StandardFirmata.ino" :param sysex_data: Sysex data sent from Firmata :returns: None
[ "This", "is", "a", "private", "message", "handler", "method", ".", "This", "method", "handles", "the", "sysex", "report", "firmware", "command", "sent", "by", "Firmata", "(", "0x79", ")", ".", "It", "assembles", "the", "firmware", "version", "by", "concatena...
python
train
TrafficSenseMSD/SumoTools
traci/_vehicletype.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_vehicletype.py#L380-L386
def copy(self, origTypeID, newTypeID): """copy(string, string) -> None Duplicates the vType with ID origTypeID. The newly created vType is assigned the ID newTypeID """ self._connection._sendStringCmd( tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.COPY, origTypeID, newTypeID)
[ "def", "copy", "(", "self", ",", "origTypeID", ",", "newTypeID", ")", ":", "self", ".", "_connection", ".", "_sendStringCmd", "(", "tc", ".", "CMD_SET_VEHICLETYPE_VARIABLE", ",", "tc", ".", "COPY", ",", "origTypeID", ",", "newTypeID", ")" ]
copy(string, string) -> None Duplicates the vType with ID origTypeID. The newly created vType is assigned the ID newTypeID
[ "copy", "(", "string", "string", ")", "-", ">", "None" ]
python
train
fboender/ansible-cmdb
lib/jsonxs.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/jsonxs.py#L88-L127
def tokenize(expr): """ Parse a string expression into a set of tokens that can be used as a path into a Python datastructure. """ tokens = [] escape = False cur_token = '' for c in expr: if escape == True: cur_token += c escape = False else: if c == '\\': # Next char will be escaped escape = True continue elif c == '[': # Next token is of type index (list) if len(cur_token) > 0: tokens.append(cur_token) cur_token = '' elif c == ']': # End of index token. Next token defaults to a key (dict) if len(cur_token) > 0: tokens.append(int(cur_token)) cur_token = '' elif c == '.': # End of key token. Next token defaults to a key (dict) if len(cur_token) > 0: tokens.append(cur_token) cur_token = '' else: # Append char to token name cur_token += c if len(cur_token) > 0: tokens.append(cur_token) return tokens
[ "def", "tokenize", "(", "expr", ")", ":", "tokens", "=", "[", "]", "escape", "=", "False", "cur_token", "=", "''", "for", "c", "in", "expr", ":", "if", "escape", "==", "True", ":", "cur_token", "+=", "c", "escape", "=", "False", "else", ":", "if", ...
Parse a string expression into a set of tokens that can be used as a path into a Python datastructure.
[ "Parse", "a", "string", "expression", "into", "a", "set", "of", "tokens", "that", "can", "be", "used", "as", "a", "path", "into", "a", "Python", "datastructure", "." ]
python
train
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L214-L232
def fit(self, data=None): """ Performs clustering.rst :param data: Data to be fit :return: the clustering.rst object """ if self.is_pyclustering_instance(self.model): if isinstance(self.model, xmeans): data = self.input_preprocess(data) self.model._xmeans__pointer_data = data elif isinstance(self.model, clarans): data = self.input_preprocess(data) self.model._clarans__pointer_data = data self.model.process() else: self.model.fit(data) return self
[ "def", "fit", "(", "self", ",", "data", "=", "None", ")", ":", "if", "self", ".", "is_pyclustering_instance", "(", "self", ".", "model", ")", ":", "if", "isinstance", "(", "self", ".", "model", ",", "xmeans", ")", ":", "data", "=", "self", ".", "in...
Performs clustering.rst :param data: Data to be fit :return: the clustering.rst object
[ "Performs", "clustering", ".", "rst" ]
python
train
wbond/pybars3
pybars/_compiler.py
https://github.com/wbond/pybars3/blob/71f13d1012d3746e76099d8db141c43fc769cfed/pybars/_compiler.py#L852-L891
def compile(self, source, path=None): """Compile source to a ready to run template. :param source: The template to compile - should be a unicode string :return: A template function ready to execute """ container = self._generate_code(source) def make_module_name(name, suffix=None): output = 'pybars._templates.%s' % name if suffix: output += '_%s' % suffix return output if not path: path = '_template' generate_name = True else: path = path.replace('\\', '/') path = path.replace('/', '_') mod_name = make_module_name(path) generate_name = mod_name in sys.modules if generate_name: mod_name = make_module_name(path, self.template_counter) while mod_name in sys.modules: self.template_counter += 1 mod_name = make_module_name(path, self.template_counter) mod = ModuleType(mod_name) filename = '%s.py' % mod_name.replace('pybars.', '').replace('.', '/') exec(compile(container.full_code, filename, 'exec', dont_inherit=True), mod.__dict__) sys.modules[mod_name] = mod linecache.getlines(filename, mod.__dict__) return mod.__dict__[container.name]
[ "def", "compile", "(", "self", ",", "source", ",", "path", "=", "None", ")", ":", "container", "=", "self", ".", "_generate_code", "(", "source", ")", "def", "make_module_name", "(", "name", ",", "suffix", "=", "None", ")", ":", "output", "=", "'pybars...
Compile source to a ready to run template. :param source: The template to compile - should be a unicode string :return: A template function ready to execute
[ "Compile", "source", "to", "a", "ready", "to", "run", "template", "." ]
python
train
wdbm/shijian
shijian.py
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1231-L1242
def ustr(text): """ Convert a string to Python 2 unicode or Python 3 string as appropriate to the version of Python in use. """ if text is not None: if sys.version_info >= (3, 0): return str(text) else: return unicode(text) else: return text
[ "def", "ustr", "(", "text", ")", ":", "if", "text", "is", "not", "None", ":", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ")", ":", "return", "str", "(", "text", ")", "else", ":", "return", "unicode", "(", "text", ")", "else", "...
Convert a string to Python 2 unicode or Python 3 string as appropriate to the version of Python in use.
[ "Convert", "a", "string", "to", "Python", "2", "unicode", "or", "Python", "3", "string", "as", "appropriate", "to", "the", "version", "of", "Python", "in", "use", "." ]
python
train
odlgroup/odl
odl/tomo/geometry/parallel.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/parallel.py#L196-L325
def det_to_src(self, angle, dparam): """Direction from a detector location to the source. The direction vector is computed as follows:: dir = rotation_matrix(angle).dot(detector.surface_normal(dparam)) Note that for flat detectors, ``surface_normal`` does not depend on the parameter ``dparam``, hence this function is constant in that variable. Parameters ---------- angle : `array-like` or sequence One or several (Euler) angles in radians at which to evaluate. If ``motion_params.ndim >= 2``, a sequence of that length must be provided. dparam : `array-like` or sequence Detector parameter(s) at which to evaluate. If ``det_params.ndim >= 2``, a sequence of that length must be provided. Returns ------- det_to_src : `numpy.ndarray` Vector(s) pointing from a detector point to the source (at infinity). The shape of the returned array is obtained from the (broadcast) shapes of ``angle`` and ``dparam``, and broadcasting is supported within both parameters and between them. The precise definition of the shape is ``broadcast(bcast_angle, bcast_dparam).shape + (ndim,)``, where ``bcast_angle`` is - ``angle`` if `motion_params` is 1D, - ``broadcast(*angle)`` otherwise, and ``bcast_dparam`` defined analogously. Examples -------- The method works with single parameter values, in which case a single vector is returned: >>> apart = odl.uniform_partition(0, np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) >>> geom = odl.tomo.Parallel2dGeometry(apart, dpart) >>> geom.det_to_src(0, 0) array([ 0., -1.]) >>> geom.det_to_src(0, 1) array([ 0., -1.]) >>> dir = geom.det_to_src(np.pi / 2, 0) >>> np.allclose(dir, [1, 0]) True >>> dir = geom.det_to_src(np.pi / 2, 1) >>> np.allclose(dir, [1, 0]) True Both variables support vectorized calls, i.e., stacks of parameters can be provided. 
The order of axes in the output (left of the ``ndim`` axis for the vector dimension) corresponds to the order of arguments: >>> dirs = geom.det_to_src(0, [-1, 0, 0.5, 1]) >>> dirs array([[ 0., -1.], [ 0., -1.], [ 0., -1.], [ 0., -1.]]) >>> dirs.shape # (num_dparams, ndim) (4, 2) >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], 0) >>> np.allclose(dirs, [[0, -1], ... [1, 0], ... [0, 1]]) True >>> dirs.shape # (num_angles, ndim) (3, 2) >>> # Providing 3 pairs of parameters, resulting in 3 vectors >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], [-1, 0, 1]) >>> dirs[0] # Corresponds to angle = 0, dparam = -1 array([ 0., -1.]) >>> dirs.shape (3, 2) >>> # Pairs of parameters arranged in arrays of same size >>> geom.det_to_src(np.zeros((4, 5)), np.zeros((4, 5))).shape (4, 5, 2) >>> # "Outer product" type evaluation using broadcasting >>> geom.det_to_src(np.zeros((4, 1)), np.zeros((1, 5))).shape (4, 5, 2) """ # Always call the downstream methods with vectorized arguments # to be able to reliably manipulate the final axes of the result if self.motion_params.ndim == 1: squeeze_angle = (np.shape(angle) == ()) angle = np.array(angle, dtype=float, copy=False, ndmin=1) matrix = self.rotation_matrix(angle) # shape (m, ndim, ndim) else: squeeze_angle = (np.broadcast(*angle).shape == ()) angle = tuple(np.array(a, dtype=float, copy=False, ndmin=1) for a in angle) matrix = self.rotation_matrix(angle) # shape (m, ndim, ndim) if self.det_params.ndim == 1: squeeze_dparam = (np.shape(dparam) == ()) dparam = np.array(dparam, dtype=float, copy=False, ndmin=1) else: squeeze_dparam = (np.broadcast(*dparam).shape == ()) dparam = tuple(np.array(p, dtype=float, copy=False, ndmin=1) for p in dparam) normal = self.detector.surface_normal(dparam) # shape (d, ndim) # Perform matrix-vector multiplication along the last axis of both # `matrix` and `normal` while "zipping" all axes that do not # participate in the matrix-vector product. 
In other words, the axes # are labelled # [0, 1, ..., r-1, r, r+1] for `matrix` and # [0, 1, ..., r-1, r+1] for `normal`, and the output axes are set to # [0, 1, ..., r-1, r]. This automatically supports broadcasting # along the axes 0, ..., r-1. matrix_axes = list(range(matrix.ndim)) normal_axes = list(range(matrix.ndim - 2)) + [matrix_axes[-1]] out_axes = list(range(matrix.ndim - 1)) det_to_src = np.einsum(matrix, matrix_axes, normal, normal_axes, out_axes) if squeeze_angle and squeeze_dparam: det_to_src = det_to_src.squeeze() return det_to_src
[ "def", "det_to_src", "(", "self", ",", "angle", ",", "dparam", ")", ":", "# Always call the downstream methods with vectorized arguments", "# to be able to reliably manipulate the final axes of the result", "if", "self", ".", "motion_params", ".", "ndim", "==", "1", ":", "s...
Direction from a detector location to the source. The direction vector is computed as follows:: dir = rotation_matrix(angle).dot(detector.surface_normal(dparam)) Note that for flat detectors, ``surface_normal`` does not depend on the parameter ``dparam``, hence this function is constant in that variable. Parameters ---------- angle : `array-like` or sequence One or several (Euler) angles in radians at which to evaluate. If ``motion_params.ndim >= 2``, a sequence of that length must be provided. dparam : `array-like` or sequence Detector parameter(s) at which to evaluate. If ``det_params.ndim >= 2``, a sequence of that length must be provided. Returns ------- det_to_src : `numpy.ndarray` Vector(s) pointing from a detector point to the source (at infinity). The shape of the returned array is obtained from the (broadcast) shapes of ``angle`` and ``dparam``, and broadcasting is supported within both parameters and between them. The precise definition of the shape is ``broadcast(bcast_angle, bcast_dparam).shape + (ndim,)``, where ``bcast_angle`` is - ``angle`` if `motion_params` is 1D, - ``broadcast(*angle)`` otherwise, and ``bcast_dparam`` defined analogously. Examples -------- The method works with single parameter values, in which case a single vector is returned: >>> apart = odl.uniform_partition(0, np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) >>> geom = odl.tomo.Parallel2dGeometry(apart, dpart) >>> geom.det_to_src(0, 0) array([ 0., -1.]) >>> geom.det_to_src(0, 1) array([ 0., -1.]) >>> dir = geom.det_to_src(np.pi / 2, 0) >>> np.allclose(dir, [1, 0]) True >>> dir = geom.det_to_src(np.pi / 2, 1) >>> np.allclose(dir, [1, 0]) True Both variables support vectorized calls, i.e., stacks of parameters can be provided. 
The order of axes in the output (left of the ``ndim`` axis for the vector dimension) corresponds to the order of arguments: >>> dirs = geom.det_to_src(0, [-1, 0, 0.5, 1]) >>> dirs array([[ 0., -1.], [ 0., -1.], [ 0., -1.], [ 0., -1.]]) >>> dirs.shape # (num_dparams, ndim) (4, 2) >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], 0) >>> np.allclose(dirs, [[0, -1], ... [1, 0], ... [0, 1]]) True >>> dirs.shape # (num_angles, ndim) (3, 2) >>> # Providing 3 pairs of parameters, resulting in 3 vectors >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], [-1, 0, 1]) >>> dirs[0] # Corresponds to angle = 0, dparam = -1 array([ 0., -1.]) >>> dirs.shape (3, 2) >>> # Pairs of parameters arranged in arrays of same size >>> geom.det_to_src(np.zeros((4, 5)), np.zeros((4, 5))).shape (4, 5, 2) >>> # "Outer product" type evaluation using broadcasting >>> geom.det_to_src(np.zeros((4, 1)), np.zeros((1, 5))).shape (4, 5, 2)
[ "Direction", "from", "a", "detector", "location", "to", "the", "source", "." ]
python
train
rvswift/EB
EB/builder/exhaustive/exhaustive.py
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/exhaustive/exhaustive.py#L19-L60
def run(itf): """ Run optimize functions. """ if not itf: return 1 # access user input options = SplitInput(itf) # read input inputpath = os.path.abspath(options.inputpath) print(" Reading input file ...") molecules = csv_interface.read_csv(inputpath, options) if not molecules: print("\n '{flag}' was unable to be parsed\n".format(flag=os.path.basename(options.inputpath))) sys.exit(1) # determine the sort order & ensemble_size #sort_order = classification.get_sort_order(molecules) sort_order = 'asc' ensemble_size = options.ensemble_size # loop over all ensembles # temp 2/3/15 append to auc_list ef_list & write it out for later histogram construction auc_list = [] ef_list = [] for size in [x + 1 for x in range(ensemble_size)]: auc, ef = optimizor(molecules, sort_order, size, options) auc_list += auc ef_list += ef # temp 2/9/15 write auc_list & ef_list out to files for subsequent post-processing f = open('auc_histogram.csv', 'w') for value in auc_list: f.write('%f\n' % value) #f.write('%f, %s\n' % (value[0], value[1])) f.close() f = open('ef_histogram.csv', 'w') for value in ef_list: f.write('%f\n' % value) f.close()
[ "def", "run", "(", "itf", ")", ":", "if", "not", "itf", ":", "return", "1", "# access user input", "options", "=", "SplitInput", "(", "itf", ")", "# read input", "inputpath", "=", "os", ".", "path", ".", "abspath", "(", "options", ".", "inputpath", ")", ...
Run optimize functions.
[ "Run", "optimize", "functions", "." ]
python
train
JamesPHoughton/pysd
pysd/py_backend/vensim/vensim2py.py
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/vensim/vensim2py.py#L206-L305
def get_equation_components(equation_str): """ Breaks down a string representing only the equation part of a model element. Recognizes the various types of model elements that may exist, and identifies them. Parameters ---------- equation_str : basestring the first section in each model element - the full equation. Returns ------- Returns a dictionary containing the following: real_name: basestring The name of the element as given in the original vensim file subs: list of strings list of subscripts or subscript elements expr: basestring kind: basestring What type of equation have we found? - *component* - normal model expression or constant - *lookup* - a lookup table - *subdef* - a subscript definition Examples -------- >>> get_equation_components(r'constant = 25') {'expr': '25', 'kind': 'component', 'subs': [], 'real_name': 'constant'} Notes ----- in this function we dont create python identifiers, we use real names. This is so that when everything comes back together, we can manage any potential namespace conflicts properly """ component_structure_grammar = _include_common_grammar(r""" entry = component / subscript_definition / lookup_definition component = name _ subscriptlist? _ "=" _ expression subscript_definition = name _ ":" _ subscript _ ("," _ subscript)* lookup_definition = name _ &"(" _ expression # uses lookahead assertion to capture whole group name = basic_id / escape_group subscriptlist = '[' _ subscript _ ("," _ subscript)* _ ']' expression = ~r".*" # expression could be anything, at this point. 
subscript = basic_id / escape_group """) # replace any amount of whitespace with a single space equation_str = equation_str.replace('\\t', ' ') equation_str = re.sub(r"\s+", ' ', equation_str) parser = parsimonious.Grammar(component_structure_grammar) tree = parser.parse(equation_str) class ComponentParser(parsimonious.NodeVisitor): def __init__(self, ast): self.subscripts = [] self.real_name = None self.expression = None self.kind = None self.visit(ast) def visit_subscript_definition(self, n, vc): self.kind = 'subdef' def visit_lookup_definition(self, n, vc): self.kind = 'lookup' def visit_component(self, n, vc): self.kind = 'component' def visit_name(self, n, vc): (name,) = vc self.real_name = name.strip() def visit_subscript(self, n, vc): (subscript,) = vc self.subscripts.append(subscript.strip()) def visit_expression(self, n, vc): self.expression = n.text.strip() def generic_visit(self, n, vc): return ''.join(filter(None, vc)) or n.text def visit__(self, n, vc): return ' ' parse_object = ComponentParser(tree) return {'real_name': parse_object.real_name, 'subs': parse_object.subscripts, 'expr': parse_object.expression, 'kind': parse_object.kind}
[ "def", "get_equation_components", "(", "equation_str", ")", ":", "component_structure_grammar", "=", "_include_common_grammar", "(", "r\"\"\"\n entry = component / subscript_definition / lookup_definition\n component = name _ subscriptlist? _ \"=\" _ expression\n subscript_definition = ...
Breaks down a string representing only the equation part of a model element. Recognizes the various types of model elements that may exist, and identifies them. Parameters ---------- equation_str : basestring the first section in each model element - the full equation. Returns ------- Returns a dictionary containing the following: real_name: basestring The name of the element as given in the original vensim file subs: list of strings list of subscripts or subscript elements expr: basestring kind: basestring What type of equation have we found? - *component* - normal model expression or constant - *lookup* - a lookup table - *subdef* - a subscript definition Examples -------- >>> get_equation_components(r'constant = 25') {'expr': '25', 'kind': 'component', 'subs': [], 'real_name': 'constant'} Notes ----- in this function we dont create python identifiers, we use real names. This is so that when everything comes back together, we can manage any potential namespace conflicts properly
[ "Breaks", "down", "a", "string", "representing", "only", "the", "equation", "part", "of", "a", "model", "element", ".", "Recognizes", "the", "various", "types", "of", "model", "elements", "that", "may", "exist", "and", "identifies", "them", "." ]
python
train
conbus/fbmq
fbmq/fbmq.py
https://github.com/conbus/fbmq/blob/2e016597e49d4d3d8bd52a4da5d778b992697649/fbmq/fbmq.py#L523-L536
def set_webhook_handler(self, scope, callback): """ Allows adding a webhook_handler as an alternative to the decorators """ scope = scope.lower() if scope == 'after_send': self._after_send = callback return if scope not in Page.WEBHOOK_ENDPOINTS: raise ValueError("The 'scope' argument must be one of {}.".format(Page.WEBHOOK_ENDPOINTS)) self._webhook_handlers[scope] = callback
[ "def", "set_webhook_handler", "(", "self", ",", "scope", ",", "callback", ")", ":", "scope", "=", "scope", ".", "lower", "(", ")", "if", "scope", "==", "'after_send'", ":", "self", ".", "_after_send", "=", "callback", "return", "if", "scope", "not", "in"...
Allows adding a webhook_handler as an alternative to the decorators
[ "Allows", "adding", "a", "webhook_handler", "as", "an", "alternative", "to", "the", "decorators" ]
python
train
agile4you/SchemaFactory
schema_factory/schema.py
https://github.com/agile4you/SchemaFactory/blob/515e3fb84cddf70fc17e5d300c74c3a63539f223/schema_factory/schema.py#L108-L200
def schema_factory(schema_name, **schema_nodes): """Schema Validation class factory. Args: schema_name(str): The namespace of the schema. schema_nodes(dict): The attr_names / SchemaNodes mapping of schema. Returns: A Schema class. Raises: SchemaError, for bad attribute setting initialization. Examples: >>> from schema_factory import FloatNode, StringNode, SchemaNode >>> >>> PointSchema = schema_factory( ... schema_name='point', ... lat=FloatNode(), ... lng=FloatNode(), ... ) ... >>> point = PointSchema(lat=34, lng=29.01) >>> print(point.to_dict) OrderedDict([('lat', 34.0), ('lng', 29.01)]) >>> point2 = PointSchema(lat='34', lng='0') >>> print(point2.to_dict) OrderedDict([('lat', 34.0), ('lng', 0.0)]) >>> RegionSchema = schema_factory( ... schema_name='Region', ... name=StringNode(), ... country_code=StringNode( required=True, validators=[lambda x: len(x) == 2]), ... location=SchemaNode(PointSchema, required=False, default=None), ... keywords=StringNode(array=True, required=False, default=[]) ... ) ... >>> region = RegionSchema(name='Athens', country_code='gr', location={'lat': 32.7647, 'lng': 27.03}) >>> print(region) <RegionSchema instance, attributes:['country_code', 'keywords', 'location', 'name']> >>> region.keywords [] >>> region2 = RegionSchema(name='Athens') Traceback (most recent call last): ... schema_factory.errors.SchemaError: Missing Required Attributes: {'country_code'} >>> region3 = RegionSchema(name='Athens', country_code='gr', location={'lat': 32.7647, 'lng': 27.03}, ... foo='bar') Traceback (most recent call last): ... schema_factory.errors.SchemaError: Invalid Attributes RegionSchema for {'foo'}. 
>>> region4 = RegionSchema(name='Athens', country_code='gr', keywords=['Acropolis', 'Mousaka', 434132]) """ schema_dict = dict() schema_dict.update(schema_nodes) def cls_repr(self): # pragma: no cover return "<{} instance at: 0x{:x}>".format(self.__class__, id(self)) def cls_str(self): # pragma: no cover return "<{} instance, attributes:{}>".format( self.__class__.__name__, self.schema_nodes ) def cls_init(self, **kwargs): kwargs_set = set(kwargs) if not self.required.issubset(kwargs_set): raise SchemaError('Missing Required Attributes: {}'.format( self.required.difference(kwargs_set) )) if not set(kwargs).issubset(set(self.schema_nodes)): raise SchemaError('Invalid Attributes {} for {}.'.format( self.__class__.__name__, set(kwargs).difference(set(self.schema_nodes)) )) for attr_name in kwargs: setattr(self, attr_name, kwargs[attr_name]) def to_dict(self): return OrderedDict([(k, getattr(self, k)) for k in self.schema_nodes]) schema_dict['to_dict'] = property(to_dict) schema_dict['__init__'] = cls_init schema_dict['__repr__'] = cls_repr schema_dict['__str__'] = cls_str return SchemaType('{}Schema'.format(schema_name.title()), (), schema_dict)
[ "def", "schema_factory", "(", "schema_name", ",", "*", "*", "schema_nodes", ")", ":", "schema_dict", "=", "dict", "(", ")", "schema_dict", ".", "update", "(", "schema_nodes", ")", "def", "cls_repr", "(", "self", ")", ":", "# pragma: no cover", "return", "\"<...
Schema Validation class factory. Args: schema_name(str): The namespace of the schema. schema_nodes(dict): The attr_names / SchemaNodes mapping of schema. Returns: A Schema class. Raises: SchemaError, for bad attribute setting initialization. Examples: >>> from schema_factory import FloatNode, StringNode, SchemaNode >>> >>> PointSchema = schema_factory( ... schema_name='point', ... lat=FloatNode(), ... lng=FloatNode(), ... ) ... >>> point = PointSchema(lat=34, lng=29.01) >>> print(point.to_dict) OrderedDict([('lat', 34.0), ('lng', 29.01)]) >>> point2 = PointSchema(lat='34', lng='0') >>> print(point2.to_dict) OrderedDict([('lat', 34.0), ('lng', 0.0)]) >>> RegionSchema = schema_factory( ... schema_name='Region', ... name=StringNode(), ... country_code=StringNode( required=True, validators=[lambda x: len(x) == 2]), ... location=SchemaNode(PointSchema, required=False, default=None), ... keywords=StringNode(array=True, required=False, default=[]) ... ) ... >>> region = RegionSchema(name='Athens', country_code='gr', location={'lat': 32.7647, 'lng': 27.03}) >>> print(region) <RegionSchema instance, attributes:['country_code', 'keywords', 'location', 'name']> >>> region.keywords [] >>> region2 = RegionSchema(name='Athens') Traceback (most recent call last): ... schema_factory.errors.SchemaError: Missing Required Attributes: {'country_code'} >>> region3 = RegionSchema(name='Athens', country_code='gr', location={'lat': 32.7647, 'lng': 27.03}, ... foo='bar') Traceback (most recent call last): ... schema_factory.errors.SchemaError: Invalid Attributes RegionSchema for {'foo'}. >>> region4 = RegionSchema(name='Athens', country_code='gr', keywords=['Acropolis', 'Mousaka', 434132])
[ "Schema", "Validation", "class", "factory", "." ]
python
train
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/noop/attack_noop.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/noop/attack_noop.py#L86-L90
def main(_): """Run the sample attack""" batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3] for filenames, images in load_images(FLAGS.input_dir, batch_shape): save_images(images, filenames, FLAGS.output_dir)
[ "def", "main", "(", "_", ")", ":", "batch_shape", "=", "[", "FLAGS", ".", "batch_size", ",", "FLAGS", ".", "image_height", ",", "FLAGS", ".", "image_width", ",", "3", "]", "for", "filenames", ",", "images", "in", "load_images", "(", "FLAGS", ".", "inpu...
Run the sample attack
[ "Run", "the", "sample", "attack" ]
python
train
Genida/archan
src/archan/dsm.py
https://github.com/Genida/archan/blob/a026d3105c7e86f30e6c9507b93ceb736684bfdc/src/archan/dsm.py#L90-L96
def validate(self): """Validate data (rows length, categories=entities, square).""" validate_rows_length(self.data, self.columns, exception=self.error) validate_categories_equal_entities(self.categories, self.entities, exception=self.error) if self.square: validate_square(self.data, exception=self.error)
[ "def", "validate", "(", "self", ")", ":", "validate_rows_length", "(", "self", ".", "data", ",", "self", ".", "columns", ",", "exception", "=", "self", ".", "error", ")", "validate_categories_equal_entities", "(", "self", ".", "categories", ",", "self", ".",...
Validate data (rows length, categories=entities, square).
[ "Validate", "data", "(", "rows", "length", "categories", "=", "entities", "square", ")", "." ]
python
train
GNS3/gns3-server
gns3server/controller/export_project.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/export_project.py#L231-L268
def _export_remote_images(project, compute_id, image_type, image, project_zipfile, temporary_dir): """ Export specific image from remote compute :param project: :param compute_id: :param image_type: :param image: :param project_zipfile: :return: """ log.info("Obtaining image `{}` from `{}`".format(image, compute_id)) try: compute = [compute for compute in project.computes if compute.id == compute_id][0] except IndexError: raise aiohttp.web.HTTPConflict( text="Cannot export image from `{}` compute. Compute doesn't exist.".format(compute_id)) (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir) f = open(fd, "wb", closefd=True) response = yield from compute.download_image(image_type, image) if response.status != 200: raise aiohttp.web.HTTPConflict( text="Cannot export image from `{}` compute. Compute sent `{}` status.".format( compute_id, response.status)) while True: data = yield from response.content.read(512) if not data: break f.write(data) response.close() f.close() arcname = os.path.join("images", image_type, image) log.info("Saved {}".format(arcname)) project_zipfile.write(temp_path, arcname=arcname, compress_type=zipfile.ZIP_DEFLATED)
[ "def", "_export_remote_images", "(", "project", ",", "compute_id", ",", "image_type", ",", "image", ",", "project_zipfile", ",", "temporary_dir", ")", ":", "log", ".", "info", "(", "\"Obtaining image `{}` from `{}`\"", ".", "format", "(", "image", ",", "compute_id...
Export specific image from remote compute :param project: :param compute_id: :param image_type: :param image: :param project_zipfile: :return:
[ "Export", "specific", "image", "from", "remote", "compute", ":", "param", "project", ":", ":", "param", "compute_id", ":", ":", "param", "image_type", ":", ":", "param", "image", ":", ":", "param", "project_zipfile", ":", ":", "return", ":" ]
python
train
trac-hacks/trac-github
tracext/github/__init__.py
https://github.com/trac-hacks/trac-github/blob/004b382bb3c76c4d52a04aaaf57d00807e14f0d2/tracext/github/__init__.py#L648-L686
def github_api(self, url, *args): """ Connect to the given GitHub API URL template by replacing all placeholders with the given parameters and return the decoded JSON result on success. On error, return `None`. :param url: The path to request from the GitHub API. Contains format string placeholders that will be replaced with all additional positional arguments. """ import requests import urllib github_api_url = os.environ.get("TRAC_GITHUB_API_URL", "https://api.github.com/") formatted_url = github_api_url + url.format(*(urllib.quote(str(x)) for x in args)) access_token = _config_secret(self.access_token) self.log.debug("Hitting GitHub API endpoint %s with user %s", formatted_url, self.username) # pylint: disable=no-member results = [] try: has_next = True while has_next: req = requests.get(formatted_url, auth=(self.username, access_token)) if req.status_code != 200: try: message = req.json()['message'] except Exception: # pylint: disable=broad-except message = req.text self.log.error("Error communicating with GitHub API at {}: {}".format( # pylint: disable=no-member formatted_url, message)) return None results.extend(req.json()) has_next = 'next' in req.links if has_next: formatted_url = req.links['next']['url'] except requests.exceptions.ConnectionError as rce: self.log.error("Exception while communicating with GitHub API at {}: {}".format( # pylint: disable=no-member formatted_url, rce)) return None return results
[ "def", "github_api", "(", "self", ",", "url", ",", "*", "args", ")", ":", "import", "requests", "import", "urllib", "github_api_url", "=", "os", ".", "environ", ".", "get", "(", "\"TRAC_GITHUB_API_URL\"", ",", "\"https://api.github.com/\"", ")", "formatted_url",...
Connect to the given GitHub API URL template by replacing all placeholders with the given parameters and return the decoded JSON result on success. On error, return `None`. :param url: The path to request from the GitHub API. Contains format string placeholders that will be replaced with all additional positional arguments.
[ "Connect", "to", "the", "given", "GitHub", "API", "URL", "template", "by", "replacing", "all", "placeholders", "with", "the", "given", "parameters", "and", "return", "the", "decoded", "JSON", "result", "on", "success", ".", "On", "error", "return", "None", "...
python
train
priestc/moneywagon
moneywagon/tx.py
https://github.com/priestc/moneywagon/blob/00518f1f557dcca8b3031f46d3564c2baa0227a3/moneywagon/tx.py#L67-L77
def private_key_to_address(self, pk): """ Convert a private key (in hex format) into an address. """ pub = privtopub(pk) pub_byte, priv_byte = get_magic_bytes(self.crypto) if priv_byte >= 128: priv_byte -= 128 #pybitcointools bug return pubtoaddr(pub, pub_byte)
[ "def", "private_key_to_address", "(", "self", ",", "pk", ")", ":", "pub", "=", "privtopub", "(", "pk", ")", "pub_byte", ",", "priv_byte", "=", "get_magic_bytes", "(", "self", ".", "crypto", ")", "if", "priv_byte", ">=", "128", ":", "priv_byte", "-=", "12...
Convert a private key (in hex format) into an address.
[ "Convert", "a", "private", "key", "(", "in", "hex", "format", ")", "into", "an", "address", "." ]
python
train
CiscoDevNet/webexteamssdk
webexteamssdk/restsession.py
https://github.com/CiscoDevNet/webexteamssdk/blob/6fc2cc3557e080ba4b2a380664cb2a0532ae45cd/webexteamssdk/restsession.py#L184-L197
def update_headers(self, headers): """Update the HTTP headers used for requests in this session. Note: Updates provided by the dictionary passed as the `headers` parameter to this method are merged into the session headers by adding new key-value pairs and/or updating the values of existing keys. The session headers are not replaced by the provided dictionary. Args: headers(dict): Updates to the current session headers. """ check_type(headers, dict, may_be_none=False) self._req_session.headers.update(headers)
[ "def", "update_headers", "(", "self", ",", "headers", ")", ":", "check_type", "(", "headers", ",", "dict", ",", "may_be_none", "=", "False", ")", "self", ".", "_req_session", ".", "headers", ".", "update", "(", "headers", ")" ]
Update the HTTP headers used for requests in this session. Note: Updates provided by the dictionary passed as the `headers` parameter to this method are merged into the session headers by adding new key-value pairs and/or updating the values of existing keys. The session headers are not replaced by the provided dictionary. Args: headers(dict): Updates to the current session headers.
[ "Update", "the", "HTTP", "headers", "used", "for", "requests", "in", "this", "session", "." ]
python
test
adafruit/Adafruit_Python_GPIO
Adafruit_GPIO/I2C.py
https://github.com/adafruit/Adafruit_Python_GPIO/blob/a92a23d6b5869663b2bc1ccf78bb11585076a9c4/Adafruit_GPIO/I2C.py#L40-L57
def get_default_bus(): """Return the default bus number based on the device platform. For a Raspberry Pi either bus 0 or 1 (based on the Pi revision) will be returned. For a Beaglebone Black the first user accessible bus, 1, will be returned. """ plat = Platform.platform_detect() if plat == Platform.RASPBERRY_PI: if Platform.pi_revision() == 1: # Revision 1 Pi uses I2C bus 0. return 0 else: # Revision 2 Pi uses I2C bus 1. return 1 elif plat == Platform.BEAGLEBONE_BLACK: # Beaglebone Black has multiple I2C buses, default to 1 (P9_19 and P9_20). return 1 else: raise RuntimeError('Could not determine default I2C bus for platform.')
[ "def", "get_default_bus", "(", ")", ":", "plat", "=", "Platform", ".", "platform_detect", "(", ")", "if", "plat", "==", "Platform", ".", "RASPBERRY_PI", ":", "if", "Platform", ".", "pi_revision", "(", ")", "==", "1", ":", "# Revision 1 Pi uses I2C bus 0.", "...
Return the default bus number based on the device platform. For a Raspberry Pi either bus 0 or 1 (based on the Pi revision) will be returned. For a Beaglebone Black the first user accessible bus, 1, will be returned.
[ "Return", "the", "default", "bus", "number", "based", "on", "the", "device", "platform", ".", "For", "a", "Raspberry", "Pi", "either", "bus", "0", "or", "1", "(", "based", "on", "the", "Pi", "revision", ")", "will", "be", "returned", ".", "For", "a", ...
python
valid
log2timeline/plaso
plaso/cli/log2timeline_tool.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/log2timeline_tool.py#L138-L277
def ParseArguments(self): """Parses the command line arguments. Returns: bool: True if the arguments were successfully parsed. """ loggers.ConfigureLogging() argument_parser = argparse.ArgumentParser( description=self.DESCRIPTION, epilog=self.EPILOG, add_help=False, formatter_class=argparse.RawDescriptionHelpFormatter) self.AddBasicOptions(argument_parser) helpers_manager.ArgumentHelperManager.AddCommandLineArguments( argument_parser, names=['storage_file']) data_location_group = argument_parser.add_argument_group( 'data location arguments') argument_helper_names = ['artifact_definitions', 'data_location'] helpers_manager.ArgumentHelperManager.AddCommandLineArguments( data_location_group, names=argument_helper_names) extraction_group = argument_parser.add_argument_group( 'extraction arguments') argument_helper_names = [ 'artifact_filters', 'extraction', 'filter_file', 'hashers', 'parsers', 'yara_rules'] helpers_manager.ArgumentHelperManager.AddCommandLineArguments( extraction_group, names=argument_helper_names) self.AddStorageMediaImageOptions(extraction_group) self.AddTimeZoneOption(extraction_group) self.AddVSSProcessingOptions(extraction_group) self.AddCredentialOptions(extraction_group) info_group = argument_parser.add_argument_group('informational arguments') self.AddInformationalOptions(info_group) info_group.add_argument( '--info', dest='show_info', action='store_true', default=False, help='Print out information about supported plugins and parsers.') info_group.add_argument( '--use_markdown', '--use-markdown', dest='use_markdown', action='store_true', default=False, help=( 'Output lists in Markdown format use in combination with ' '"--hashers list", "--parsers list" or "--timezone list"')) info_group.add_argument( '--no_dependencies_check', '--no-dependencies-check', dest='dependencies_check', action='store_false', default=True, help='Disable the dependencies check.') self.AddLogFileOptions(info_group) 
helpers_manager.ArgumentHelperManager.AddCommandLineArguments( info_group, names=['status_view']) output_group = argument_parser.add_argument_group('output arguments') helpers_manager.ArgumentHelperManager.AddCommandLineArguments( output_group, names=['text_prepend']) processing_group = argument_parser.add_argument_group( 'processing arguments') self.AddPerformanceOptions(processing_group) self.AddProcessingOptions(processing_group) processing_group.add_argument( '--sigsegv_handler', '--sigsegv-handler', dest='sigsegv_handler', action='store_true', default=False, help=( 'Enables the SIGSEGV handler. WARNING this functionality is ' 'experimental and will a deadlock worker process if a real ' 'segfault is caught, but not signal SIGSEGV. This functionality ' 'is therefore primarily intended for debugging purposes')) profiling_group = argument_parser.add_argument_group('profiling arguments') helpers_manager.ArgumentHelperManager.AddCommandLineArguments( profiling_group, names=['profiling']) storage_group = argument_parser.add_argument_group('storage arguments') helpers_manager.ArgumentHelperManager.AddCommandLineArguments( storage_group, names=['storage_format']) argument_parser.add_argument( self._SOURCE_OPTION, action='store', metavar='SOURCE', nargs='?', default=None, type=str, help=( 'Path to a source device, file or directory. If the source is ' 'a supported storage media device or image file, archive file ' 'or a directory, the files within are processed recursively.')) try: options = argument_parser.parse_args() except UnicodeEncodeError: # If we get here we are attempting to print help in a non-Unicode # terminal. self._output_writer.Write('\n') self._output_writer.Write(argument_parser.format_help()) return False # Properly prepare the attributes according to local encoding. 
if self.preferred_encoding == 'ascii': logger.warning( 'The preferred encoding of your system is ASCII, which is not ' 'optimal for the typically non-ASCII characters that need to be ' 'parsed and processed. The tool will most likely crash and die, ' 'perhaps in a way that may not be recoverable. A five second delay ' 'is introduced to give you time to cancel the runtime and ' 'reconfigure your preferred encoding, otherwise continue at own ' 'risk.') time.sleep(5) if self._process_archives: logger.warning( 'Scanning archive files currently can cause deadlock. Continue at ' 'your own risk.') time.sleep(5) try: self.ParseOptions(options) except errors.BadConfigOption as exception: self._output_writer.Write('ERROR: {0!s}\n'.format(exception)) self._output_writer.Write('\n') self._output_writer.Write(argument_parser.format_usage()) return False self._command_line_arguments = self.GetCommandLineArguments() loggers.ConfigureLogging( debug_output=self._debug_mode, filename=self._log_file, quiet_mode=self._quiet_mode) return True
[ "def", "ParseArguments", "(", "self", ")", ":", "loggers", ".", "ConfigureLogging", "(", ")", "argument_parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "self", ".", "DESCRIPTION", ",", "epilog", "=", "self", ".", "EPILOG", ",", "add...
Parses the command line arguments. Returns: bool: True if the arguments were successfully parsed.
[ "Parses", "the", "command", "line", "arguments", "." ]
python
train
rix0rrr/gcl
gcl/ast_util.py
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast_util.py#L75-L98
def enumerate_scope(ast_rootpath, root_env=None, include_default_builtins=False): """Return a dict of { name => Completions } for the given tuple node. Enumerates all keys that are in scope in a given tuple. The node part of the tuple may be None, in case the binding is a built-in. """ with util.LogTime('enumerate_scope'): scope = {} for node in reversed(ast_rootpath): if is_tuple_node(node): for member in node.members: if member.name not in scope: scope[member.name] = Completion(member.name, False, member.comment.as_string(), member.location) if include_default_builtins: # Backwards compat flag root_env = gcl.default_env if root_env: for k in root_env.keys(): if k not in scope and not hide_from_autocomplete(root_env[k]): v = root_env[k] scope[k] = Completion(k, True, dedent(v.__doc__ or ''), None) return scope
[ "def", "enumerate_scope", "(", "ast_rootpath", ",", "root_env", "=", "None", ",", "include_default_builtins", "=", "False", ")", ":", "with", "util", ".", "LogTime", "(", "'enumerate_scope'", ")", ":", "scope", "=", "{", "}", "for", "node", "in", "reversed",...
Return a dict of { name => Completions } for the given tuple node. Enumerates all keys that are in scope in a given tuple. The node part of the tuple may be None, in case the binding is a built-in.
[ "Return", "a", "dict", "of", "{", "name", "=", ">", "Completions", "}", "for", "the", "given", "tuple", "node", "." ]
python
train
evhub/coconut
coconut/terminal.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/terminal.py#L269-L282
def gather_parsing_stats(self): """Times parsing if --verbose.""" if self.verbose: start_time = time.clock() try: yield finally: elapsed_time = time.clock() - start_time printerr("Time while parsing:", elapsed_time, "seconds") if packrat_cache: hits, misses = ParserElement.packrat_cache_stats printerr("Packrat parsing stats:", hits, "hits;", misses, "misses") else: yield
[ "def", "gather_parsing_stats", "(", "self", ")", ":", "if", "self", ".", "verbose", ":", "start_time", "=", "time", ".", "clock", "(", ")", "try", ":", "yield", "finally", ":", "elapsed_time", "=", "time", ".", "clock", "(", ")", "-", "start_time", "pr...
Times parsing if --verbose.
[ "Times", "parsing", "if", "--", "verbose", "." ]
python
train