repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
libyal/dtfabric
dtfabric/runtime/runtime.py
https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/runtime/runtime.py#L172-L197
def CreateClass(cls, data_type_definition):
    """Creates a new structure values class.

    Args:
      data_type_definition (DataTypeDefinition): data type definition.

    Returns:
      class: structure values class.
    """
    cls._ValidateDataTypeDefinition(data_type_definition)
    source = cls._CreateClassTemplate(data_type_definition)

    # Execute the generated class source in a minimal namespace that exposes
    # only the builtins the class template needs.
    restricted_builtins = {
        'object': builtins.object,
        'super': builtins.super}
    exec_namespace = {
        '__builtins__': restricted_builtins,
        '__name__': '{0:s}'.format(data_type_definition.name)}

    if sys.version_info[0] >= 3:
        # Python 3 requires __build_class__ to execute a class statement.
        # pylint: disable=no-member
        restricted_builtins['__build_class__'] = builtins.__build_class__

    exec(source, exec_namespace)  # pylint: disable=exec-used
    return exec_namespace[data_type_definition.name]
[ "def", "CreateClass", "(", "cls", ",", "data_type_definition", ")", ":", "cls", ".", "_ValidateDataTypeDefinition", "(", "data_type_definition", ")", "class_definition", "=", "cls", ".", "_CreateClassTemplate", "(", "data_type_definition", ")", "namespace", "=", "{", ...
Creates a new structure values class. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: class: structure values class.
[ "Creates", "a", "new", "structure", "values", "class", "." ]
python
train
biocore/burrito-fillings
bfillings/vsearch.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/vsearch.py#L175-L213
def _get_result_paths(self, data):
    """ Set the result paths

    Builds the mapping of result names to vsearch output file paths.
    Each entry takes its path from the corresponding command-line
    parameter's value, and is marked as written only when that
    parameter is turned on.
    """
    # Table of result key -> vsearch parameter flag.  A single loop over
    # this table replaces seven copy-pasted ResultPath stanzas and keeps
    # key/flag pairs in sync in one place.
    result_params = [
        ('Output', '--output'),
        ('ClusterFile', '--uc'),
        # uchime 3-way global alignments
        ('Output_aln', '--uchimealns'),
        # uchime tab-separated format
        ('Output_tabular', '--uchimeout'),
        # chimeras fasta file output
        ('Output_chimeras', '--chimeras'),
        # nonchimeras fasta file output
        ('Output_nonchimeras', '--nonchimeras'),
        # log file
        ('LogFile', '--log'),
    ]

    result = {}
    for result_key, flag in result_params:
        parameter = self.Parameters[flag]
        result[result_key] = ResultPath(
            Path=parameter.Value,
            IsWritten=parameter.isOn())
    return result
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "result", "=", "{", "}", "result", "[", "'Output'", "]", "=", "ResultPath", "(", "Path", "=", "self", ".", "Parameters", "[", "'--output'", "]", ".", "Value", ",", "IsWritten", "=", "self"...
Set the result paths
[ "Set", "the", "result", "paths" ]
python
train
deepmind/pysc2
pysc2/env/host_remote_agent.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/env/host_remote_agent.py#L208-L215
def close(self):
    """Shutdown and free all resources."""
    # Quit the controller first, then terminate the process, clearing each
    # reference once it has been released.
    controller = self._controller
    if controller is not None:
        controller.quit()
        self._controller = None

    process = self._process
    if process is not None:
        process.close()
        self._process = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_controller", "is", "not", "None", ":", "self", ".", "_controller", ".", "quit", "(", ")", "self", ".", "_controller", "=", "None", "if", "self", ".", "_process", "is", "not", "None", ":", ...
Shutdown and free all resources.
[ "Shutdown", "and", "free", "all", "resources", "." ]
python
train
mbedmicro/pyOCD
pyocd/flash/loader.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/flash/loader.py#L478-L515
def commit(self):
    """! @brief Write all collected data to flash.

    Ensures that chip erase is used at most once when auto mode or chip
    erase mode is selected: only the first region programmed may use chip
    erase, and every subsequent region is forced to sector erase.  This
    causes no extra erasing, since sector erase first verifies whether the
    sectors are already erased.  It also behaves correctly when the first
    region's flash algorithm does not actually erase the entire chip.

    After calling this method, the loader instance can be reused to
    program more data.
    """
    performances = []

    # Program builders in ascending flash address order.
    for index, builder in enumerate(
            sorted(self._builders.values(), key=lambda b: b.flash_start)):
        # This builder's share of the overall progress.
        self._current_progress_fraction = (
            builder.buffered_data_size / self._total_data_size)

        # Chip erase is only ever allowed for the first region programmed.
        use_chip_erase = self._chip_erase if index == 0 else False

        performances.append(builder.program(
            chip_erase=use_chip_erase,
            progress_cb=self._progress_cb,
            smart_flash=self._smart_flash,
            fast_verify=self._trust_crc,
            keep_unwritten=self._keep_unwritten))

        self._progress_offset += self._current_progress_fraction

    # Report programming statistics.
    self._log_performance(performances)

    # Clear state to allow reuse.
    self._reset_state()
[ "def", "commit", "(", "self", ")", ":", "didChipErase", "=", "False", "perfList", "=", "[", "]", "# Iterate over builders we've created and program the data.", "for", "builder", "in", "sorted", "(", "self", ".", "_builders", ".", "values", "(", ")", ",", "key", ...
! @brief Write all collected data to flash. This routine ensures that chip erase is only used once if either the auto mode or chip erase mode are used. As an example, if two regions are to be written to and True was passed to the constructor for chip_erase (or if the session option was set), then only the first region will actually use chip erase. The second region will be forced to use sector erase. This will not result in extra erasing, as sector erase always verifies whether the sectors are already erased. This will, of course, also work correctly if the flash algorithm for the first region doesn't actually erase the entire chip (all regions). After calling this method, the loader instance can be reused to program more data.
[ "!" ]
python
train
flatangle/flatlib
flatlib/predictives/primarydirections.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/predictives/primarydirections.py#L187-L195
def N(self, ID, asp=0):
    """ Returns the conjunction or opposition aspect
    of an object.

    """
    # Work on a copy so the chart's own object is never mutated.
    aspected = self.chart.get(ID).copy()
    aspected.relocate(aspected.lon + asp)
    return self.G('N_%s_%s' % (ID, asp), aspected.lat, aspected.lon)
[ "def", "N", "(", "self", ",", "ID", ",", "asp", "=", "0", ")", ":", "obj", "=", "self", ".", "chart", ".", "get", "(", "ID", ")", ".", "copy", "(", ")", "obj", ".", "relocate", "(", "obj", ".", "lon", "+", "asp", ")", "ID", "=", "'N_%s_%s'"...
Returns the conjunction or opposition aspect of an object.
[ "Returns", "the", "conjunction", "or", "opposition", "aspect", "of", "an", "object", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_editor/semantic_data_editor.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_editor/semantic_data_editor.py#L127-L136
def get_selected_object(self):
    """ Gets the selected object in the treeview

    Returns the tree store iterator and path of the single selected row,
    or (None, paths) when zero or multiple rows are selected.

    :return:
    """
    _model, paths = self.tree_view.get_selection().get_selected_rows()
    # Only a single-row selection yields a usable iterator.
    if len(paths) != 1:
        return None, paths
    selected_path = paths[0]
    return self.tree_store.get_iter(selected_path), selected_path
[ "def", "get_selected_object", "(", "self", ")", ":", "model", ",", "paths", "=", "self", ".", "tree_view", ".", "get_selection", "(", ")", ".", "get_selected_rows", "(", ")", "if", "len", "(", "paths", ")", "==", "1", ":", "return", "self", ".", "tree_...
Gets the selected object in the treeview :return:
[ "Gets", "the", "selected", "object", "in", "the", "treeview" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/util/event.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/event.py#L719-L729
def connect(self, callback, ref=False, position='first', before=None,
            after=None):
    """ Connect *callback* to this event group.

    The callback will receive events from *all* of the emitters in the
    group.

    See :func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>`
    for a description of the arguments.
    """
    # Ensure the group is forwarding its emitters before registering the
    # new callback.
    self._connect_emitters(True)
    new_connection = EventEmitter.connect(self, callback, ref, position,
                                          before, after)
    return new_connection
[ "def", "connect", "(", "self", ",", "callback", ",", "ref", "=", "False", ",", "position", "=", "'first'", ",", "before", "=", "None", ",", "after", "=", "None", ")", ":", "self", ".", "_connect_emitters", "(", "True", ")", "return", "EventEmitter", "....
Connect the callback to the event group. The callback will receive events from *all* of the emitters in the group. See :func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>` for arguments.
[ "Connect", "the", "callback", "to", "the", "event", "group", ".", "The", "callback", "will", "receive", "events", "from", "*", "all", "*", "of", "the", "emitters", "in", "the", "group", "." ]
python
train
SamLau95/nbinteract
nbinteract/cli.py
https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/cli.py#L360-L389
def expand_folder(notebook_or_folder, recursive=False):
    """
    If notebook_or_folder is a folder, returns a list containing all
    notebooks in the folder. Otherwise, returns a list containing the
    notebook name.

    If recursive is True, recurses into subdirectories.
    """
    # A plain file is returned as a single-element list.
    if os.path.isfile(notebook_or_folder):
        return [notebook_or_folder]

    if not os.path.isdir(notebook_or_folder):
        raise ValueError(
            '{} is neither an existing file nor a folder.'
            .format(notebook_or_folder)
        )

    # Non-recursive: only notebooks directly inside the folder.
    if not recursive:
        return glob('{}/*.ipynb'.format(notebook_or_folder))

    # Recursive: walk the tree, skipping folders that start with .
    notebooks = []
    for folder, _, filenames in os.walk(notebook_or_folder):
        if os.path.basename(folder).startswith('.'):
            continue
        notebooks.extend(
            os.path.join(folder, name)
            for name in fnmatch.filter(filenames, '*.ipynb')
        )
    return notebooks
[ "def", "expand_folder", "(", "notebook_or_folder", ",", "recursive", "=", "False", ")", ":", "is_file", "=", "os", ".", "path", ".", "isfile", "(", "notebook_or_folder", ")", "is_dir", "=", "os", ".", "path", ".", "isdir", "(", "notebook_or_folder", ")", "...
If notebook_or_folder is a folder, returns a list containing all notebooks in the folder. Otherwise, returns a list containing the notebook name. If recursive is True, recurses into subdirectories.
[ "If", "notebook_or_folder", "is", "a", "folder", "returns", "a", "list", "containing", "all", "notebooks", "in", "the", "folder", ".", "Otherwise", "returns", "a", "list", "containing", "the", "notebook", "name", "." ]
python
train
buildbot/buildbot
master/buildbot/worker/openstack.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/worker/openstack.py#L106-L120
def _constructClient(client_version, username, user_domain, password,
                     project_name, project_domain, auth_url):
    """Return a novaclient from the given args.

    Builds a password-auth plugin, wraps it in a keystone session and
    returns a nova client bound to that session.
    """
    loader = loading.get_plugin_loader('password')

    # Build the auth options once instead of duplicating the whole
    # load_from_options() call for the v2 and v3 cases — the previous
    # duplicated calls were prone to drifting apart.
    auth_args = {
        'auth_url': auth_url,
        'username': username,
        'password': password,
        'project_name': project_name,
    }
    # Domain names only work with identity API v3, so they are added only
    # when at least one of them was supplied; v2 setups are unaffected.
    if user_domain is not None or project_domain is not None:
        auth_args['user_domain_name'] = user_domain
        auth_args['project_domain_name'] = project_domain

    auth = loader.load_from_options(**auth_args)
    sess = session.Session(auth=auth)
    return client.Client(client_version, session=sess)
[ "def", "_constructClient", "(", "client_version", ",", "username", ",", "user_domain", ",", "password", ",", "project_name", ",", "project_domain", ",", "auth_url", ")", ":", "loader", "=", "loading", ".", "get_plugin_loader", "(", "'password'", ")", "# These only...
Return a novaclient from the given args.
[ "Return", "a", "novaclient", "from", "the", "given", "args", "." ]
python
train
lokhman/pydbal
pydbal/connection.py
https://github.com/lokhman/pydbal/blob/53f396a2a18826e9fff178cd2c0636c1656cbaea/pydbal/connection.py#L402-L421
def set_auto_commit(self, auto_commit):
    """Sets auto-commit mode for this connection.

    In auto-commit mode every SQL statement is executed and committed as
    its own transaction; otherwise statements are grouped into
    transactions terminated by a call to either the method commit or the
    method rollback. By default, new connections are in auto-commit mode.

    NOTE: If this method is called during a transaction and the
    auto-commit mode is changed, the transaction is committed. If this
    method is called and the auto-commit mode is not changed, the call is
    a no-op.

    :param auto_commit: `True` to enable auto-commit mode;
                        `False` to disable it
    """
    requested_mode = bool(auto_commit)
    # Unchanged mode: nothing to do.
    if requested_mode == self._auto_commit:
        return

    self._auto_commit = requested_mode

    # Switching modes mid-transaction commits the open transaction.
    if self.is_connected() and self._transaction_nesting_level != 0:
        self.commit_all()
[ "def", "set_auto_commit", "(", "self", ",", "auto_commit", ")", ":", "auto_commit", "=", "bool", "(", "auto_commit", ")", "if", "auto_commit", "==", "self", ".", "_auto_commit", ":", "return", "self", ".", "_auto_commit", "=", "auto_commit", "if", "self", "....
Sets auto-commit mode for this connection. If a connection is in auto-commit mode, then all its SQL statements will be executed and committed as individual transactions. Otherwise, its SQL statements are grouped into transactions that are terminated by a call to either the method commit or the method rollback. By default, new connections are in auto-commit mode. NOTE: If this method is called during a transaction and the auto-commit mode is changed, the transaction is committed. If this method is called and the auto-commit mode is not changed, the call is a no-op. :param auto_commit: `True` to enable auto-commit mode; `False` to disable it
[ "Sets", "auto", "-", "commit", "mode", "for", "this", "connection", "." ]
python
train
assemblerflow/flowcraft
flowcraft/templates/assembly_report.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/assembly_report.py#L143-L181
def _parse_assembly(self, assembly_file):
    """Parse an assembly file in fasta format.

    This is a Fasta parsing method that populates the
    :py:attr:`Assembly.contigs` attribute with data for each contig in the
    assembly.

    Parameters
    ----------
    assembly_file : str
        Path to the assembly fasta file.
    """
    with open(assembly_file) as fh:

        header = None

        logger.debug("Starting iteration of assembly file: {}".format(
            assembly_file))

        for line in fh:
            # Skip empty lines
            if not line.strip():
                continue

            # Note: the ">" check runs on the raw (unstripped) line, so a
            # header with leading whitespace would not be recognized.
            if line.startswith(">"):
                # Add contig header to contig dictionary
                header = line[1:].strip()
                self.contigs[header] = []
            else:
                # Add sequence string for the current contig
                # NOTE(review): a sequence line appearing before any ">"
                # header would be stored under the key None — assumes a
                # well-formed FASTA file; confirm with callers.
                self.contigs[header].append(line.strip())

        # After populating the contigs dictionary, convert the values
        # list into a string sequence
        self.contigs = OrderedDict(
            (header, "".join(seq)) for header, seq in self.contigs.items())
[ "def", "_parse_assembly", "(", "self", ",", "assembly_file", ")", ":", "with", "open", "(", "assembly_file", ")", "as", "fh", ":", "header", "=", "None", "logger", ".", "debug", "(", "\"Starting iteration of assembly file: {}\"", ".", "format", "(", "assembly_fi...
Parse an assembly file in fasta format. This is a Fasta parsing method that populates the :py:attr:`Assembly.contigs` attribute with data for each contig in the assembly. Parameters ---------- assembly_file : str Path to the assembly fasta file.
[ "Parse", "an", "assembly", "file", "in", "fasta", "format", "." ]
python
test
COLORFULBOARD/revision
revision/client.py
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/client.py#L149-L162
def filename(self):
    """
    Builds the archive file name: the client key, optionally suffixed
    with the current revision id, plus a ".zip" extension.

    :return:
    :rtype: str
    """
    parts = [self.key]
    # Include the revision id only when a revision file exists and a
    # current revision is recorded.
    if self.has_revision_file() and self.history.current_revision:
        parts.append("-")
        parts.append(self.history.current_revision.revision_id)
    parts.append(".zip")
    return "".join(parts)
[ "def", "filename", "(", "self", ")", ":", "filename", "=", "self", ".", "key", "if", "self", ".", "has_revision_file", "(", ")", "and", "self", ".", "history", ".", "current_revision", ":", "filename", "+=", "\"-\"", "filename", "+=", "self", ".", "histo...
:return: :rtype: str
[ ":", "return", ":", ":", "rtype", ":", "str" ]
python
train
inveniosoftware-contrib/invenio-groups
invenio_groups/models.py
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/models.py#L581-L586
def query_invitations(cls, user, eager=False):
    """Get all invitations for given user."""
    # When eager loading is requested, eagerly load the membership's group.
    eager_fields = [Membership.group] if eager else eager
    return cls.query_by_user(
        user, state=MembershipState.PENDING_USER, eager=eager_fields)
[ "def", "query_invitations", "(", "cls", ",", "user", ",", "eager", "=", "False", ")", ":", "if", "eager", ":", "eager", "=", "[", "Membership", ".", "group", "]", "return", "cls", ".", "query_by_user", "(", "user", ",", "state", "=", "MembershipState", ...
Get all invitations for given user.
[ "Get", "all", "invitations", "for", "given", "user", "." ]
python
valid
kxepal/viivakoodi
barcode/base.py
https://github.com/kxepal/viivakoodi/blob/79f5e866465f481982f9870c31f49a815e921c28/barcode/base.py#L87-L103
def render(self, writer_options=None):
    """Renders the barcode using `self.writer`.

    :parameters:
        writer_options : Dict
            Options for `self.writer`, see writer docs for details.

    :returns: Output of the writers render method.
    """
    # Merge the caller's options over the class-wide defaults.
    options = dict(Barcode.default_writer_options, **(writer_options or {}))
    if options['write_text']:
        options['text'] = self.get_fullcode()
    self.writer.set_options(options)
    # The rendered output is also stored on the Barcode class itself
    # (class attribute), matching the original behavior.
    raw = Barcode.raw = self.writer.render(self.build())
    return raw
[ "def", "render", "(", "self", ",", "writer_options", "=", "None", ")", ":", "options", "=", "Barcode", ".", "default_writer_options", ".", "copy", "(", ")", "options", ".", "update", "(", "writer_options", "or", "{", "}", ")", "if", "options", "[", "'wri...
Renders the barcode using `self.writer`. :parameters: writer_options : Dict Options for `self.writer`, see writer docs for details. :returns: Output of the writers render method.
[ "Renders", "the", "barcode", "using", "self", ".", "writer", "." ]
python
test
rackerlabs/simpl
simpl/git.py
https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/git.py#L262-L288
def git_ls_remote(repo_dir, remote='origin', refs=None):
    """Run git ls-remote.

    'remote' can be a remote ref in a local repo, e.g. origin, or url of
    a remote repository.

    :param repo_dir: repository directory to run the command in.
    :param remote: remote name or repository url.
    :param refs: optional ref (str) or list of refs to limit the listing.

    Return format:

    .. code-block:: python

        {<ref1>: <commit_hash1>,
         <ref2>: <commit_hash2>,
         ...,
         <refN>: <commit_hashN>,
        }
    """
    # `pipes.quote` was deprecated and removed in Python 3.13;
    # `shlex.quote` is its documented, behavior-identical replacement.
    from shlex import quote

    command = ['git', 'ls-remote', quote(remote)]
    if refs:
        if isinstance(refs, list):
            command.extend(refs)
        else:
            command.append(refs)
    raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
    # Drop blank lines and the informational "From <url>" line.
    output = [l.strip() for l in raw
              if l.strip() and not l.strip().lower().startswith('from ')]
    # Each remaining line is "<commit_hash>\t<ref>".
    return {ref: commit_hash
            for commit_hash, ref in [l.split(None, 1) for l in output]}
[ "def", "git_ls_remote", "(", "repo_dir", ",", "remote", "=", "'origin'", ",", "refs", "=", "None", ")", ":", "command", "=", "[", "'git'", ",", "'ls-remote'", ",", "pipes", ".", "quote", "(", "remote", ")", "]", "if", "refs", ":", "if", "isinstance", ...
Run git ls-remote. 'remote' can be a remote ref in a local repo, e.g. origin, or url of a remote repository. Return format: .. code-block:: python {<ref1>: <commit_hash1>, <ref2>: <commit_hash2>, ..., <refN>: <commit_hashN>, }
[ "Run", "git", "ls", "-", "remote", "." ]
python
train
waqasbhatti/astrobase
astrobase/periodbase/zgls.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/periodbase/zgls.py#L269-L351
def generalized_lsp_value_notau(times, mags, errs, omega):
    '''This is the simplified version not using tau.

    The relations used are::

        W = sum (1.0/(errs*errs) )
        w_i = (1/W)*(1/(errs*errs))

        Y = sum( w_i*y_i )
        C = sum( w_i*cos(wt_i) )
        S = sum( w_i*sin(wt_i) )

        YY = sum( w_i*y_i*y_i ) - Y*Y
        YC = sum( w_i*y_i*cos(wt_i) ) - Y*C
        YS = sum( w_i*y_i*sin(wt_i) ) - Y*S

        CpC = sum( w_i*cos(w_t_i)*cos(w_t_i) )
        CC = CpC - C*C
        SS = (1 - CpC) - S*S
        CS = sum( w_i*cos(w_t_i)*sin(w_t_i) ) - C*S

        D(omega) = CC*SS - CS*CS
        P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)

    Parameters
    ----------

    times,mags,errs : np.array
        The time-series to calculate the periodogram value for.

    omega : float
        The frequency to calculate the periodogram value at.

    Returns
    -------

    periodogramvalue : float
        The normalized periodogram at the specified test frequency `omega`.

    '''
    # Inverse-variance weights, normalized so that sum(wi) == 1.
    one_over_errs2 = 1.0/(errs*errs)

    W = npsum(one_over_errs2)
    wi = one_over_errs2/W

    sin_omegat = npsin(omega*times)
    cos_omegat = npcos(omega*times)

    # sin2_omegat is retained for symmetry with the commented-out SpS sum
    # below, but is not otherwise used: SS is derived from CpC instead.
    sin2_omegat = sin_omegat*sin_omegat
    cos2_omegat = cos_omegat*cos_omegat
    sincos_omegat = sin_omegat*cos_omegat

    # calculate some more sums and terms
    Y = npsum( wi*mags )
    C = npsum( wi*cos_omegat )
    S = npsum( wi*sin_omegat )

    YpY = npsum( wi*mags*mags)

    YpC = npsum( wi*mags*cos_omegat )
    YpS = npsum( wi*mags*sin_omegat )

    CpC = npsum( wi*cos2_omegat )
    # SpS = npsum( wi*sin2_omegat )

    CpS = npsum( wi*sincos_omegat )

    # the final terms
    YY = YpY - Y*Y
    YC = YpC - Y*C
    YS = YpS - Y*S
    CC = CpC - C*C
    # The identity sin^2 + cos^2 = 1 gives SpS = 1 - CpC, so the explicit
    # weighted sin^2 sum is not needed.
    SS = 1 - CpC - S*S  # use SpS = 1 - CpC
    CS = CpS - C*S

    # P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)
    # D(omega) = CC*SS - CS*CS
    Domega = CC*SS - CS*CS
    lspval = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*Domega)

    return lspval
[ "def", "generalized_lsp_value_notau", "(", "times", ",", "mags", ",", "errs", ",", "omega", ")", ":", "one_over_errs2", "=", "1.0", "/", "(", "errs", "*", "errs", ")", "W", "=", "npsum", "(", "one_over_errs2", ")", "wi", "=", "one_over_errs2", "/", "W", ...
This is the simplified version not using tau. The relations used are:: W = sum (1.0/(errs*errs) ) w_i = (1/W)*(1/(errs*errs)) Y = sum( w_i*y_i ) C = sum( w_i*cos(wt_i) ) S = sum( w_i*sin(wt_i) ) YY = sum( w_i*y_i*y_i ) - Y*Y YC = sum( w_i*y_i*cos(wt_i) ) - Y*C YS = sum( w_i*y_i*sin(wt_i) ) - Y*S CpC = sum( w_i*cos(w_t_i)*cos(w_t_i) ) CC = CpC - C*C SS = (1 - CpC) - S*S CS = sum( w_i*cos(w_t_i)*sin(w_t_i) ) - C*S D(omega) = CC*SS - CS*CS P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D) Parameters ---------- times,mags,errs : np.array The time-series to calculate the periodogram value for. omega : float The frequency to calculate the periodogram value at. Returns ------- periodogramvalue : float The normalized periodogram at the specified test frequency `omega`.
[ "This", "is", "the", "simplified", "version", "not", "using", "tau", "." ]
python
valid
ucfopen/canvasapi
canvasapi/upload.py
https://github.com/ucfopen/canvasapi/blob/319064b5fc97ba54250af683eb98723ef3f76cf8/canvasapi/upload.py#L72-L103
def upload(self, response, file):
    """
    Upload the file.

    :param response: The response from the upload request.
    :type response: dict
    :param file: A file handler pointing to the file to upload.
    :returns: True if the file uploaded successfully, False otherwise, \
        and the JSON response from the API.
    :rtype: tuple
    """
    response = response.json()

    if not response.get('upload_url'):
        raise ValueError('Bad API response. No upload_url.')

    if not response.get('upload_params'):
        raise ValueError('Bad API response. No upload_params.')

    kwargs = response.get('upload_params')

    response = self._requester.request(
        'POST',
        use_auth=False,
        _url=response.get('upload_url'),
        file=file,
        _kwargs=combine_kwargs(**kwargs)
    )

    # Remove the anti-JSON-hijacking `while(1);` prefix that may appear at
    # the top of a response.  The previous implementation used
    # `str.lstrip('while(1);')`, which strips any leading run of those
    # *characters* (a character set), not that exact prefix, and could
    # mangle responses; strip the exact prefix instead.
    response_text = response.text
    prefix = 'while(1);'
    if response_text.startswith(prefix):
        response_text = response_text[len(prefix):]
    response_json = json.loads(response_text)

    return ('url' in response_json, response_json)
[ "def", "upload", "(", "self", ",", "response", ",", "file", ")", ":", "response", "=", "response", ".", "json", "(", ")", "if", "not", "response", ".", "get", "(", "'upload_url'", ")", ":", "raise", "ValueError", "(", "'Bad API response. No upload_url.'", ...
Upload the file. :param response: The response from the upload request. :type response: dict :param file: A file handler pointing to the file to upload. :returns: True if the file uploaded successfully, False otherwise, \ and the JSON response from the API. :rtype: tuple
[ "Upload", "the", "file", "." ]
python
train
LonamiWebs/Telethon
telethon/network/mtprotosender.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/network/mtprotosender.py#L484-L523
async def _handle_rpc_result(self, message):
    """
    Handles the result for Remote Procedure Calls:

        rpc_result#f35c6d01 req_msg_id:long result:bytes = RpcResult;

    This is where the future results for sent requests are set.
    """
    rpc_result = message.obj
    # Pop (not get): once a result arrives, the request is no longer pending.
    state = self._pending_state.pop(rpc_result.req_msg_id, None)
    self._log.debug('Handling RPC result for message %d',
                    rpc_result.req_msg_id)

    if not state:
        # TODO We should not get responses to things we never sent
        # However receiving a File() with empty bytes is "common".
        # See #658, #759 and #958. They seem to happen in a container
        # which contain the real response right after.
        try:
            with BinaryReader(rpc_result.body) as reader:
                if not isinstance(reader.tgread_object(), upload.File):
                    raise ValueError('Not an upload.File')
        except (TypeNotFoundError, ValueError):
            self._log.info('Received response without parent request: {}'
                           .format(rpc_result.body))
        # Orphan results (parentless File or otherwise) are dropped here.
        return

    if rpc_result.error:
        error = rpc_message_to_error(rpc_result.error, state.request)
        # Acknowledge the message even on error so the server stops
        # re-sending it.
        self._send_queue.append(
            RequestState(MsgsAck([state.msg_id]), loop=self._loop))

        # Setting a result/exception on a cancelled future would raise
        # InvalidStateError, hence the cancelled() guard.
        if not state.future.cancelled():
            state.future.set_exception(error)
    else:
        with BinaryReader(rpc_result.body) as reader:
            result = state.request.read_result(reader)

        if not state.future.cancelled():
            state.future.set_result(result)
[ "async", "def", "_handle_rpc_result", "(", "self", ",", "message", ")", ":", "rpc_result", "=", "message", ".", "obj", "state", "=", "self", ".", "_pending_state", ".", "pop", "(", "rpc_result", ".", "req_msg_id", ",", "None", ")", "self", ".", "_log", "...
Handles the result for Remote Procedure Calls: rpc_result#f35c6d01 req_msg_id:long result:bytes = RpcResult; This is where the future results for sent requests are set.
[ "Handles", "the", "result", "for", "Remote", "Procedure", "Calls", ":" ]
python
train
singingwolfboy/flask-dance
flask_dance/contrib/gitlab.py
https://github.com/singingwolfboy/flask-dance/blob/87d45328bbdaff833559a6d3da71461fe4579592/flask_dance/contrib/gitlab.py#L16-L86
def make_gitlab_blueprint(
    client_id=None,
    client_secret=None,
    scope=None,
    redirect_url=None,
    redirect_to=None,
    login_url=None,
    authorized_url=None,
    session_class=None,
    storage=None,
    hostname="gitlab.com",
):
    """
    Make a blueprint for authenticating with GitLab using OAuth 2. This requires
    a client ID and client secret from GitLab. You should either pass them to
    this constructor, or make sure that your Flask application config defines
    them, using the variables :envvar:`GITLAB_OAUTH_CLIENT_ID` and
    :envvar:`GITLAB_OAUTH_CLIENT_SECRET`.

    Args:
        client_id (str): The client ID for your application on GitLab.
        client_secret (str): The client secret for your application on GitLab
        scope (str, optional): comma-separated list of scopes for the OAuth token
        redirect_url (str): the URL to redirect to after the authentication
            dance is complete
        redirect_to (str): if ``redirect_url`` is not defined, the name of the
            view to redirect to after the authentication dance is complete.
            The actual URL will be determined by :func:`flask.url_for`
        login_url (str, optional): the URL path for the ``login`` view.
            Defaults to ``/gitlab``
        authorized_url (str, optional): the URL path for the ``authorized`` view.
            Defaults to ``/gitlab/authorized``.
        session_class (class, optional): The class to use for creating a
            Requests session. Defaults to
            :class:`~flask_dance.consumer.requests.OAuth2Session`.
        storage: A token storage class, or an instance of a token storage
            class, to use for this blueprint. Defaults to
            :class:`~flask_dance.consumer.storage.session.SessionStorage`.
        hostname (str, optional): If using a private instance of GitLab CE/EE,
            specify the hostname, default is ``gitlab.com``

    :rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
    :returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
    """
    # Build the GitLab endpoint URLs up front so the blueprint construction
    # below stays readable.
    api_base_url = "https://{hostname}/api/v4/".format(hostname=hostname)
    authorization_url = "https://{hostname}/oauth/authorize".format(
        hostname=hostname
    )
    token_url = "https://{hostname}/oauth/token".format(hostname=hostname)

    gitlab_bp = OAuth2ConsumerBlueprint(
        "gitlab",
        __name__,
        client_id=client_id,
        client_secret=client_secret,
        scope=scope,
        base_url=api_base_url,
        authorization_url=authorization_url,
        token_url=token_url,
        redirect_url=redirect_url,
        redirect_to=redirect_to,
        login_url=login_url,
        authorized_url=authorized_url,
        session_class=session_class,
        storage=storage,
    )
    # Fall back to Flask config for the credentials when not passed in.
    gitlab_bp.from_config["client_id"] = "GITLAB_OAUTH_CLIENT_ID"
    gitlab_bp.from_config["client_secret"] = "GITLAB_OAUTH_CLIENT_SECRET"

    @gitlab_bp.before_app_request
    def set_applocal_session():
        ctx = stack.top
        ctx.gitlab_oauth = gitlab_bp.session

    return gitlab_bp
[ "def", "make_gitlab_blueprint", "(", "client_id", "=", "None", ",", "client_secret", "=", "None", ",", "scope", "=", "None", ",", "redirect_url", "=", "None", ",", "redirect_to", "=", "None", ",", "login_url", "=", "None", ",", "authorized_url", "=", "None",...
Make a blueprint for authenticating with GitLab using OAuth 2. This requires a client ID and client secret from GitLab. You should either pass them to this constructor, or make sure that your Flask application config defines them, using the variables :envvar:`GITLAB_OAUTH_CLIENT_ID` and :envvar:`GITLAB_OAUTH_CLIENT_SECRET`. Args: client_id (str): The client ID for your application on GitLab. client_secret (str): The client secret for your application on GitLab scope (str, optional): comma-separated list of scopes for the OAuth token redirect_url (str): the URL to redirect to after the authentication dance is complete redirect_to (str): if ``redirect_url`` is not defined, the name of the view to redirect to after the authentication dance is complete. The actual URL will be determined by :func:`flask.url_for` login_url (str, optional): the URL path for the ``login`` view. Defaults to ``/gitlab`` authorized_url (str, optional): the URL path for the ``authorized`` view. Defaults to ``/gitlab/authorized``. session_class (class, optional): The class to use for creating a Requests session. Defaults to :class:`~flask_dance.consumer.requests.OAuth2Session`. storage: A token storage class, or an instance of a token storage class, to use for this blueprint. Defaults to :class:`~flask_dance.consumer.storage.session.SessionStorage`. hostname (str, optional): If using a private instance of GitLab CE/EE, specify the hostname, default is ``gitlab.com`` :rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint` :returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
[ "Make", "a", "blueprint", "for", "authenticating", "with", "GitLab", "using", "OAuth", "2", ".", "This", "requires", "a", "client", "ID", "and", "client", "secret", "from", "GitLab", ".", "You", "should", "either", "pass", "them", "to", "this", "constructor"...
python
train
sdss/sdss_access
python/sdss_access/path/path.py
https://github.com/sdss/sdss_access/blob/76375bbf37d39d2e4ccbed90bdfa9a4298784470/python/sdss_access/path/path.py#L681-L700
def spectrodir(self, filetype, **kwargs): """Returns :envvar:`SPECTRO_REDUX` or :envvar:`BOSS_SPECTRO_REDUX` depending on the value of `run2d`. Parameters ---------- filetype : str File type parameter. run2d : int or str 2D Reduction ID. Returns ------- spectrodir : str Value of the appropriate environment variable. """ if str(kwargs['run2d']) in ('26', '103', '104'): return os.environ['SPECTRO_REDUX'] else: return os.environ['BOSS_SPECTRO_REDUX']
[ "def", "spectrodir", "(", "self", ",", "filetype", ",", "*", "*", "kwargs", ")", ":", "if", "str", "(", "kwargs", "[", "'run2d'", "]", ")", "in", "(", "'26'", ",", "'103'", ",", "'104'", ")", ":", "return", "os", ".", "environ", "[", "'SPECTRO_REDU...
Returns :envvar:`SPECTRO_REDUX` or :envvar:`BOSS_SPECTRO_REDUX` depending on the value of `run2d`. Parameters ---------- filetype : str File type parameter. run2d : int or str 2D Reduction ID. Returns ------- spectrodir : str Value of the appropriate environment variable.
[ "Returns", ":", "envvar", ":", "SPECTRO_REDUX", "or", ":", "envvar", ":", "BOSS_SPECTRO_REDUX", "depending", "on", "the", "value", "of", "run2d", "." ]
python
train
iotile/coretools
iotilesensorgraph/iotile/sg/sensor_log.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/sensor_log.py#L207-L242
def create_walker(self, selector, skip_all=True): """Create a stream walker based on the given selector. This function returns a StreamWalker subclass that will remain up to date and allow iterating over and popping readings from the stream(s) specified by the selector. When the stream walker is done, it should be passed to destroy_walker so that it is removed from internal lists that are used to always keep it in sync. Args: selector (DataStreamSelector): The selector describing the streams that we want to iterate over. skip_all (bool): Whether to start at the beginning of the data or to skip everything and start at the end. Defaults to skipping everything. This parameter only has any effect on buffered stream selectors. Returns: StreamWalker: A properly updating stream walker with the given selector. """ if selector.buffered: walker = BufferedStreamWalker(selector, self._engine, skip_all=skip_all) self._queue_walkers.append(walker) return walker if selector.match_type == DataStream.CounterType: walker = CounterStreamWalker(selector) else: walker = VirtualStreamWalker(selector) self._virtual_walkers.append(walker) return walker
[ "def", "create_walker", "(", "self", ",", "selector", ",", "skip_all", "=", "True", ")", ":", "if", "selector", ".", "buffered", ":", "walker", "=", "BufferedStreamWalker", "(", "selector", ",", "self", ".", "_engine", ",", "skip_all", "=", "skip_all", ")"...
Create a stream walker based on the given selector. This function returns a StreamWalker subclass that will remain up to date and allow iterating over and popping readings from the stream(s) specified by the selector. When the stream walker is done, it should be passed to destroy_walker so that it is removed from internal lists that are used to always keep it in sync. Args: selector (DataStreamSelector): The selector describing the streams that we want to iterate over. skip_all (bool): Whether to start at the beginning of the data or to skip everything and start at the end. Defaults to skipping everything. This parameter only has any effect on buffered stream selectors. Returns: StreamWalker: A properly updating stream walker with the given selector.
[ "Create", "a", "stream", "walker", "based", "on", "the", "given", "selector", "." ]
python
train
BernardFW/bernard
src/bernard/misc/start_project/_base.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/start_project/_base.py#L71-L76
def make_file_path(project_dir, project_name, root, name): """ Generates the target path for a file """ return path.join(make_dir_path(project_dir, root, project_name), name)
[ "def", "make_file_path", "(", "project_dir", ",", "project_name", ",", "root", ",", "name", ")", ":", "return", "path", ".", "join", "(", "make_dir_path", "(", "project_dir", ",", "root", ",", "project_name", ")", ",", "name", ")" ]
Generates the target path for a file
[ "Generates", "the", "target", "path", "for", "a", "file" ]
python
train
simon-anders/htseq
python3/HTSeq/__init__.py
https://github.com/simon-anders/htseq/blob/6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0/python3/HTSeq/__init__.py#L742-L824
def pair_SAM_alignments_with_buffer( alignments, max_buffer_size=30000000, primary_only=False): '''Iterate over SAM aligments with buffer, position-sorted paired-end Args: alignments (iterator of SAM/BAM alignments): the alignments to wrap max_buffer_size (int): maxmal numer of alignments to keep in memory. primary_only (bool): for each read, consider only the primary line (SAM flag 0x900 = 0). The SAM specification requires one and only one of those for each read. Yields: 2-tuples with each pair of alignments. ''' almnt_buffer = {} ambiguous_pairing_counter = 0 for almnt in alignments: if not almnt.paired_end: raise ValueError( "Sequence of paired-end alignments expected, but got single-end alignment.") if almnt.pe_which == "unknown": raise ValueError( "Cannot process paired-end alignment found with 'unknown' 'pe_which' status.") # FIXME: almnt.not_primary_alignment currently means secondary if primary_only and (almnt.not_primary_alignment or almnt.supplementary): continue matekey = ( almnt.read.name, "second" if almnt.pe_which == "first" else "first", almnt.mate_start.chrom if almnt.mate_aligned else None, almnt.mate_start.pos if almnt.mate_aligned else None, almnt.iv.chrom if almnt.aligned else None, almnt.iv.start if almnt.aligned else None, -almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None) if matekey in almnt_buffer: if len(almnt_buffer[matekey]) == 1: mate = almnt_buffer[matekey][0] del almnt_buffer[matekey] else: mate = almnt_buffer[matekey].pop(0) if ambiguous_pairing_counter == 0: ambiguous_pairing_first_occurance = matekey ambiguous_pairing_counter += 1 if almnt.pe_which == "first": yield (almnt, mate) else: yield (mate, almnt) else: almntkey = ( almnt.read.name, almnt.pe_which, almnt.iv.chrom if almnt.aligned else None, almnt.iv.start if almnt.aligned else None, almnt.mate_start.chrom if almnt.mate_aligned else None, almnt.mate_start.pos if almnt.mate_aligned else None, almnt.inferred_insert_size if almnt.aligned and 
almnt.mate_aligned else None) if almntkey not in almnt_buffer: almnt_buffer[almntkey] = [almnt] else: almnt_buffer[almntkey].append(almnt) if len(almnt_buffer) > max_buffer_size: raise ValueError( "Maximum alignment buffer size exceeded while pairing SAM alignments.") if len(almnt_buffer) > 0: warnings.warn( "Mate records missing for %d records; first such record: %s." % (len(almnt_buffer), str(list(almnt_buffer.values())[0][0]))) for almnt_list in list(almnt_buffer.values()): for almnt in almnt_list: if almnt.pe_which == "first": yield (almnt, None) else: yield (None, almnt) if ambiguous_pairing_counter > 0: warnings.warn( "Mate pairing was ambiguous for %d records; mate key for first such record: %s." % (ambiguous_pairing_counter, str(ambiguous_pairing_first_occurance)))
[ "def", "pair_SAM_alignments_with_buffer", "(", "alignments", ",", "max_buffer_size", "=", "30000000", ",", "primary_only", "=", "False", ")", ":", "almnt_buffer", "=", "{", "}", "ambiguous_pairing_counter", "=", "0", "for", "almnt", "in", "alignments", ":", "if", ...
Iterate over SAM aligments with buffer, position-sorted paired-end Args: alignments (iterator of SAM/BAM alignments): the alignments to wrap max_buffer_size (int): maxmal numer of alignments to keep in memory. primary_only (bool): for each read, consider only the primary line (SAM flag 0x900 = 0). The SAM specification requires one and only one of those for each read. Yields: 2-tuples with each pair of alignments.
[ "Iterate", "over", "SAM", "aligments", "with", "buffer", "position", "-", "sorted", "paired", "-", "end" ]
python
train
python-diamond/Diamond
src/diamond/handler/signalfx.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/signalfx.py#L103-L110
def process(self, metric): """ Queue a metric. Flushing queue if batch size reached """ if self._match_metric(metric): self.metrics.append(metric) if self.should_flush(): self._send()
[ "def", "process", "(", "self", ",", "metric", ")", ":", "if", "self", ".", "_match_metric", "(", "metric", ")", ":", "self", ".", "metrics", ".", "append", "(", "metric", ")", "if", "self", ".", "should_flush", "(", ")", ":", "self", ".", "_send", ...
Queue a metric. Flushing queue if batch size reached
[ "Queue", "a", "metric", ".", "Flushing", "queue", "if", "batch", "size", "reached" ]
python
train
ryan-roemer/django-cloud-browser
cloud_browser/cloud/base.py
https://github.com/ryan-roemer/django-cloud-browser/blob/b06cdd24885a6309e843ed924dbf1705b67e7f48/cloud_browser/cloud/base.py#L82-L91
def smart_content_encoding(self): """Smart content encoding.""" encoding = self.content_encoding if not encoding: base_list = self.basename.split('.') while (not encoding) and len(base_list) > 1: _, encoding = mimetypes.guess_type('.'.join(base_list)) base_list.pop() return encoding
[ "def", "smart_content_encoding", "(", "self", ")", ":", "encoding", "=", "self", ".", "content_encoding", "if", "not", "encoding", ":", "base_list", "=", "self", ".", "basename", ".", "split", "(", "'.'", ")", "while", "(", "not", "encoding", ")", "and", ...
Smart content encoding.
[ "Smart", "content", "encoding", "." ]
python
train
dh1tw/pyhamtools
pyhamtools/lookuplib.py
https://github.com/dh1tw/pyhamtools/blob/ee7e4b8732e23c298da10e07163748156c16d0fa/pyhamtools/lookuplib.py#L453-L482
def _check_inv_operation_for_date(self, item, timestamp, data_dict, data_index_dict): """ Checks if the callsign is marked as an invalid operation for a given timestamp. In case the operation is invalid, True is returned. Otherwise a KeyError is raised. """ if item in data_index_dict: for item in data_index_dict[item]: # startdate < timestamp if const.START in data_dict[item] and not const.END in data_dict[item]: if data_dict[item][const.START] < timestamp: return True # enddate > timestamp elif not const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.END] > timestamp: return True # startdate > timestamp > enddate elif const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.START] < timestamp \ and data_dict[item][const.END] > timestamp: return True # no startdate or enddate available elif not const.START in data_dict[item] and not const.END in data_dict[item]: return True raise KeyError
[ "def", "_check_inv_operation_for_date", "(", "self", ",", "item", ",", "timestamp", ",", "data_dict", ",", "data_index_dict", ")", ":", "if", "item", "in", "data_index_dict", ":", "for", "item", "in", "data_index_dict", "[", "item", "]", ":", "# startdate < time...
Checks if the callsign is marked as an invalid operation for a given timestamp. In case the operation is invalid, True is returned. Otherwise a KeyError is raised.
[ "Checks", "if", "the", "callsign", "is", "marked", "as", "an", "invalid", "operation", "for", "a", "given", "timestamp", ".", "In", "case", "the", "operation", "is", "invalid", "True", "is", "returned", ".", "Otherwise", "a", "KeyError", "is", "raised", "....
python
train
openearth/bmi-python
bmi/wrapper.py
https://github.com/openearth/bmi-python/blob/2f53f24d45515eb0711c2d28ddd6c1582045248f/bmi/wrapper.py#L328-L362
def initialize(self, configfile=None): """Initialize and load the Fortran library (and model, if applicable). The Fortran library is loaded and ctypes is used to annotate functions inside the library. The Fortran library's initialization is called. Normally a path to an ``*.ini`` model file is passed to the :meth:`__init__`. If so, that model is loaded. Note that :meth:`_load_model` changes the working directory to that of the model. """ if configfile is not None: self.configfile = configfile try: self.configfile except AttributeError: raise ValueError("Specify configfile during construction or during initialize") abs_name = os.path.abspath(self.configfile) os.chdir(os.path.dirname(self.configfile) or '.') logmsg = "Loading model {} in directory {}".format( self.configfile, os.path.abspath(os.getcwd()) ) logger.info(logmsg) # Fortran init function. self.library.initialize.argtypes = [c_char_p] self.library.initialize.restype = None # initialize by abs_name because we already chdirred # if configfile is a relative path we would have a problem ierr = wrap(self.library.initialize)(abs_name) if ierr: errormsg = "Loading model {config} failed with exit code {code}" raise RuntimeError(errormsg.format(config=self.configfile, code=ierr))
[ "def", "initialize", "(", "self", ",", "configfile", "=", "None", ")", ":", "if", "configfile", "is", "not", "None", ":", "self", ".", "configfile", "=", "configfile", "try", ":", "self", ".", "configfile", "except", "AttributeError", ":", "raise", "ValueE...
Initialize and load the Fortran library (and model, if applicable). The Fortran library is loaded and ctypes is used to annotate functions inside the library. The Fortran library's initialization is called. Normally a path to an ``*.ini`` model file is passed to the :meth:`__init__`. If so, that model is loaded. Note that :meth:`_load_model` changes the working directory to that of the model.
[ "Initialize", "and", "load", "the", "Fortran", "library", "(", "and", "model", "if", "applicable", ")", "." ]
python
train
saltstack/salt
salt/fileserver/s3fs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/s3fs.py#L348-L368
def _init(): ''' Connect to S3 and download the metadata for each file in all buckets specified and cache the data to disk. ''' cache_file = _get_buckets_cache_filename() exp = time.time() - S3_CACHE_EXPIRE # check mtime of the buckets files cache metadata = None try: if os.path.getmtime(cache_file) > exp: metadata = _read_buckets_cache_file(cache_file) except OSError: pass if metadata is None: # bucket files cache expired or does not exist metadata = _refresh_buckets_cache_file(cache_file) return metadata
[ "def", "_init", "(", ")", ":", "cache_file", "=", "_get_buckets_cache_filename", "(", ")", "exp", "=", "time", ".", "time", "(", ")", "-", "S3_CACHE_EXPIRE", "# check mtime of the buckets files cache", "metadata", "=", "None", "try", ":", "if", "os", ".", "pat...
Connect to S3 and download the metadata for each file in all buckets specified and cache the data to disk.
[ "Connect", "to", "S3", "and", "download", "the", "metadata", "for", "each", "file", "in", "all", "buckets", "specified", "and", "cache", "the", "data", "to", "disk", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L8636-L8645
def system_time_send(self, time_unix_usec, time_boot_ms, force_mavlink1=False): ''' The system time is the time of the master clock, typically the computer clock of the main onboard computer. time_unix_usec : Timestamp of the master clock in microseconds since UNIX epoch. (uint64_t) time_boot_ms : Timestamp of the component clock since boot time in milliseconds. (uint32_t) ''' return self.send(self.system_time_encode(time_unix_usec, time_boot_ms), force_mavlink1=force_mavlink1)
[ "def", "system_time_send", "(", "self", ",", "time_unix_usec", ",", "time_boot_ms", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "system_time_encode", "(", "time_unix_usec", ",", "time_boot_ms", ")", ",", "for...
The system time is the time of the master clock, typically the computer clock of the main onboard computer. time_unix_usec : Timestamp of the master clock in microseconds since UNIX epoch. (uint64_t) time_boot_ms : Timestamp of the component clock since boot time in milliseconds. (uint32_t)
[ "The", "system", "time", "is", "the", "time", "of", "the", "master", "clock", "typically", "the", "computer", "clock", "of", "the", "main", "onboard", "computer", "." ]
python
train
mapbox/mapbox-sdk-py
mapbox/services/directions.py
https://github.com/mapbox/mapbox-sdk-py/blob/72d19dbcf2d254a6ea08129a726471fd21f13023/mapbox/services/directions.py#L143-L249
def directions(self, features, profile='mapbox/driving', alternatives=None, geometries=None, overview=None, steps=None, continue_straight=None, waypoint_snapping=None, annotations=None, language=None, **kwargs): """Request directions for waypoints encoded as GeoJSON features. Parameters ---------- features : iterable An collection of GeoJSON features profile : str Name of a Mapbox profile such as 'mapbox.driving' alternatives : bool Whether to try to return alternative routes, default: False geometries : string Type of geometry returned (geojson, polyline, polyline6) overview : string or False Type of returned overview geometry: 'full', 'simplified', or False steps : bool Whether to return steps and turn-by-turn instructions, default: False continue_straight : bool Direction of travel when departing intermediate waypoints radiuses : iterable of numbers or 'unlimited' Must be same length as features waypoint_snapping : list Controls snapping of waypoints The list is zipped with the features collection and must have the same length. Elements of the list must be one of: - A number (interpretted as a snapping radius) - The string 'unlimited' (unlimited snapping radius) - A 3-element tuple consisting of (radius, angle, range) - None (no snapping parameters specified for that waypoint) annotations : str Whether or not to return additional metadata along the route Possible values are: 'duration', 'distance', 'speed', and 'congestion'. Several annotations can be used by joining them with ','. language : str Language of returned turn-by-turn text instructions, default: 'en' Returns ------- requests.Response The response object has a geojson() method for access to the route(s) as a GeoJSON-like FeatureCollection dictionary. 
""" # backwards compatible, deprecated if 'geometry' in kwargs and geometries is None: geometries = kwargs['geometry'] warnings.warn('Use `geometries` instead of `geometry`', errors.MapboxDeprecationWarning) annotations = self._validate_annotations(annotations) coordinates = encode_coordinates( features, precision=6, min_limit=2, max_limit=25) geometries = self._validate_geom_encoding(geometries) overview = self._validate_geom_overview(overview) profile = self._validate_profile(profile) bearings, radii = self._validate_snapping(waypoint_snapping, features) params = {} if alternatives is not None: params.update( {'alternatives': 'true' if alternatives is True else 'false'}) if geometries is not None: params.update({'geometries': geometries}) if overview is not None: params.update( {'overview': 'false' if overview is False else overview}) if steps is not None: params.update( {'steps': 'true' if steps is True else 'false'}) if continue_straight is not None: params.update( {'continue_straight': 'true' if steps is True else 'false'}) if annotations is not None: params.update({'annotations': ','.join(annotations)}) if language is not None: params.update({'language': language}) if radii is not None: params.update( {'radiuses': ';'.join(str(r) for r in radii)}) if bearings is not None: params.update( {'bearings': ';'.join(self._encode_bearing(b) for b in bearings)}) profile_ns, profile_name = profile.split('/') uri = URITemplate( self.baseuri + '/{profile_ns}/{profile_name}/{coordinates}.json').expand( profile_ns=profile_ns, profile_name=profile_name, coordinates=coordinates) resp = self.session.get(uri, params=params) self.handle_http_error(resp) def geojson(): return self._geojson(resp.json(), geom_format=geometries) resp.geojson = geojson return resp
[ "def", "directions", "(", "self", ",", "features", ",", "profile", "=", "'mapbox/driving'", ",", "alternatives", "=", "None", ",", "geometries", "=", "None", ",", "overview", "=", "None", ",", "steps", "=", "None", ",", "continue_straight", "=", "None", ",...
Request directions for waypoints encoded as GeoJSON features. Parameters ---------- features : iterable An collection of GeoJSON features profile : str Name of a Mapbox profile such as 'mapbox.driving' alternatives : bool Whether to try to return alternative routes, default: False geometries : string Type of geometry returned (geojson, polyline, polyline6) overview : string or False Type of returned overview geometry: 'full', 'simplified', or False steps : bool Whether to return steps and turn-by-turn instructions, default: False continue_straight : bool Direction of travel when departing intermediate waypoints radiuses : iterable of numbers or 'unlimited' Must be same length as features waypoint_snapping : list Controls snapping of waypoints The list is zipped with the features collection and must have the same length. Elements of the list must be one of: - A number (interpretted as a snapping radius) - The string 'unlimited' (unlimited snapping radius) - A 3-element tuple consisting of (radius, angle, range) - None (no snapping parameters specified for that waypoint) annotations : str Whether or not to return additional metadata along the route Possible values are: 'duration', 'distance', 'speed', and 'congestion'. Several annotations can be used by joining them with ','. language : str Language of returned turn-by-turn text instructions, default: 'en' Returns ------- requests.Response The response object has a geojson() method for access to the route(s) as a GeoJSON-like FeatureCollection dictionary.
[ "Request", "directions", "for", "waypoints", "encoded", "as", "GeoJSON", "features", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L26606-L26616
def unload_plug_in(self, name): """Unloads a DBGF plug-in. in name of type str The plug-in name or DLL. Special name 'all' unloads all plug-ins. """ if not isinstance(name, basestring): raise TypeError("name can only be an instance of type basestring") self._call("unloadPlugIn", in_p=[name])
[ "def", "unload_plug_in", "(", "self", ",", "name", ")", ":", "if", "not", "isinstance", "(", "name", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"name can only be an instance of type basestring\"", ")", "self", ".", "_call", "(", "\"unloadPlugIn\"", ...
Unloads a DBGF plug-in. in name of type str The plug-in name or DLL. Special name 'all' unloads all plug-ins.
[ "Unloads", "a", "DBGF", "plug", "-", "in", "." ]
python
train
peakwinter/python-nginx
nginx.py
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L58-L75
def filter(self, btype='', name=''): """ Return child object(s) of this Conf that satisfy certain criteria. :param str btype: Type of object to filter by (e.g. 'Key') :param str name: Name of key OR container value to filter by :returns: full list of matching child objects """ filtered = [] for x in self.children: if name and isinstance(x, Key) and x.name == name: filtered.append(x) elif isinstance(x, Container) and x.__class__.__name__ == btype\ and x.value == name: filtered.append(x) elif not name and btype and x.__class__.__name__ == btype: filtered.append(x) return filtered
[ "def", "filter", "(", "self", ",", "btype", "=", "''", ",", "name", "=", "''", ")", ":", "filtered", "=", "[", "]", "for", "x", "in", "self", ".", "children", ":", "if", "name", "and", "isinstance", "(", "x", ",", "Key", ")", "and", "x", ".", ...
Return child object(s) of this Conf that satisfy certain criteria. :param str btype: Type of object to filter by (e.g. 'Key') :param str name: Name of key OR container value to filter by :returns: full list of matching child objects
[ "Return", "child", "object", "(", "s", ")", "of", "this", "Conf", "that", "satisfy", "certain", "criteria", "." ]
python
train
scnerd/miniutils
miniutils/timing.py
https://github.com/scnerd/miniutils/blob/fe927e26afc5877416dead28dabdf6604387f42c/miniutils/timing.py#L8-L21
def timed_call(func, *args, log_level='DEBUG', **kwargs): """Logs a function's run time :param func: The function to run :param args: The args to pass to the function :param kwargs: The keyword args to pass to the function :param log_level: The log level at which to print the run time :return: The function's return value """ start = time() r = func(*args, **kwargs) t = time() - start log(log_level, "Call to '{}' took {:0.6f}s".format(func.__name__, t)) return r
[ "def", "timed_call", "(", "func", ",", "*", "args", ",", "log_level", "=", "'DEBUG'", ",", "*", "*", "kwargs", ")", ":", "start", "=", "time", "(", ")", "r", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "t", "=", "time", "(", ...
Logs a function's run time :param func: The function to run :param args: The args to pass to the function :param kwargs: The keyword args to pass to the function :param log_level: The log level at which to print the run time :return: The function's return value
[ "Logs", "a", "function", "s", "run", "time" ]
python
train
gccxml/pygccxml
pygccxml/parser/scanner.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/scanner.py#L345-L349
def __read_byte_offset(decl, attrs): """Using duck typing to set the offset instead of in constructor""" offset = attrs.get(XML_AN_OFFSET, 0) # Make sure the size is in bytes instead of bits decl.byte_offset = int(offset) / 8
[ "def", "__read_byte_offset", "(", "decl", ",", "attrs", ")", ":", "offset", "=", "attrs", ".", "get", "(", "XML_AN_OFFSET", ",", "0", ")", "# Make sure the size is in bytes instead of bits", "decl", ".", "byte_offset", "=", "int", "(", "offset", ")", "/", "8" ...
Using duck typing to set the offset instead of in constructor
[ "Using", "duck", "typing", "to", "set", "the", "offset", "instead", "of", "in", "constructor" ]
python
train
schneiderfelipe/pyrrole
pyrrole/core.py
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L114-L154
def _check_data(data): """ Check a data object for inconsistencies. Parameters ---------- data : `pandas.DataFrame` A `data` object, i.e., a table whose rows store information about chemical species, indexed by chemical species. Warns ----- UserWarning Warned if a ground state species has one or more imaginary vibrational frequencies, or if a transition state species has zero, two or more imaginary vibrational frequencies. Examples -------- >>> import pandas as pd >>> from pyrrole.core import _check_data >>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'B', 'vibfreqs': [0., -1., 2.]}, ... {'name': 'C', 'vibfreqs': [0., -1., -2.]}, ... {'name': 'A#', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'C#', 'vibfreqs': [0., -2., -1.]}, ... {'name': 'B#', 'vibfreqs': [0., -1., 2.]}]) ... .set_index('name')) >>> _check_data(data) """ if "vibfreqs" in data.columns: for species in data.index: vibfreqs = data.loc[species, "vibfreqs"] nimagvibfreqs = _np.sum(_np.array(vibfreqs) < 0) if species[-1] == '#' and nimagvibfreqs != 1: _warnings.warn("'{}' should have 1 imaginary vibfreqs but {} " "found".format(species, nimagvibfreqs)) elif species[-1] != '#' and nimagvibfreqs != 0: _warnings.warn("'{}' should have no imaginary vibfreqs but {} " "found".format(species, nimagvibfreqs))
[ "def", "_check_data", "(", "data", ")", ":", "if", "\"vibfreqs\"", "in", "data", ".", "columns", ":", "for", "species", "in", "data", ".", "index", ":", "vibfreqs", "=", "data", ".", "loc", "[", "species", ",", "\"vibfreqs\"", "]", "nimagvibfreqs", "=", ...
Check a data object for inconsistencies. Parameters ---------- data : `pandas.DataFrame` A `data` object, i.e., a table whose rows store information about chemical species, indexed by chemical species. Warns ----- UserWarning Warned if a ground state species has one or more imaginary vibrational frequencies, or if a transition state species has zero, two or more imaginary vibrational frequencies. Examples -------- >>> import pandas as pd >>> from pyrrole.core import _check_data >>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'B', 'vibfreqs': [0., -1., 2.]}, ... {'name': 'C', 'vibfreqs': [0., -1., -2.]}, ... {'name': 'A#', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'C#', 'vibfreqs': [0., -2., -1.]}, ... {'name': 'B#', 'vibfreqs': [0., -1., 2.]}]) ... .set_index('name')) >>> _check_data(data)
[ "Check", "a", "data", "object", "for", "inconsistencies", "." ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/reftrackwidget.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L126-L142
def setup_signals(self, ): """Connect the signals with the slots to make the ui functional :returns: None :rtype: None :raises: None """ self.duplicate_tb.clicked.connect(self.duplicate) self.delete_tb.clicked.connect(self.delete) self.load_tb.clicked.connect(self.load) self.unload_tb.clicked.connect(self.unload) self.reference_tb.clicked.connect(self.reference) self.importtf_tb.clicked.connect(self.import_file) self.importref_tb.clicked.connect(self.import_reference) self.replace_tb.clicked.connect(self.replace) self.imported_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.imported_tb)) self.alien_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.alien_tb))
[ "def", "setup_signals", "(", "self", ",", ")", ":", "self", ".", "duplicate_tb", ".", "clicked", ".", "connect", "(", "self", ".", "duplicate", ")", "self", ".", "delete_tb", ".", "clicked", ".", "connect", "(", "self", ".", "delete", ")", "self", ".",...
Connect the signals with the slots to make the ui functional :returns: None :rtype: None :raises: None
[ "Connect", "the", "signals", "with", "the", "slots", "to", "make", "the", "ui", "functional" ]
python
train
gwastro/pycbc
pycbc/workflow/pegasus_workflow.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/pegasus_workflow.py#L182-L186
def add_output_opt(self, opt, out): """ Add an option that determines an output """ self.add_opt(opt, out._dax_repr()) self._add_output(out)
[ "def", "add_output_opt", "(", "self", ",", "opt", ",", "out", ")", ":", "self", ".", "add_opt", "(", "opt", ",", "out", ".", "_dax_repr", "(", ")", ")", "self", ".", "_add_output", "(", "out", ")" ]
Add an option that determines an output
[ "Add", "an", "option", "that", "determines", "an", "output" ]
python
train
Genida/archan
src/archan/config.py
https://github.com/Genida/archan/blob/a026d3105c7e86f30e6c9507b93ceb736684bfdc/src/archan/config.py#L433-L449
def print_plugins(self): """Print the available plugins.""" width = console_width() line = Style.BRIGHT + '=' * width + '\n' middle = int(width / 2) if self.available_providers: print(line + ' ' * middle + 'PROVIDERS') for provider in sorted(self.available_providers.values(), key=lambda x: x.identifier): provider().print() print() if self.available_checkers: print(line + ' ' * middle + 'CHECKERS') for checker in sorted(self.available_checkers.values(), key=lambda x: x.identifier): checker().print() print()
[ "def", "print_plugins", "(", "self", ")", ":", "width", "=", "console_width", "(", ")", "line", "=", "Style", ".", "BRIGHT", "+", "'='", "*", "width", "+", "'\\n'", "middle", "=", "int", "(", "width", "/", "2", ")", "if", "self", ".", "available_prov...
Print the available plugins.
[ "Print", "the", "available", "plugins", "." ]
python
train
zero-os/zerotier_client
zerotier/network_service.py
https://github.com/zero-os/zerotier_client/blob/03993da11e69d837a0308a2f41ae7b378692fd82/zerotier/network_service.py#L7-L13
def deleteMember(self, address, id, headers=None, query_params=None, content_type="application/json"): """ Delete member from network It is method for DELETE /network/{id}/member/{address} """ uri = self.client.base_url + "/network/"+id+"/member/"+address return self.client.delete(uri, None, headers, query_params, content_type)
[ "def", "deleteMember", "(", "self", ",", "address", ",", "id", ",", "headers", "=", "None", ",", "query_params", "=", "None", ",", "content_type", "=", "\"application/json\"", ")", ":", "uri", "=", "self", ".", "client", ".", "base_url", "+", "\"/network/\...
Delete member from network It is method for DELETE /network/{id}/member/{address}
[ "Delete", "member", "from", "network", "It", "is", "method", "for", "DELETE", "/", "network", "/", "{", "id", "}", "/", "member", "/", "{", "address", "}" ]
python
train
facelessuser/backrefs
backrefs/bre.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/bre.py#L310-L323
def compile(pattern, flags=0, auto_compile=None): # noqa A001 """Compile both the search or search and replace into one object.""" if isinstance(pattern, Bre): if auto_compile is not None: raise ValueError("Cannot compile Bre with a different auto_compile!") elif flags != 0: raise ValueError("Cannot process flags argument with a compiled pattern") return pattern else: if auto_compile is None: auto_compile = True return Bre(compile_search(pattern, flags), auto_compile)
[ "def", "compile", "(", "pattern", ",", "flags", "=", "0", ",", "auto_compile", "=", "None", ")", ":", "# noqa A001", "if", "isinstance", "(", "pattern", ",", "Bre", ")", ":", "if", "auto_compile", "is", "not", "None", ":", "raise", "ValueError", "(", "...
Compile both the search or search and replace into one object.
[ "Compile", "both", "the", "search", "or", "search", "and", "replace", "into", "one", "object", "." ]
python
train
ayust/kitnirc
kitnirc/client.py
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/client.py#L804-L829
def _parse_namreply(client, command, actor, args): """Parse NAMREPLY and update a Channel object.""" prefixes = client._get_prefixes() channelinfo, _, useritems = args.partition(' :') _, _, channel = channelinfo.rpartition(' ') # channeltype channelname c = client.server.get_channel(channel) if not c: _log.warning("Ignoring NAMREPLY for channel '%s' which we are not in.", channel) return # We bypass Channel.add_user() here because we just want to sync in any # users we don't already have, regardless of if other users exist, and # we don't want the warning spam. for nick in useritems.split(): modes = set() while nick[0] in prefixes: modes.add(prefixes[nick[0]]) nick = nick[1:] user = c.members.get(nick) if not user: user = c.members[nick] = User(nick) _log.debug("Added user %s to channel %s", user, channel) user.modes |= modes
[ "def", "_parse_namreply", "(", "client", ",", "command", ",", "actor", ",", "args", ")", ":", "prefixes", "=", "client", ".", "_get_prefixes", "(", ")", "channelinfo", ",", "_", ",", "useritems", "=", "args", ".", "partition", "(", "' :'", ")", "_", ",...
Parse NAMREPLY and update a Channel object.
[ "Parse", "NAMREPLY", "and", "update", "a", "Channel", "object", "." ]
python
train
pyQode/pyqode.core
pyqode/core/widgets/output_window.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/output_window.py#L537-L737
def parse_text(self, formatted_text): """ Retursn a list of operations (draw, cup, ed,...). Each operation consist of a command and its associated data. :param formatted_text: text to parse with the default char format to apply. :return: list of Operation """ assert isinstance(formatted_text, FormattedText) ret_val = [] fmt = formatted_text.fmt if self._prev_fmt_closed else self._prev_fmt fmt = QtGui.QTextCharFormat(fmt) if not self._pending_text: stripped_text = formatted_text.txt else: stripped_text = self._pending_text + formatted_text.txt self._pending_text = '' while stripped_text: try: escape_pos = stripped_text.index(self._escape[0]) except ValueError: ret_val.append(Operation('draw', FormattedText(stripped_text, fmt))) break else: if escape_pos != 0: ret_val.append(Operation('draw', FormattedText(stripped_text[:escape_pos], fmt))) stripped_text = stripped_text[escape_pos:] fmt = QtGui.QTextCharFormat(fmt) assert stripped_text[0] == self._escape[0] while stripped_text and stripped_text[0] == self._escape[0]: if self._escape.startswith(stripped_text): # control sequence not complete self._pending_text += stripped_text stripped_text = '' break if not stripped_text.startswith(self._escape): # check vt100 escape sequences ctrl_seq = False for alt_seq in self._escape_alts: if stripped_text.startswith(alt_seq): ctrl_seq = True break if not ctrl_seq: # not a control sequence self._pending_text = '' ret_val.append(Operation('draw', FormattedText(stripped_text[:1], fmt))) fmt = QtGui.QTextCharFormat(fmt) stripped_text = stripped_text[1:] continue self._pending_text += _mid(stripped_text, 0, self._escape_len) stripped_text = stripped_text[self._escape_len:] # Non draw related command (cursor/erase) if self._pending_text in [self._escape] + self._escape_alts: m = self._supported_commands.match(stripped_text) if m and self._pending_text == self._escape: _, e = m.span() n = m.group('n') cmd = m.group('cmd') if not n: n = 0 ret_val.append(Operation(self._commands[cmd], 
n)) self._pending_text = '' stripped_text = stripped_text[e:] continue else: m = self._unsupported_command.match(stripped_text) if m: self._pending_text = '' stripped_text = stripped_text[m.span()[1]:] continue elif self._pending_text in ['\x1b=', '\x1b>']: self._pending_text = '' continue # Handle Select Graphic Rendition commands # get the number str_nbr = '' numbers = [] while stripped_text: if stripped_text[0].isdigit(): str_nbr += stripped_text[0] else: if str_nbr: numbers.append(str_nbr) if not str_nbr or stripped_text[0] != self._semicolon: break str_nbr = '' self._pending_text += _mid(stripped_text, 0, 1) stripped_text = stripped_text[1:] if not stripped_text: break # remove terminating char if not stripped_text.startswith(self._color_terminator): # _logger().warn('removing %s', repr(self._pending_text + stripped_text[0])) self._pending_text = '' stripped_text = stripped_text[1:] break # got consistent control sequence, ok to clear pending text self._pending_text = '' stripped_text = stripped_text[1:] if not numbers: fmt = QtGui.QTextCharFormat(formatted_text.fmt) self.end_format_scope() i_offset = 0 n = len(numbers) for i in range(n): i += i_offset code = int(numbers[i]) if self._TextColorStart <= code <= self._TextColorEnd: fmt.setForeground(_ansi_color(code - self._TextColorStart, self.color_scheme)) self._set_format_scope(fmt) elif self._BackgroundColorStart <= code <= self._BackgroundColorEnd: fmt.setBackground(_ansi_color(code - self._BackgroundColorStart, self.color_scheme)) self._set_format_scope(fmt) else: if code == self._ResetFormat: fmt = QtGui.QTextCharFormat(formatted_text.fmt) self.end_format_scope() elif code == self._BoldText: fmt.setFontWeight(QtGui.QFont.Bold) self._set_format_scope(fmt) elif code == self._NotBold: fmt.setFontWeight(QtGui.QFont.Normal) self._set_format_scope(fmt) elif code == self._ItalicText: fmt.setFontItalic(True) self._set_format_scope(fmt) elif code == self._NotItalicNotFraktur: fmt.setFontItalic(False) 
self._set_format_scope(fmt) elif code == self._UnderlinedText: fmt.setUnderlineStyle(fmt.SingleUnderline) fmt.setUnderlineColor(fmt.foreground().color()) self._set_format_scope(fmt) elif code == self._NotUnderlined: fmt.setUnderlineStyle(fmt.NoUnderline) self._set_format_scope(fmt) elif code == self._DefaultTextColor: fmt.setForeground(formatted_text.fmt.foreground()) self._set_format_scope(fmt) elif code == self._DefaultBackgroundColor: fmt.setBackground(formatted_text.fmt.background()) self._set_format_scope(fmt) elif code == self._Dim: fmt = QtGui.QTextCharFormat(fmt) fmt.setForeground(fmt.foreground().color().darker(self.DIM_FACTOR)) elif code == self._Negative: normal_fmt = fmt fmt = QtGui.QTextCharFormat(fmt) fmt.setForeground(normal_fmt.background()) fmt.setBackground(normal_fmt.foreground()) elif code == self._Positive: fmt = QtGui.QTextCharFormat(formatted_text.fmt) elif code in [self._RgbBackgroundColor, self._RgbTextColor]: # See http://en.wikipedia.org/wiki/ANSI_escape_code#Colors i += 1 if i == n: break next_code = int(numbers[i]) if next_code == 2: # RGB set with format: 38;2;<r>;<g>;<b> if i + 3 < n: method = fmt.setForeground if code == self._RgbTextColor else fmt.setBackground method(QtGui.QColor(int(numbers[i + 1]), int(numbers[i + 2]), int(numbers[i + 3]))) self._set_format_scope(fmt) i_offset = 3 elif next_code == 5: # 256 color mode with format: 38;5;<i> index = int(numbers[i + 1]) if index < 8: # The first 8 colors are standard low-intensity ANSI colors. color = _ansi_color(index, self.color_scheme) elif index < 16: # The next 8 colors are standard high-intensity ANSI colors. color = _ansi_color(index - 8, self.color_scheme).lighter(150) elif index < 232: # The next 216 colors are a 6x6x6 RGB cube. o = index - 16 color = QtGui.QColor((o / 36) * 51, ((o / 6) % 6) * 51, (o % 6) * 51) else: # The last 24 colors are a greyscale gradient. 
grey = (index - 232) * 11 color = QtGui.QColor(grey, grey, grey) if code == self._RgbTextColor: fmt.setForeground(color) else: fmt.setBackground(color) self._set_format_scope(fmt) else: _logger().warn('unsupported SGR code: %r', code) return ret_val
[ "def", "parse_text", "(", "self", ",", "formatted_text", ")", ":", "assert", "isinstance", "(", "formatted_text", ",", "FormattedText", ")", "ret_val", "=", "[", "]", "fmt", "=", "formatted_text", ".", "fmt", "if", "self", ".", "_prev_fmt_closed", "else", "s...
Retursn a list of operations (draw, cup, ed,...). Each operation consist of a command and its associated data. :param formatted_text: text to parse with the default char format to apply. :return: list of Operation
[ "Retursn", "a", "list", "of", "operations", "(", "draw", "cup", "ed", "...", ")", "." ]
python
train
ratcave/ratcave
ratcave/experimental.py
https://github.com/ratcave/ratcave/blob/e3862cdaba100ac2c6c78c08c4b09638e0c88fd4/ratcave/experimental.py#L4-L31
def draw_vr_anaglyph(cube_fbo, vr_scene, active_scene, eye_poses=(.035, -.035)): """ Experimental anaglyph drawing function for VR system with red/blue glasses, used in Sirota lab. Draws a virtual scene in red and blue, from subject's (heda trackers) perspective in active scene. Note: assumes shader uses playerPos like ratcave's default shader Args: cube_fbo: texture frameBuffer object. vr_scene: virtual scene object active_scene: active scene object eye_poses: the eye positions Returns: """ color_masks = [(True, False, False, True), (False, True, True, True)] cam = vr_scene.camera orig_cam_position = cam.position.xyz for color_mask, eye_pos in zip(color_masks, eye_poses): gl.glColorMask(*color_mask) cam.position.xyz = cam.model_matrix.dot([eye_pos, 0., 0., 1.])[:3] # inter_eye_distance / 2. cam.uniforms['playerPos'] = cam.position.xyz with cube_fbo as fbo: vr_scene.draw360_to_texture(fbo.texture) cam.position.xyz = orig_cam_position active_scene.draw()
[ "def", "draw_vr_anaglyph", "(", "cube_fbo", ",", "vr_scene", ",", "active_scene", ",", "eye_poses", "=", "(", ".035", ",", "-", ".035", ")", ")", ":", "color_masks", "=", "[", "(", "True", ",", "False", ",", "False", ",", "True", ")", ",", "(", "Fals...
Experimental anaglyph drawing function for VR system with red/blue glasses, used in Sirota lab. Draws a virtual scene in red and blue, from subject's (heda trackers) perspective in active scene. Note: assumes shader uses playerPos like ratcave's default shader Args: cube_fbo: texture frameBuffer object. vr_scene: virtual scene object active_scene: active scene object eye_poses: the eye positions Returns:
[ "Experimental", "anaglyph", "drawing", "function", "for", "VR", "system", "with", "red", "/", "blue", "glasses", "used", "in", "Sirota", "lab", ".", "Draws", "a", "virtual", "scene", "in", "red", "and", "blue", "from", "subject", "s", "(", "heda", "tracker...
python
train
vals/umis
umis/barcodes.py
https://github.com/vals/umis/blob/e8adb8486d9e9134ab8a6cad9811a7e74dcc4a2c/umis/barcodes.py#L194-L199
def acgt_match(string): """ returns True if sting consist of only "A "C" "G" "T" """ search = re.compile(r'[^ACGT]').search return not bool(search(string))
[ "def", "acgt_match", "(", "string", ")", ":", "search", "=", "re", ".", "compile", "(", "r'[^ACGT]'", ")", ".", "search", "return", "not", "bool", "(", "search", "(", "string", ")", ")" ]
returns True if sting consist of only "A "C" "G" "T"
[ "returns", "True", "if", "sting", "consist", "of", "only", "A", "C", "G", "T" ]
python
train
freelancer/freelancer-sdk-python
freelancersdk/resources/projects/projects.py
https://github.com/freelancer/freelancer-sdk-python/blob/e09034936d6f13b3909a9464ee329c81c1834941/freelancersdk/resources/projects/projects.py#L475-L497
def post_track(session, user_id, project_id, latitude, longitude): """ Start tracking a project by creating a track """ tracking_data = { 'user_id': user_id, 'project_id': project_id, 'track_point': { 'latitude': latitude, 'longitude': longitude } } # POST /api/projects/0.1/tracks/ response = make_post_request(session, 'tracks', json_data=tracking_data) json_data = response.json() if response.status_code == 200: return json_data['result'] else: raise TrackNotCreatedException(message=json_data['message'], error_code=json_data['error_code'], request_id=json_data['request_id'])
[ "def", "post_track", "(", "session", ",", "user_id", ",", "project_id", ",", "latitude", ",", "longitude", ")", ":", "tracking_data", "=", "{", "'user_id'", ":", "user_id", ",", "'project_id'", ":", "project_id", ",", "'track_point'", ":", "{", "'latitude'", ...
Start tracking a project by creating a track
[ "Start", "tracking", "a", "project", "by", "creating", "a", "track" ]
python
valid
synw/dataswim
dataswim/charts/bokeh.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/bokeh.py#L77-L88
def _sline_bokeh(self, window_size, y_label): """ Returns a chart with a smooth line from a serie """ try: ds2 = self.clone_() window = np.ones(int(window_size)) / float(window_size) ds2.df[y_label] = np.convolve(self.df[self.y], window, 'same') ds2.chart(self.x, y_label) return ds2.line_() except Exception as e: self.err(e, self._sline_bokeh, "Can not draw smooth line chart")
[ "def", "_sline_bokeh", "(", "self", ",", "window_size", ",", "y_label", ")", ":", "try", ":", "ds2", "=", "self", ".", "clone_", "(", ")", "window", "=", "np", ".", "ones", "(", "int", "(", "window_size", ")", ")", "/", "float", "(", "window_size", ...
Returns a chart with a smooth line from a serie
[ "Returns", "a", "chart", "with", "a", "smooth", "line", "from", "a", "serie" ]
python
train
saltstack/salt
salt/modules/panos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/panos.py#L265-L297
def download_software_version(version=None, synch=False): ''' Download software packages by version number. Args: version(str): The version of the PANOS file to download. synch (bool): If true then the file will synch to the peer unit. CLI Example: .. code-block:: bash salt '*' panos.download_software_version 8.0.0 salt '*' panos.download_software_version 8.0.0 True ''' if not version: raise CommandExecutionError("Version option must not be none.") if not isinstance(synch, bool): raise CommandExecutionError("Synch option must be boolean..") if synch is True: query = {'type': 'op', 'cmd': '<request><system><software><download>' '<version>{0}</version></download></software></system></request>'.format(version)} else: query = {'type': 'op', 'cmd': '<request><system><software><download><sync-to-peer>yes</sync-to-peer>' '<version>{0}</version></download></software></system></request>'.format(version)} return _get_job_results(query)
[ "def", "download_software_version", "(", "version", "=", "None", ",", "synch", "=", "False", ")", ":", "if", "not", "version", ":", "raise", "CommandExecutionError", "(", "\"Version option must not be none.\"", ")", "if", "not", "isinstance", "(", "synch", ",", ...
Download software packages by version number. Args: version(str): The version of the PANOS file to download. synch (bool): If true then the file will synch to the peer unit. CLI Example: .. code-block:: bash salt '*' panos.download_software_version 8.0.0 salt '*' panos.download_software_version 8.0.0 True
[ "Download", "software", "packages", "by", "version", "number", "." ]
python
train
rsgalloway/grit
grit/server/cherrypy/__init__.py
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/server/cherrypy/__init__.py#L1397-L1406
def start(self): """Start the pool of threads.""" for i in range(self.min): self._threads.append(WorkerThread(self.server)) for worker in self._threads: worker.setName("CP Server " + worker.getName()) worker.start() for worker in self._threads: while not worker.ready: time.sleep(.1)
[ "def", "start", "(", "self", ")", ":", "for", "i", "in", "range", "(", "self", ".", "min", ")", ":", "self", ".", "_threads", ".", "append", "(", "WorkerThread", "(", "self", ".", "server", ")", ")", "for", "worker", "in", "self", ".", "_threads", ...
Start the pool of threads.
[ "Start", "the", "pool", "of", "threads", "." ]
python
train
VingtCinq/python-mailchimp
mailchimp3/entities/segments.py
https://github.com/VingtCinq/python-mailchimp/blob/1b472f1b64fdde974732ac4b7ed48908bb707260/mailchimp3/entities/segments.py#L17-L21
def get(self, list_id, segment_id): """ returns the specified list segment. """ return self._mc_client._get(url=self._build_path(list_id, 'segments', segment_id))
[ "def", "get", "(", "self", ",", "list_id", ",", "segment_id", ")", ":", "return", "self", ".", "_mc_client", ".", "_get", "(", "url", "=", "self", ".", "_build_path", "(", "list_id", ",", "'segments'", ",", "segment_id", ")", ")" ]
returns the specified list segment.
[ "returns", "the", "specified", "list", "segment", "." ]
python
valid
Netflix-Skunkworks/cloudaux
cloudaux/aws/s3.py
https://github.com/Netflix-Skunkworks/cloudaux/blob/c4b0870c3ac68b1c69e71d33cf78b6a8bdf437ea/cloudaux/aws/s3.py#L165-L173
def list_bucket_inventory_configurations(client=None, **kwargs): """ Bucket='string' """ result = client.list_bucket_inventory_configurations(**kwargs) if not result.get("InventoryConfigurationList"): result.update({"InventoryConfigurationList": []}) return result
[ "def", "list_bucket_inventory_configurations", "(", "client", "=", "None", ",", "*", "*", "kwargs", ")", ":", "result", "=", "client", ".", "list_bucket_inventory_configurations", "(", "*", "*", "kwargs", ")", "if", "not", "result", ".", "get", "(", "\"Invento...
Bucket='string'
[ "Bucket", "=", "string" ]
python
valid
shopkick/flawless
flawless/server/server.py
https://github.com/shopkick/flawless/blob/c54b63ca1991c153e6f75080536f6df445aacc64/flawless/server/server.py#L101-L151
def serve(conf_path, storage_factory=None): """This method starts the server. There are two processes, one is an HTTP server that shows and admin interface and the second is a Thrift server that the client code calls. Arguments: `conf_path` - The path to your flawless.cfg file `storage_factory` - You can pass in your own storage class that implements StorageInterface. You must implement storage_cls if you want Flawless to be horizontally scalable, since by default it will just store everything on the local disk. """ flawless.lib.config.init_config(conf_path) # Try and create datadir if it doesn't exist. For instance it might be in /tmp if not os.path.exists(config.data_dir_path): os.makedirs(config.data_dir_path) storage_factory = storage_factory or (lambda partition: DiskStorage(partition=partition)) # Setup root logger root_logger = logging.getLogger() root_handler = logging.handlers.TimedRotatingFileHandler( filename=config.log_file, when='d', interval=1, backupCount=config.log_days_to_keep) root_logger.setLevel(getattr(logging, config.log_level)) root_logger.addHandler(root_handler) child_pid = os.fork() if child_pid == 0: # Setup HTTP server handler = FlawlessWebServiceHandler(storage_factory=storage_factory) server = SimpleThreadedHTTPServer(('', config.http_port), SimpleRequestHTTPHandler) server.attach_service(handler) server.request_queue_size = 50 try: server.serve_forever() except (KeyboardInterrupt, SystemExit): server.server_close() else: # Setup Thrift server handler = FlawlessThriftServiceHandler(storage_factory=storage_factory) processor = Flawless.Processor(handler) transport = TSocket.TServerSocket(port=config.port) tfactory = TTransport.TFramedTransportFactory() pfactory = TBinaryProtocol.TBinaryProtocolFactory() server = TServer.TThreadedServer(processor, transport, tfactory, pfactory) try: server.serve() except (KeyboardInterrupt, SystemExit): handler.errors_seen.sync() transport.close() os.kill(child_pid, signal.SIGINT)
[ "def", "serve", "(", "conf_path", ",", "storage_factory", "=", "None", ")", ":", "flawless", ".", "lib", ".", "config", ".", "init_config", "(", "conf_path", ")", "# Try and create datadir if it doesn't exist. For instance it might be in /tmp", "if", "not", "os", ".",...
This method starts the server. There are two processes, one is an HTTP server that shows and admin interface and the second is a Thrift server that the client code calls. Arguments: `conf_path` - The path to your flawless.cfg file `storage_factory` - You can pass in your own storage class that implements StorageInterface. You must implement storage_cls if you want Flawless to be horizontally scalable, since by default it will just store everything on the local disk.
[ "This", "method", "starts", "the", "server", ".", "There", "are", "two", "processes", "one", "is", "an", "HTTP", "server", "that", "shows", "and", "admin", "interface", "and", "the", "second", "is", "a", "Thrift", "server", "that", "the", "client", "code",...
python
test
JNRowe/jnrbase
jnrbase/i18n.py
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/i18n.py#L27-L40
def setup(__pkg: ModuleType) -> Tuple[Callable[[str], str], Callable[[str, str, int], str]]: """Configure ``gettext`` for given package. Args: __pkg: Package to use as location for :program:`gettext` files Returns: :program:`gettext` functions for singular and plural translations """ package_locale = path.join(path.dirname(__pkg.__file__), 'locale') gettext.install(__pkg.__name__, package_locale) return gettext.gettext, gettext.ngettext
[ "def", "setup", "(", "__pkg", ":", "ModuleType", ")", "->", "Tuple", "[", "Callable", "[", "[", "str", "]", ",", "str", "]", ",", "Callable", "[", "[", "str", ",", "str", ",", "int", "]", ",", "str", "]", "]", ":", "package_locale", "=", "path", ...
Configure ``gettext`` for given package. Args: __pkg: Package to use as location for :program:`gettext` files Returns: :program:`gettext` functions for singular and plural translations
[ "Configure", "gettext", "for", "given", "package", "." ]
python
train
sawcordwell/pymdptoolbox
src/experimental/mdpsql.py
https://github.com/sawcordwell/pymdptoolbox/blob/7c96789cc80e280437005c12065cf70266c11636/src/experimental/mdpsql.py#L230-L242
def _calculatePolicy(self): """This implements argmax() over the actions of Q.""" cmd = ''' UPDATE policy SET action = ( SELECT action FROM (SELECT state, action, MAX(value) FROM Q GROUP BY state) AS A WHERE policy.state = A.state GROUP BY state);''' self._cur.execute(cmd) self._conn.commit()
[ "def", "_calculatePolicy", "(", "self", ")", ":", "cmd", "=", "'''\n UPDATE policy\n SET action = (\n SELECT action\n FROM (SELECT state, action, MAX(value)\n FROM Q\n GR...
This implements argmax() over the actions of Q.
[ "This", "implements", "argmax", "()", "over", "the", "actions", "of", "Q", "." ]
python
train
jason-weirather/py-seq-tools
seqtools/range/__init__.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/range/__init__.py#L208-L245
def overlaps(self,in_genomic_range,padding=0): """do the ranges overlap? :param in_genomic_range: range to compare to :param padding: add to the ends this many (default 0) :type in_genomic_range: GenomicRange :type padding: int :return: True if they overlap :rtype: bool """ if padding > 0: in_genomic_range = GenomicRange(in_genomic_range.chr,max([1,in_genomic_range.start-padding]),in_genomic_range.end+padding) if self.chr != in_genomic_range.chr: return False if self.end < in_genomic_range.start: return False if in_genomic_range.end < self.start: return False if self.start > in_genomic_range.end: return False if in_genomic_range.start > self.end: return False if self.start <= in_genomic_range.start and self.end >= in_genomic_range.start: return True if self.start <= in_genomic_range.end and self.end >= in_genomic_range.end: return True if self.start >= in_genomic_range.start and self.end <= in_genomic_range.end: return True if self.start <= in_genomic_range.start and self.end >= in_genomic_range.end: return True if in_genomic_range.start <= self.start and in_genomic_range.end >= self.start: return True if in_genomic_range.start <= self.end and in_genomic_range.end >= self.end: return True sys.stderr.write("overlaps: unprogrammed error\n") return False
[ "def", "overlaps", "(", "self", ",", "in_genomic_range", ",", "padding", "=", "0", ")", ":", "if", "padding", ">", "0", ":", "in_genomic_range", "=", "GenomicRange", "(", "in_genomic_range", ".", "chr", ",", "max", "(", "[", "1", ",", "in_genomic_range", ...
do the ranges overlap? :param in_genomic_range: range to compare to :param padding: add to the ends this many (default 0) :type in_genomic_range: GenomicRange :type padding: int :return: True if they overlap :rtype: bool
[ "do", "the", "ranges", "overlap?" ]
python
train
openvax/pyensembl
pyensembl/genome.py
https://github.com/openvax/pyensembl/blob/4b995fb72e848206d6fbf11950cf30964cd9b3aa/pyensembl/genome.py#L644-L658
def genes(self, contig=None, strand=None): """ Returns all Gene objects in the database. Can be restricted to a particular contig/chromosome and strand by the following arguments: Parameters ---------- contig : str Only return genes on the given contig. strand : str Only return genes on this strand. """ gene_ids = self.gene_ids(contig=contig, strand=strand) return [self.gene_by_id(gene_id) for gene_id in gene_ids]
[ "def", "genes", "(", "self", ",", "contig", "=", "None", ",", "strand", "=", "None", ")", ":", "gene_ids", "=", "self", ".", "gene_ids", "(", "contig", "=", "contig", ",", "strand", "=", "strand", ")", "return", "[", "self", ".", "gene_by_id", "(", ...
Returns all Gene objects in the database. Can be restricted to a particular contig/chromosome and strand by the following arguments: Parameters ---------- contig : str Only return genes on the given contig. strand : str Only return genes on this strand.
[ "Returns", "all", "Gene", "objects", "in", "the", "database", ".", "Can", "be", "restricted", "to", "a", "particular", "contig", "/", "chromosome", "and", "strand", "by", "the", "following", "arguments", ":" ]
python
train
ansible/molecule
molecule/provisioner/ansible.py
https://github.com/ansible/molecule/blob/766dc35b0b0ce498cd5e3a62b40f828742d0d08c/molecule/provisioner/ansible.py#L709-L717
def verify(self): """ Executes ``ansible-playbook`` against the verify playbook and returns None. :return: None """ pb = self._get_ansible_playbook(self.playbooks.verify) pb.execute()
[ "def", "verify", "(", "self", ")", ":", "pb", "=", "self", ".", "_get_ansible_playbook", "(", "self", ".", "playbooks", ".", "verify", ")", "pb", ".", "execute", "(", ")" ]
Executes ``ansible-playbook`` against the verify playbook and returns None. :return: None
[ "Executes", "ansible", "-", "playbook", "against", "the", "verify", "playbook", "and", "returns", "None", "." ]
python
train
mabuchilab/QNET
src/qnet/algebra/pattern_matching/__init__.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/pattern_matching/__init__.py#L248-L268
def _check_last_arg_pattern(self, current_arg_pattern, last_arg_pattern): """Given a "current" arg pattern (that was used to match the last actual argument of an expression), and another ("last") argument pattern, raise a ValueError, unless the "last" argument pattern is a "zero or more" wildcard. In that case, return a dict that maps the wildcard name to an empty list """ try: if last_arg_pattern.mode == self.single: raise ValueError("insufficient number of arguments") elif last_arg_pattern.mode == self.zero_or_more: if last_arg_pattern.wc_name is not None: if last_arg_pattern != current_arg_pattern: # we have to record an empty match return {last_arg_pattern.wc_name: []} elif last_arg_pattern.mode == self.one_or_more: if last_arg_pattern != current_arg_pattern: raise ValueError("insufficient number of arguments") except AttributeError: raise ValueError("insufficient number of arguments") return {}
[ "def", "_check_last_arg_pattern", "(", "self", ",", "current_arg_pattern", ",", "last_arg_pattern", ")", ":", "try", ":", "if", "last_arg_pattern", ".", "mode", "==", "self", ".", "single", ":", "raise", "ValueError", "(", "\"insufficient number of arguments\"", ")"...
Given a "current" arg pattern (that was used to match the last actual argument of an expression), and another ("last") argument pattern, raise a ValueError, unless the "last" argument pattern is a "zero or more" wildcard. In that case, return a dict that maps the wildcard name to an empty list
[ "Given", "a", "current", "arg", "pattern", "(", "that", "was", "used", "to", "match", "the", "last", "actual", "argument", "of", "an", "expression", ")", "and", "another", "(", "last", ")", "argument", "pattern", "raise", "a", "ValueError", "unless", "the"...
python
train
gautammishra/lyft-rides-python-sdk
lyft_rides/client.py
https://github.com/gautammishra/lyft-rides-python-sdk/blob/b6d96a0fceaf7dc3425153c418a8e25c57803431/lyft_rides/client.py#L179-L232
def request_ride( self, ride_type=None, start_latitude=None, start_longitude=None, start_address=None, end_latitude=None, end_longitude=None, end_address=None, primetime_confirmation_token=None, ): """Request a ride on behalf of an Lyft user. Parameters ride_type (str) Name of the type of ride you're requesting. E.g., lyft, lyft_plus start_latitude (float) Latitude component of a start location. start_longitude (float) Longitude component of a start location. start_address (str) Optional pickup address. end_latitude (float) Optional latitude component of a end location. Destination would be NULL in this case. end_longitude (float) Optional longitude component of a end location. Destination would be NULL in this case. end_address (str) Optional destination address. primetime_confirmation_token (str) Optional string containing the Prime Time confirmation token to book rides having Prime Time Pricing. Returns (Response) A Response object containing the ride request ID and other details about the requested ride.. """ args = { 'ride_type': ride_type, 'origin': { 'lat': start_latitude, 'lng': start_longitude, 'address': start_address, }, 'destination': { 'lat': end_latitude, 'lng': end_longitude, 'address': end_address, }, 'primetime_confirmation_token': primetime_confirmation_token, } return self._api_call('POST', 'v1/rides', args=args)
[ "def", "request_ride", "(", "self", ",", "ride_type", "=", "None", ",", "start_latitude", "=", "None", ",", "start_longitude", "=", "None", ",", "start_address", "=", "None", ",", "end_latitude", "=", "None", ",", "end_longitude", "=", "None", ",", "end_addr...
Request a ride on behalf of an Lyft user. Parameters ride_type (str) Name of the type of ride you're requesting. E.g., lyft, lyft_plus start_latitude (float) Latitude component of a start location. start_longitude (float) Longitude component of a start location. start_address (str) Optional pickup address. end_latitude (float) Optional latitude component of a end location. Destination would be NULL in this case. end_longitude (float) Optional longitude component of a end location. Destination would be NULL in this case. end_address (str) Optional destination address. primetime_confirmation_token (str) Optional string containing the Prime Time confirmation token to book rides having Prime Time Pricing. Returns (Response) A Response object containing the ride request ID and other details about the requested ride..
[ "Request", "a", "ride", "on", "behalf", "of", "an", "Lyft", "user", ".", "Parameters", "ride_type", "(", "str", ")", "Name", "of", "the", "type", "of", "ride", "you", "re", "requesting", ".", "E", ".", "g", ".", "lyft", "lyft_plus", "start_latitude", "...
python
train
GNS3/gns3-server
gns3server/controller/gns3vm/vmware_gns3_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/gns3vm/vmware_gns3_vm.py#L123-L180
def start(self): """ Starts the GNS3 VM. """ vms = yield from self.list() for vm in vms: if vm["vmname"] == self.vmname: self._vmx_path = vm["vmx_path"] break # check we have a valid VMX file path if not self._vmx_path: raise GNS3VMError("VMWare VM {} not found".format(self.vmname)) if not os.path.exists(self._vmx_path): raise GNS3VMError("VMware VMX file {} doesn't exist".format(self._vmx_path)) # check if the VMware guest tools are installed vmware_tools_state = yield from self._execute("checkToolsState", [self._vmx_path]) if vmware_tools_state not in ("installed", "running"): raise GNS3VMError("VMware tools are not installed in {}".format(self.vmname)) try: running = yield from self._is_running() except VMwareError as e: raise GNS3VMError("Could not list VMware VMs: {}".format(str(e))) if not running: log.info("Update GNS3 VM settings") # set the number of vCPUs and amount of RAM yield from self._set_vcpus_ram(self.vcpus, self.ram) yield from self._set_extra_options() # start the VM args = [self._vmx_path] if self._headless: args.extend(["nogui"]) yield from self._execute("start", args) log.info("GNS3 VM has been started") # get the guest IP address (first adapter only) trial = 120 guest_ip_address = "" log.info("Waiting for GNS3 VM IP") while True: guest_ip_address = yield from self._execute("readVariable", [self._vmx_path, "guestVar", "gns3.eth0"], timeout=120, log_level=logging.DEBUG) guest_ip_address = guest_ip_address.strip() if len(guest_ip_address) != 0: break trial -= 1 # If ip not found fallback on old method if trial == 0: log.warning("No IP found for the VM via readVariable fallback to getGuestIPAddress") guest_ip_address = yield from self._execute("getGuestIPAddress", [self._vmx_path, "-wait"], timeout=120) break yield from asyncio.sleep(1) self.ip_address = guest_ip_address log.info("GNS3 VM IP address set to {}".format(guest_ip_address)) self.running = True
[ "def", "start", "(", "self", ")", ":", "vms", "=", "yield", "from", "self", ".", "list", "(", ")", "for", "vm", "in", "vms", ":", "if", "vm", "[", "\"vmname\"", "]", "==", "self", ".", "vmname", ":", "self", ".", "_vmx_path", "=", "vm", "[", "\...
Starts the GNS3 VM.
[ "Starts", "the", "GNS3", "VM", "." ]
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/twisted/wsgi.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/twisted/wsgi.py#L158-L172
def _request_cb(self, env, start_response): """process request, """ if env['REQUEST_METHOD'] == 'GET': return self._handle_GET(env, start_response) if env['REQUEST_METHOD'] == 'POST': return self._handle_POST(env, start_response) start_response("500 ERROR", [('Content-Type','text/plain')]) s = StringIO() h = env.items(); h.sort() for k,v in h: print >>s, k,'=',`v` return [s.getvalue()]
[ "def", "_request_cb", "(", "self", ",", "env", ",", "start_response", ")", ":", "if", "env", "[", "'REQUEST_METHOD'", "]", "==", "'GET'", ":", "return", "self", ".", "_handle_GET", "(", "env", ",", "start_response", ")", "if", "env", "[", "'REQUEST_METHOD'...
process request,
[ "process", "request" ]
python
train
tomnor/channelpack
channelpack/pullxl.py
https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pullxl.py#L384-L419
def letter2num(letters, zbase=False): """A = 1, C = 3 and so on. Convert spreadsheet style column enumeration to a number. Answers: A = 1, Z = 26, AA = 27, AZ = 52, ZZ = 702, AMJ = 1024 >>> from channelpack.pullxl import letter2num >>> letter2num('A') == 1 True >>> letter2num('Z') == 26 True >>> letter2num('AZ') == 52 True >>> letter2num('ZZ') == 702 True >>> letter2num('AMJ') == 1024 True >>> letter2num('AMJ', zbase=True) == 1023 True >>> letter2num('A', zbase=True) == 0 True """ letters = letters.upper() res = 0 weight = len(letters) - 1 assert weight >= 0, letters for i, c in enumerate(letters): assert 65 <= ord(c) <= 90, c # A-Z res += (ord(c) - 64) * 26**(weight - i) if not zbase: return res return res - 1
[ "def", "letter2num", "(", "letters", ",", "zbase", "=", "False", ")", ":", "letters", "=", "letters", ".", "upper", "(", ")", "res", "=", "0", "weight", "=", "len", "(", "letters", ")", "-", "1", "assert", "weight", ">=", "0", ",", "letters", "for"...
A = 1, C = 3 and so on. Convert spreadsheet style column enumeration to a number. Answers: A = 1, Z = 26, AA = 27, AZ = 52, ZZ = 702, AMJ = 1024 >>> from channelpack.pullxl import letter2num >>> letter2num('A') == 1 True >>> letter2num('Z') == 26 True >>> letter2num('AZ') == 52 True >>> letter2num('ZZ') == 702 True >>> letter2num('AMJ') == 1024 True >>> letter2num('AMJ', zbase=True) == 1023 True >>> letter2num('A', zbase=True) == 0 True
[ "A", "=", "1", "C", "=", "3", "and", "so", "on", ".", "Convert", "spreadsheet", "style", "column", "enumeration", "to", "a", "number", "." ]
python
train
hobson/aima
aima/nlp.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/nlp.py#L20-L27
def Lexicon(**rules): """Create a dictionary mapping symbols to alternative words. >>> Lexicon(Art = "the | a | an") {'Art': ['the', 'a', 'an']} """ for (lhs, rhs) in rules.items(): rules[lhs] = [word.strip() for word in rhs.split('|')] return rules
[ "def", "Lexicon", "(", "*", "*", "rules", ")", ":", "for", "(", "lhs", ",", "rhs", ")", "in", "rules", ".", "items", "(", ")", ":", "rules", "[", "lhs", "]", "=", "[", "word", ".", "strip", "(", ")", "for", "word", "in", "rhs", ".", "split", ...
Create a dictionary mapping symbols to alternative words. >>> Lexicon(Art = "the | a | an") {'Art': ['the', 'a', 'an']}
[ "Create", "a", "dictionary", "mapping", "symbols", "to", "alternative", "words", ".", ">>>", "Lexicon", "(", "Art", "=", "the", "|", "a", "|", "an", ")", "{", "Art", ":", "[", "the", "a", "an", "]", "}" ]
python
valid
MycroftAI/mycroft-precise
precise/scripts/train_generated.py
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_generated.py#L122-L129
def generate_wakeword_pieces(self, volume): """Generates chunks of audio that represent the wakeword stream""" while True: target = 1 if random() > 0.5 else 0 it = self.pos_files_it if target else self.neg_files_it sample_file = next(it) yield self.layer_with(self.normalize_volume_to(load_audio(sample_file), volume), target) yield self.layer_with(np.zeros(int(pr.sample_rate * (0.5 + 2.0 * random()))), 0)
[ "def", "generate_wakeword_pieces", "(", "self", ",", "volume", ")", ":", "while", "True", ":", "target", "=", "1", "if", "random", "(", ")", ">", "0.5", "else", "0", "it", "=", "self", ".", "pos_files_it", "if", "target", "else", "self", ".", "neg_file...
Generates chunks of audio that represent the wakeword stream
[ "Generates", "chunks", "of", "audio", "that", "represent", "the", "wakeword", "stream" ]
python
train
pandas-dev/pandas
pandas/core/nanops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L984-L1016
def nanprod(values, axis=None, skipna=True, min_count=0, mask=None): """ Parameters ---------- values : ndarray[dtype] axis: int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, 3, np.nan]) >>> nanops.nanprod(s) 6.0 Returns -------- The product of all elements on a given axis. ( NaNs are treated as 1) """ if mask is None: mask = isna(values) if skipna and not is_any_int_dtype(values): values = values.copy() values[mask] = 1 result = values.prod(axis) return _maybe_null_out(result, axis, mask, min_count=min_count)
[ "def", "nanprod", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "min_count", "=", "0", ",", "mask", "=", "None", ")", ":", "if", "mask", "is", "None", ":", "mask", "=", "isna", "(", "values", ")", "if", "skipna", "and"...
Parameters ---------- values : ndarray[dtype] axis: int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, 3, np.nan]) >>> nanops.nanprod(s) 6.0 Returns -------- The product of all elements on a given axis. ( NaNs are treated as 1)
[ "Parameters", "----------", "values", ":", "ndarray", "[", "dtype", "]", "axis", ":", "int", "optional", "skipna", ":", "bool", "default", "True", "min_count", ":", "int", "default", "0", "mask", ":", "ndarray", "[", "bool", "]", "optional", "nan", "-", ...
python
train
Azure/blobxfer
blobxfer/operations/azure/file.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/azure/file.py#L105-L135
def get_file_properties( client, fileshare, prefix, timeout=None, snapshot=None): # type: (azure.storage.file.FileService, str, str, int, str) -> # azure.storage.file.models.File """Get file properties :param FileService client: blob client :param str fileshare: file share name :param str prefix: path prefix :param int timeout: timeout :param str snapshot: snapshot :rtype: azure.storage.file.models.File :return: file properties """ dirname, fname, ss = parse_file_path(prefix) if ss is not None: if snapshot is not None: raise RuntimeError( 'snapshot specified as {} but parsed {} from prefix {}'.format( snapshot, ss, prefix)) else: snapshot = ss try: return client.get_file_properties( share_name=fileshare, directory_name=dirname, file_name=fname, timeout=timeout, snapshot=snapshot, ) except azure.common.AzureMissingResourceHttpError: return None
[ "def", "get_file_properties", "(", "client", ",", "fileshare", ",", "prefix", ",", "timeout", "=", "None", ",", "snapshot", "=", "None", ")", ":", "# type: (azure.storage.file.FileService, str, str, int, str) ->", "# azure.storage.file.models.File", "dirname", ",", ...
Get file properties :param FileService client: blob client :param str fileshare: file share name :param str prefix: path prefix :param int timeout: timeout :param str snapshot: snapshot :rtype: azure.storage.file.models.File :return: file properties
[ "Get", "file", "properties", ":", "param", "FileService", "client", ":", "blob", "client", ":", "param", "str", "fileshare", ":", "file", "share", "name", ":", "param", "str", "prefix", ":", "path", "prefix", ":", "param", "int", "timeout", ":", "timeout",...
python
train
Metatab/metatab
metatab/terms.py
https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/terms.py#L199-L204
def new_children(self, **kwargs): """Create new children from kwargs""" for k, v in kwargs.items(): self.new_child(k, v) return self
[ "def", "new_children", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "self", ".", "new_child", "(", "k", ",", "v", ")", "return", "self" ]
Create new children from kwargs
[ "Create", "new", "children", "from", "kwargs" ]
python
train
jeffrimko/Auxly
lib/auxly/shell.py
https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/shell.py#L64-L83
def iterstd(cmd, std="out", **kwargs): """Iterates through the lines of a stderr/stdout stream for the given shell command.""" def _readline(): while True: line = getattr(proc, "std"+std).readline() if line != b"": yield line.rstrip().decode("UTF-8", "replace") else: break kwargs['shell'] = True kwargs['stdout'] = subprocess.PIPE kwargs['stderr'] = subprocess.PIPE if sys.version_info >= (3,0): with subprocess.Popen(cmd, **kwargs) as proc: for line in _readline(): yield line else: proc = subprocess.Popen(cmd, **kwargs) for line in _readline(): yield line proc.kill()
[ "def", "iterstd", "(", "cmd", ",", "std", "=", "\"out\"", ",", "*", "*", "kwargs", ")", ":", "def", "_readline", "(", ")", ":", "while", "True", ":", "line", "=", "getattr", "(", "proc", ",", "\"std\"", "+", "std", ")", ".", "readline", "(", ")",...
Iterates through the lines of a stderr/stdout stream for the given shell command.
[ "Iterates", "through", "the", "lines", "of", "a", "stderr", "/", "stdout", "stream", "for", "the", "given", "shell", "command", "." ]
python
train
pkkid/python-plexapi
plexapi/utils.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/utils.py#L98-L126
def rget(obj, attrstr, default=None, delim='.'): # pragma: no cover """ Returns the value at the specified attrstr location within a nexted tree of dicts, lists, tuples, functions, classes, etc. The lookup is done recursivley for each key in attrstr (split by by the delimiter) This function is heavily influenced by the lookups used in Django templates. Parameters: obj (any): Object to start the lookup in (dict, obj, list, tuple, etc). attrstr (str): String to lookup (ex: 'foo.bar.baz.value') default (any): Default value to return if not found. delim (str): Delimiter separating keys in attrstr. """ try: parts = attrstr.split(delim, 1) attr = parts[0] attrstr = parts[1] if len(parts) == 2 else None if isinstance(obj, dict): value = obj[attr] elif isinstance(obj, list): value = obj[int(attr)] elif isinstance(obj, tuple): value = obj[int(attr)] elif isinstance(obj, object): value = getattr(obj, attr) if attrstr: return rget(value, attrstr, default, delim) return value except: # noqa: E722 return default
[ "def", "rget", "(", "obj", ",", "attrstr", ",", "default", "=", "None", ",", "delim", "=", "'.'", ")", ":", "# pragma: no cover", "try", ":", "parts", "=", "attrstr", ".", "split", "(", "delim", ",", "1", ")", "attr", "=", "parts", "[", "0", "]", ...
Returns the value at the specified attrstr location within a nexted tree of dicts, lists, tuples, functions, classes, etc. The lookup is done recursivley for each key in attrstr (split by by the delimiter) This function is heavily influenced by the lookups used in Django templates. Parameters: obj (any): Object to start the lookup in (dict, obj, list, tuple, etc). attrstr (str): String to lookup (ex: 'foo.bar.baz.value') default (any): Default value to return if not found. delim (str): Delimiter separating keys in attrstr.
[ "Returns", "the", "value", "at", "the", "specified", "attrstr", "location", "within", "a", "nexted", "tree", "of", "dicts", "lists", "tuples", "functions", "classes", "etc", ".", "The", "lookup", "is", "done", "recursivley", "for", "each", "key", "in", "attr...
python
train
rocky/python3-trepan
trepan/lib/file.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/lib/file.py#L74-L97
def parse_position(errmsg, arg): """parse_position(errmsg, arg)->(fn, name, lineno) Parse arg as [filename|module:]lineno Make sure it works for C:\foo\bar.py:12 """ colon = arg.rfind(':') if colon >= 0: filename = arg[:colon].rstrip() m, f = lookupmodule(filename) if not f: errmsg("'%s' not found using sys.path" % filename) return (None, None, None) else: filename = pyficache.pyc2py(f) arg = arg[colon+1:].lstrip() pass try: lineno = int(arg) except TypeError: errmsg("Bad line number: %s", str(arg)) return (None, filename, None) return (None, filename, lineno) return (None, None, None)
[ "def", "parse_position", "(", "errmsg", ",", "arg", ")", ":", "colon", "=", "arg", ".", "rfind", "(", "':'", ")", "if", "colon", ">=", "0", ":", "filename", "=", "arg", "[", ":", "colon", "]", ".", "rstrip", "(", ")", "m", ",", "f", "=", "looku...
parse_position(errmsg, arg)->(fn, name, lineno) Parse arg as [filename|module:]lineno Make sure it works for C:\foo\bar.py:12
[ "parse_position", "(", "errmsg", "arg", ")", "-", ">", "(", "fn", "name", "lineno", ")" ]
python
test
astroswego/plotypus
src/plotypus/utils.py
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/utils.py#L44-L86
def pmap(func, args, processes=None, callback=lambda *_, **__: None, **kwargs): """pmap(func, args, processes=None, callback=do_nothing, **kwargs) Parallel equivalent of ``map(func, args)``, with the additional ability of providing keyword arguments to func, and a callback function which is applied to each element in the returned list. Unlike map, the output is a non-lazy list. If *processes* is 1, no thread pool is used. **Parameters** func : function The function to map. args : iterable The arguments to map *func* over. processes : int or None, optional The number of processes in the thread pool. If only 1, no thread pool is used to avoid useless overhead. If None, the number is chosen based on your system by :class:`multiprocessing.Pool` (default None). callback : function, optional Function to call on the return value of ``func(arg)`` for each *arg* in *args* (default do_nothing). kwargs : dict Extra keyword arguments are unpacked in each call of *func*. **Returns** results : list A list equivalent to ``[func(x, **kwargs) for x in args]``. """ if processes is 1: results = [] for arg in args: result = func(arg, **kwargs) results.append(result) callback(result) return results else: with Pool() if processes is None else Pool(processes) as p: results = [p.apply_async(func, (arg,), kwargs, callback) for arg in args] return [result.get() for result in results]
[ "def", "pmap", "(", "func", ",", "args", ",", "processes", "=", "None", ",", "callback", "=", "lambda", "*", "_", ",", "*", "*", "__", ":", "None", ",", "*", "*", "kwargs", ")", ":", "if", "processes", "is", "1", ":", "results", "=", "[", "]", ...
pmap(func, args, processes=None, callback=do_nothing, **kwargs) Parallel equivalent of ``map(func, args)``, with the additional ability of providing keyword arguments to func, and a callback function which is applied to each element in the returned list. Unlike map, the output is a non-lazy list. If *processes* is 1, no thread pool is used. **Parameters** func : function The function to map. args : iterable The arguments to map *func* over. processes : int or None, optional The number of processes in the thread pool. If only 1, no thread pool is used to avoid useless overhead. If None, the number is chosen based on your system by :class:`multiprocessing.Pool` (default None). callback : function, optional Function to call on the return value of ``func(arg)`` for each *arg* in *args* (default do_nothing). kwargs : dict Extra keyword arguments are unpacked in each call of *func*. **Returns** results : list A list equivalent to ``[func(x, **kwargs) for x in args]``.
[ "pmap", "(", "func", "args", "processes", "=", "None", "callback", "=", "do_nothing", "**", "kwargs", ")" ]
python
train
ghukill/pyfc4
pyfc4/models.py
https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L1829-L1859
def range(self, byte_start, byte_end, stream=True): ''' method to return a particular byte range from NonRDF resource's binary data https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html Args: byte_start(int): position of range start byte_end(int): position of range end Returns: (requests.Response): streamable response ''' response = self.resource.repo.api.http_request( 'GET', self.resource.uri, data=None, headers={ 'Content-Type':self.mimetype, 'Range':'bytes=%s-%s' % (byte_start, byte_end) }, is_rdf=False, stream=stream) # expects 206 if response.status_code == 206: return response else: raise Exception('HTTP %s, but was expecting 206' % response.status_code)
[ "def", "range", "(", "self", ",", "byte_start", ",", "byte_end", ",", "stream", "=", "True", ")", ":", "response", "=", "self", ".", "resource", ".", "repo", ".", "api", ".", "http_request", "(", "'GET'", ",", "self", ".", "resource", ".", "uri", ","...
method to return a particular byte range from NonRDF resource's binary data https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html Args: byte_start(int): position of range start byte_end(int): position of range end Returns: (requests.Response): streamable response
[ "method", "to", "return", "a", "particular", "byte", "range", "from", "NonRDF", "resource", "s", "binary", "data", "https", ":", "//", "www", ".", "w3", ".", "org", "/", "Protocols", "/", "rfc2616", "/", "rfc2616", "-", "sec14", ".", "html" ]
python
train
bcbio/bcbio-nextgen
bcbio/broad/picardrun.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/picardrun.py#L244-L257
def picard_fixmate(picard, align_bam): """Run Picard's FixMateInformation generating an aligned output file. """ base, ext = os.path.splitext(align_bam) out_file = "%s-sort%s" % (base, ext) if not file_exists(out_file): with tx_tmpdir(picard._config) as tmp_dir: with file_transaction(picard._config, out_file) as tx_out_file: opts = [("INPUT", align_bam), ("OUTPUT", tx_out_file), ("TMP_DIR", tmp_dir), ("SORT_ORDER", "coordinate")] picard.run("FixMateInformation", opts) return out_file
[ "def", "picard_fixmate", "(", "picard", ",", "align_bam", ")", ":", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "align_bam", ")", "out_file", "=", "\"%s-sort%s\"", "%", "(", "base", ",", "ext", ")", "if", "not", "file_exists", "("...
Run Picard's FixMateInformation generating an aligned output file.
[ "Run", "Picard", "s", "FixMateInformation", "generating", "an", "aligned", "output", "file", "." ]
python
train
hydpy-dev/hydpy
hydpy/models/dam/dam_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/dam/dam_model.py#L877-L930
def calc_requiredrelease_v2(self): """Calculate the water release (immediately downstream) required for reducing drought events. Required control parameter: |NearDischargeMinimumThreshold| Required derived parameter: |dam_derived.TOY| Calculated flux sequence: |RequiredRelease| Basic equation: :math:`RequiredRelease = NearDischargeMinimumThreshold` Examples: As in the examples above, define a short simulation time period first: >>> from hydpy import pub >>> pub.timegrids = '2001.03.30', '2001.04.03', '1d' Prepare the dam model: >>> from hydpy.models.dam import * >>> parameterstep() >>> derived.toy.update() Define a minimum discharge value for a cross section immediately downstream of 4 m³/s for the summer months and of 0 m³/s for the winter months: >>> neardischargeminimumthreshold(_11_1_12=0.0, _03_31_12=0.0, ... _04_1_12=4.0, _10_31_12=4.0) As to be expected, the calculated required release is 0.0 m³/s on May 31 and 4.0 m³/s on April 1: >>> model.idx_sim = pub.timegrids.init['2001.03.31'] >>> model.calc_requiredrelease_v2() >>> fluxes.requiredrelease requiredrelease(0.0) >>> model.idx_sim = pub.timegrids.init['2001.04.01'] >>> model.calc_requiredrelease_v2() >>> fluxes.requiredrelease requiredrelease(4.0) """ con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess flu.requiredrelease = con.neardischargeminimumthreshold[ der.toy[self.idx_sim]]
[ "def", "calc_requiredrelease_v2", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "der", "=", "self", ".", "parameters", ".", "derived", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes"...
Calculate the water release (immediately downstream) required for reducing drought events. Required control parameter: |NearDischargeMinimumThreshold| Required derived parameter: |dam_derived.TOY| Calculated flux sequence: |RequiredRelease| Basic equation: :math:`RequiredRelease = NearDischargeMinimumThreshold` Examples: As in the examples above, define a short simulation time period first: >>> from hydpy import pub >>> pub.timegrids = '2001.03.30', '2001.04.03', '1d' Prepare the dam model: >>> from hydpy.models.dam import * >>> parameterstep() >>> derived.toy.update() Define a minimum discharge value for a cross section immediately downstream of 4 m³/s for the summer months and of 0 m³/s for the winter months: >>> neardischargeminimumthreshold(_11_1_12=0.0, _03_31_12=0.0, ... _04_1_12=4.0, _10_31_12=4.0) As to be expected, the calculated required release is 0.0 m³/s on May 31 and 4.0 m³/s on April 1: >>> model.idx_sim = pub.timegrids.init['2001.03.31'] >>> model.calc_requiredrelease_v2() >>> fluxes.requiredrelease requiredrelease(0.0) >>> model.idx_sim = pub.timegrids.init['2001.04.01'] >>> model.calc_requiredrelease_v2() >>> fluxes.requiredrelease requiredrelease(4.0)
[ "Calculate", "the", "water", "release", "(", "immediately", "downstream", ")", "required", "for", "reducing", "drought", "events", "." ]
python
train
peergradeio/flask-mongo-profiler
flask_mongo_profiler/contrib/flask_admin/formatters/profiling.py
https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/contrib/flask_admin/formatters/profiling.py#L48-L63
def profiling_request_formatter(view, context, model, name): """Wrap HTTP method value in a bs3 label.""" document = model[name] return Markup( ''.join( [ '<p class="profiling-request">', '<a href="{}">'.format(document.get_admin_url(_external=True)), http_method_formatter(view, context, document, 'method'), '&nbsp;', document.path, '</a>', '</p>', ] ) )
[ "def", "profiling_request_formatter", "(", "view", ",", "context", ",", "model", ",", "name", ")", ":", "document", "=", "model", "[", "name", "]", "return", "Markup", "(", "''", ".", "join", "(", "[", "'<p class=\"profiling-request\">'", ",", "'<a href=\"{}\"...
Wrap HTTP method value in a bs3 label.
[ "Wrap", "HTTP", "method", "value", "in", "a", "bs3", "label", "." ]
python
train
zomux/deepy
deepy/dataset/basic.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/dataset/basic.py#L51-L60
def vectorize_target(self, size): """ Make targets be one-hot vectors. """ if self._train_set: self._train_set = self._vectorize_set(self._train_set, size) if self._valid_set: self._valid_set = self._vectorize_set(self._valid_set, size) if self._test_set: self._test_set = self._vectorize_set(self._test_set, size)
[ "def", "vectorize_target", "(", "self", ",", "size", ")", ":", "if", "self", ".", "_train_set", ":", "self", ".", "_train_set", "=", "self", ".", "_vectorize_set", "(", "self", ".", "_train_set", ",", "size", ")", "if", "self", ".", "_valid_set", ":", ...
Make targets be one-hot vectors.
[ "Make", "targets", "be", "one", "-", "hot", "vectors", "." ]
python
test
nrcharles/caelum
caelum/tools.py
https://github.com/nrcharles/caelum/blob/9a8e65806385978556d7bb2e6870f003ff82023e/caelum/tools.py#L32-L46
def download_extract(url): """download and extract file.""" logger.info("Downloading %s", url) request = urllib2.Request(url) request.add_header('User-Agent', 'caelum/0.1 +https://github.com/nrcharles/caelum') opener = urllib2.build_opener() with tempfile.TemporaryFile(suffix='.zip', dir=env.WEATHER_DATA_PATH) \ as local_file: logger.debug('Saving to temporary file %s', local_file.name) local_file.write(opener.open(request).read()) compressed_file = zipfile.ZipFile(local_file, 'r') logger.debug('Extracting %s', compressed_file) compressed_file.extractall(env.WEATHER_DATA_PATH) local_file.close()
[ "def", "download_extract", "(", "url", ")", ":", "logger", ".", "info", "(", "\"Downloading %s\"", ",", "url", ")", "request", "=", "urllib2", ".", "Request", "(", "url", ")", "request", ".", "add_header", "(", "'User-Agent'", ",", "'caelum/0.1 +https://github...
download and extract file.
[ "download", "and", "extract", "file", "." ]
python
train
hsolbrig/pyjsg
pyjsg/parser_impl/jsg_pairdef_parser.py
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_pairdef_parser.py#L161-L165
def visitName(self, ctx: jsgParser.NameContext): """ name: ID | STRING """ rtkn = get_terminal(ctx) tkn = esc_kw(rtkn) self._names[rtkn] = tkn
[ "def", "visitName", "(", "self", ",", "ctx", ":", "jsgParser", ".", "NameContext", ")", ":", "rtkn", "=", "get_terminal", "(", "ctx", ")", "tkn", "=", "esc_kw", "(", "rtkn", ")", "self", ".", "_names", "[", "rtkn", "]", "=", "tkn" ]
name: ID | STRING
[ "name", ":", "ID", "|", "STRING" ]
python
train
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L135-L144
def add_image(self, batch_id, image_id, image_properties=None): """Adds image to given batch.""" if batch_id not in self._data: raise KeyError('Batch with ID "{0}" does not exist'.format(batch_id)) if image_properties is None: image_properties = {} if not isinstance(image_properties, dict): raise ValueError('image_properties has to be dict, however it was: ' + str(type(image_properties))) self._data[batch_id]['images'][image_id] = image_properties.copy()
[ "def", "add_image", "(", "self", ",", "batch_id", ",", "image_id", ",", "image_properties", "=", "None", ")", ":", "if", "batch_id", "not", "in", "self", ".", "_data", ":", "raise", "KeyError", "(", "'Batch with ID \"{0}\" does not exist'", ".", "format", "(",...
Adds image to given batch.
[ "Adds", "image", "to", "given", "batch", "." ]
python
train
yahoo/TensorFlowOnSpark
examples/imagenet/inception/data/process_bounding_boxes.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/data/process_bounding_boxes.py#L119-L168
def ProcessXMLAnnotation(xml_file): """Process a single XML file containing a bounding box.""" # pylint: disable=broad-except try: tree = ET.parse(xml_file) except Exception: print('Failed to parse: ' + xml_file, file=sys.stderr) return None # pylint: enable=broad-except root = tree.getroot() num_boxes = FindNumberBoundingBoxes(root) boxes = [] for index in range(num_boxes): box = BoundingBox() # Grab the 'index' annotation. box.xmin = GetInt('xmin', root, index) box.ymin = GetInt('ymin', root, index) box.xmax = GetInt('xmax', root, index) box.ymax = GetInt('ymax', root, index) box.width = GetInt('width', root) box.height = GetInt('height', root) box.filename = GetItem('filename', root) + '.JPEG' box.label = GetItem('name', root) xmin = float(box.xmin) / float(box.width) xmax = float(box.xmax) / float(box.width) ymin = float(box.ymin) / float(box.height) ymax = float(box.ymax) / float(box.height) # Some images contain bounding box annotations that # extend outside of the supplied image. See, e.g. # n03127925/n03127925_147.xml # Additionally, for some bounding boxes, the min > max # or the box is entirely outside of the image. min_x = min(xmin, xmax) max_x = max(xmin, xmax) box.xmin_scaled = min(max(min_x, 0.0), 1.0) box.xmax_scaled = min(max(max_x, 0.0), 1.0) min_y = min(ymin, ymax) max_y = max(ymin, ymax) box.ymin_scaled = min(max(min_y, 0.0), 1.0) box.ymax_scaled = min(max(max_y, 0.0), 1.0) boxes.append(box) return boxes
[ "def", "ProcessXMLAnnotation", "(", "xml_file", ")", ":", "# pylint: disable=broad-except", "try", ":", "tree", "=", "ET", ".", "parse", "(", "xml_file", ")", "except", "Exception", ":", "print", "(", "'Failed to parse: '", "+", "xml_file", ",", "file", "=", "...
Process a single XML file containing a bounding box.
[ "Process", "a", "single", "XML", "file", "containing", "a", "bounding", "box", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/openstack/amulet/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/amulet/utils.py#L534-L546
def get_keystone_endpoint(self, keystone_ip, api_version=None, admin_port=False): """Return keystone endpoint""" port = 5000 if admin_port: port = 35357 base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), port) if api_version == 2: ep = base_ep + "/v2.0" else: ep = base_ep + "/v3" return ep
[ "def", "get_keystone_endpoint", "(", "self", ",", "keystone_ip", ",", "api_version", "=", "None", ",", "admin_port", "=", "False", ")", ":", "port", "=", "5000", "if", "admin_port", ":", "port", "=", "35357", "base_ep", "=", "\"http://{}:{}\"", ".", "format"...
Return keystone endpoint
[ "Return", "keystone", "endpoint" ]
python
train
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L781-L827
def write_temp_bird_conf(dummy_ip_prefix, config_file, variable_name, prefixes): """Write in a temporary file the list of IP-Prefixes. A failure to create and write the temporary file will exit main program. Arguments: dummy_ip_prefix (str): The dummy IP prefix, which must be always config_file (str): The file name of bird configuration variable_name (str): The name of the variable set in bird configuration prefixes (list): The list of IP-Prefixes to write Returns: The filename of the temporary file """ log = logging.getLogger(PROGRAM_NAME) comment = ("# {i} is a dummy IP Prefix. It should NOT be used and " "REMOVED from the constant.".format(i=dummy_ip_prefix)) # the temporary file must be on the same filesystem as the bird config # as we use os.rename to perform an atomic update on the bird config. # Thus, we create it in the same directory that bird config is stored. tm_file = os.path.join(os.path.dirname(config_file), str(time.time())) log.debug("going to write to %s", tm_file) try: with open(tm_file, 'w') as tmpf: tmpf.write("# Generated {t} by {n} (pid={p})\n" .format(t=datetime.datetime.now(), n=PROGRAM_NAME, p=os.getpid())) tmpf.write("{c}\n".format(c=comment)) tmpf.write("define {n} =\n".format(n=variable_name)) tmpf.write("{s}[\n".format(s=4 * ' ')) # all entries of the array need a trailing comma except the last # one. A single element array doesn't need a trailing comma. tmpf.write(',\n'.join([' '*8 + n for n in prefixes])) tmpf.write("\n{s}];\n".format(s=4 * ' ')) except OSError as error: log.critical("failed to write temporary file %s: %s. This is a FATAL " "error, this exiting main program", tm_file, error) sys.exit(1) else: return tm_file
[ "def", "write_temp_bird_conf", "(", "dummy_ip_prefix", ",", "config_file", ",", "variable_name", ",", "prefixes", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "PROGRAM_NAME", ")", "comment", "=", "(", "\"# {i} is a dummy IP Prefix. It should NOT be used and \...
Write in a temporary file the list of IP-Prefixes. A failure to create and write the temporary file will exit main program. Arguments: dummy_ip_prefix (str): The dummy IP prefix, which must be always config_file (str): The file name of bird configuration variable_name (str): The name of the variable set in bird configuration prefixes (list): The list of IP-Prefixes to write Returns: The filename of the temporary file
[ "Write", "in", "a", "temporary", "file", "the", "list", "of", "IP", "-", "Prefixes", "." ]
python
train
Netflix-Skunkworks/cloudaux
cloudaux/orchestration/aws/lambda_function.py
https://github.com/Netflix-Skunkworks/cloudaux/blob/c4b0870c3ac68b1c69e71d33cf78b6a8bdf437ea/cloudaux/orchestration/aws/lambda_function.py#L13-L45
def _get_policy(lambda_function, **conn): """Get LambdaFunction Policies. (there can be many of these!) Lambda Function Policies are overly complicated. They can be attached to a label, a version, and there is also a default policy. This method attempts to gather all three types. AWS returns an exception if the policy requested does not exist. We catch and ignore these exceptions. """ policies = dict(Versions=dict(), Aliases=dict(), DEFAULT=dict()) for version in [v['Version'] for v in lambda_function['versions']]: try: policies['Versions'][version] = get_policy(FunctionName=lambda_function['FunctionName'], Qualifier=version, **conn) policies['Versions'][version] = json.loads(policies['Versions'][version]) except Exception as e: pass for alias in [v['Name'] for v in lambda_function['aliases']]: try: policies['Aliases'][alias] = get_policy(FunctionName=lambda_function['FunctionName'], Qualifier=alias, **conn) policies['Aliases'][alias] = json.loads(policies['Aliases'][alias]) except Exception as e: pass try: policies['DEFAULT'] = get_policy(FunctionName=lambda_function['FunctionName'], **conn) policies['DEFAULT'] = json.loads(policies['DEFAULT']) except Exception as e: pass return policies
[ "def", "_get_policy", "(", "lambda_function", ",", "*", "*", "conn", ")", ":", "policies", "=", "dict", "(", "Versions", "=", "dict", "(", ")", ",", "Aliases", "=", "dict", "(", ")", ",", "DEFAULT", "=", "dict", "(", ")", ")", "for", "version", "in...
Get LambdaFunction Policies. (there can be many of these!) Lambda Function Policies are overly complicated. They can be attached to a label, a version, and there is also a default policy. This method attempts to gather all three types. AWS returns an exception if the policy requested does not exist. We catch and ignore these exceptions.
[ "Get", "LambdaFunction", "Policies", ".", "(", "there", "can", "be", "many", "of", "these!", ")", "Lambda", "Function", "Policies", "are", "overly", "complicated", ".", "They", "can", "be", "attached", "to", "a", "label", "a", "version", "and", "there", "i...
python
valid
geophysics-ubonn/reda
lib/reda/containers/sEIT.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/containers/sEIT.py#L307-L355
def filter_incomplete_spectra(self, flimit=1000, percAccept=85): """Remove all data points that belong to spectra that did not retain at least **percAccept** percent of the number of data points. ..warning:: This function does not honor additional dimensions (e.g., timesteps) yet! """ assert percAccept > 0 and percAccept < 100 def _retain_only_complete_spectra(item, fmax, acceptN): """Function called using pd.filter, applied to all spectra in the data set. Return true if the number of data points <= **fmax** in item is equal, or larger, than **acceptN**. Parameters ---------- item : :py:class:`pandas.DataFrame` dataframe containing one spectrum fmax : float maximum frequency up to which data points are counted acceptN : int the number of data points required to pass this test Returns ------- true : bool if enough data points are present false : bool if not enough data points are present """ frequencies = item['frequency'].loc[item['frequency'] < fmax] fN = frequencies.size if fN >= acceptN: return True return False group_abmn = self.data.groupby(['a', 'b', 'm', 'n']) frequencies = np.array( list(sorted(self.data.groupby('frequency').groups.keys())) ) assert flimit >= frequencies.min() and flimit <= frequencies.max() Nlimit = len(np.where(frequencies <= flimit)[0]) Naccept = np.ceil(Nlimit * percAccept / 100.0) self.data = group_abmn.filter( _retain_only_complete_spectra, fmax=flimit, acceptN=Naccept ).copy()
[ "def", "filter_incomplete_spectra", "(", "self", ",", "flimit", "=", "1000", ",", "percAccept", "=", "85", ")", ":", "assert", "percAccept", ">", "0", "and", "percAccept", "<", "100", "def", "_retain_only_complete_spectra", "(", "item", ",", "fmax", ",", "ac...
Remove all data points that belong to spectra that did not retain at least **percAccept** percent of the number of data points. ..warning:: This function does not honor additional dimensions (e.g., timesteps) yet!
[ "Remove", "all", "data", "points", "that", "belong", "to", "spectra", "that", "did", "not", "retain", "at", "least", "**", "percAccept", "**", "percent", "of", "the", "number", "of", "data", "points", "." ]
python
train
BD2KGenomics/protect
src/protect/common.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/common.py#L557-L582
def chrom_sorted(in_chroms): """ Sort a list of chromosomes in the order 1..22, X, Y, M, <others in alphabetical order>. :param list in_chroms: Input chromosomes :return: Sorted chromosomes :rtype: list[str] """ in_chroms.sort() canonicals = [str(c) for c in range(1, 23)] + ['X', 'Y', 'M', 'MT'] canonical_chr = ['chr' + c for c in canonicals] out_chroms_dict = { 'can': [c for c in in_chroms if c in canonicals], 'can_chr': [c for c in in_chroms if c in canonical_chr], 'others': [c for c in in_chroms if c not in canonicals + canonical_chr]} assert not (out_chroms_dict['can'] and out_chroms_dict['can_chr']) assert not ('M' in out_chroms_dict['can']and 'MT' in out_chroms_dict['can']) assert not ('chrM' in out_chroms_dict['can_chr'] and 'chrMT' in out_chroms_dict['can_chr']) out_chroms_dict['can'] = canonical_chrom_sorted(out_chroms_dict['can']) out_chroms_dict['can_chr'] = canonical_chrom_sorted(out_chroms_dict['can_chr']) out_chroms = out_chroms_dict['can'] or out_chroms_dict['can_chr'] out_chroms.extend(out_chroms_dict['others']) return out_chroms
[ "def", "chrom_sorted", "(", "in_chroms", ")", ":", "in_chroms", ".", "sort", "(", ")", "canonicals", "=", "[", "str", "(", "c", ")", "for", "c", "in", "range", "(", "1", ",", "23", ")", "]", "+", "[", "'X'", ",", "'Y'", ",", "'M'", ",", "'MT'",...
Sort a list of chromosomes in the order 1..22, X, Y, M, <others in alphabetical order>. :param list in_chroms: Input chromosomes :return: Sorted chromosomes :rtype: list[str]
[ "Sort", "a", "list", "of", "chromosomes", "in", "the", "order", "1", "..", "22", "X", "Y", "M", "<others", "in", "alphabetical", "order", ">", "." ]
python
train
cggh/scikit-allel
allel/stats/sf.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L753-L775
def plot_joint_sfs_scaled(*args, **kwargs): """Plot a scaled joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn. """ imshow_kwargs = kwargs.get('imshow_kwargs', dict()) imshow_kwargs.setdefault('norm', None) kwargs['imshow_kwargs'] = imshow_kwargs ax = plot_joint_sfs(*args, **kwargs) return ax
[ "def", "plot_joint_sfs_scaled", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "imshow_kwargs", "=", "kwargs", ".", "get", "(", "'imshow_kwargs'", ",", "dict", "(", ")", ")", "imshow_kwargs", ".", "setdefault", "(", "'norm'", ",", "None", ")", "kwa...
Plot a scaled joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn.
[ "Plot", "a", "scaled", "joint", "site", "frequency", "spectrum", "." ]
python
train
litaotao/IPython-Dashboard
dashboard/server/resources/storage.py
https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/resources/storage.py#L40-L47
def get(self, key): """Get a key-value from storage according to the key name. """ data = r_kv.get(key) # data = json.dumps(data) if isinstance(data, str) else data # data = json.loads(data) if data else {} return build_response(dict(data=data, code=200))
[ "def", "get", "(", "self", ",", "key", ")", ":", "data", "=", "r_kv", ".", "get", "(", "key", ")", "# data = json.dumps(data) if isinstance(data, str) else data", "# data = json.loads(data) if data else {}", "return", "build_response", "(", "dict", "(", "data", "=", ...
Get a key-value from storage according to the key name.
[ "Get", "a", "key", "-", "value", "from", "storage", "according", "to", "the", "key", "name", "." ]
python
train
pypa/setuptools
setuptools/command/egg_info.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/command/egg_info.py#L701-L713
def get_pkg_info_revision(): """ Get a -r### off of PKG-INFO Version in case this is an sdist of a subversion revision. """ warnings.warn("get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning) if os.path.exists('PKG-INFO'): with io.open('PKG-INFO') as f: for line in f: match = re.match(r"Version:.*-r(\d+)\s*$", line) if match: return int(match.group(1)) return 0
[ "def", "get_pkg_info_revision", "(", ")", ":", "warnings", ".", "warn", "(", "\"get_pkg_info_revision is deprecated.\"", ",", "EggInfoDeprecationWarning", ")", "if", "os", ".", "path", ".", "exists", "(", "'PKG-INFO'", ")", ":", "with", "io", ".", "open", "(", ...
Get a -r### off of PKG-INFO Version in case this is an sdist of a subversion revision.
[ "Get", "a", "-", "r###", "off", "of", "PKG", "-", "INFO", "Version", "in", "case", "this", "is", "an", "sdist", "of", "a", "subversion", "revision", "." ]
python
train
mitsei/dlkit
dlkit/json_/proxy/rules.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/proxy/rules.py#L358-L369
def set_http_request(self, http_request): """Support the HTTPRequest ProxyConditionRecordType and checks for special effective agent ids""" self._http_request = http_request if 'HTTP_LTI_USER_ID' in http_request.META: try: authority = http_request.META['HTTP_LTI_TOOL_CONSUMER_INSTANCE_GUID'] except (AttributeError, KeyError): authority = 'unknown_lti_consumer_instance' self.set_effective_agent_id(Id( authority=authority, namespace='agent.Agent', identifier=http_request.META['HTTP_LTI_USER_ID']))
[ "def", "set_http_request", "(", "self", ",", "http_request", ")", ":", "self", ".", "_http_request", "=", "http_request", "if", "'HTTP_LTI_USER_ID'", "in", "http_request", ".", "META", ":", "try", ":", "authority", "=", "http_request", ".", "META", "[", "'HTTP...
Support the HTTPRequest ProxyConditionRecordType and checks for special effective agent ids
[ "Support", "the", "HTTPRequest", "ProxyConditionRecordType", "and", "checks", "for", "special", "effective", "agent", "ids" ]
python
train
erdewit/ib_insync
ib_insync/ib.py
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/ib.py#L1397-L1416
def calculateOptionPrice( self, contract: Contract, volatility: float, underPrice: float, optPrcOptions=None) -> OptionComputation: """ Calculate the option price given the volatility. This method is blocking. https://interactivebrokers.github.io/tws-api/option_computations.html Args: contract: Option contract. volatility: Option volatility to use in calculation. underPrice: Price of the underlier to use in calculation implVolOptions: Unknown """ return self._run( self.calculateOptionPriceAsync( contract, volatility, underPrice, optPrcOptions))
[ "def", "calculateOptionPrice", "(", "self", ",", "contract", ":", "Contract", ",", "volatility", ":", "float", ",", "underPrice", ":", "float", ",", "optPrcOptions", "=", "None", ")", "->", "OptionComputation", ":", "return", "self", ".", "_run", "(", "self"...
Calculate the option price given the volatility. This method is blocking. https://interactivebrokers.github.io/tws-api/option_computations.html Args: contract: Option contract. volatility: Option volatility to use in calculation. underPrice: Price of the underlier to use in calculation implVolOptions: Unknown
[ "Calculate", "the", "option", "price", "given", "the", "volatility", "." ]
python
train
go-macaroon-bakery/py-macaroon-bakery
macaroonbakery/httpbakery/_client.py
https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/httpbakery/_client.py#L347-L358
def _add_json_binary_field(b, serialized, field): '''' Set the given field to the given val (bytes) in the serialized dictionary. If the value isn't valid utf-8, we base64 encode it and use field+"64" as the field name. ''' try: val = b.decode('utf-8') serialized[field] = val except UnicodeDecodeError: val = base64.b64encode(b).decode('utf-8') serialized[field + '64'] = val
[ "def", "_add_json_binary_field", "(", "b", ",", "serialized", ",", "field", ")", ":", "try", ":", "val", "=", "b", ".", "decode", "(", "'utf-8'", ")", "serialized", "[", "field", "]", "=", "val", "except", "UnicodeDecodeError", ":", "val", "=", "base64",...
Set the given field to the given val (bytes) in the serialized dictionary. If the value isn't valid utf-8, we base64 encode it and use field+"64" as the field name.
[ "Set", "the", "given", "field", "to", "the", "given", "val", "(", "bytes", ")", "in", "the", "serialized", "dictionary", ".", "If", "the", "value", "isn", "t", "valid", "utf", "-", "8", "we", "base64", "encode", "it", "and", "use", "field", "+", "64"...
python
train
apache/airflow
airflow/task/task_runner/base_task_runner.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/task/task_runner/base_task_runner.py#L157-L165
def on_finish(self): """ A callback that should be called when this is done running. """ if self._cfg_path and os.path.isfile(self._cfg_path): if self.run_as_user: subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True) else: os.remove(self._cfg_path)
[ "def", "on_finish", "(", "self", ")", ":", "if", "self", ".", "_cfg_path", "and", "os", ".", "path", ".", "isfile", "(", "self", ".", "_cfg_path", ")", ":", "if", "self", ".", "run_as_user", ":", "subprocess", ".", "call", "(", "[", "'sudo'", ",", ...
A callback that should be called when this is done running.
[ "A", "callback", "that", "should", "be", "called", "when", "this", "is", "done", "running", "." ]
python
test
benley/butcher
butcher/address.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/address.py#L115-L158
def __parse_target(targetstr, current_repo=None): """Parse a build target string. General form: //repo[gitref]/dir/path:target. These are all valid: //repo //repo[a038fi31d9e8bc11582ef1b1b1982d8fc] //repo[a039aa30853298]:foo //repo/dir //repo[a037928734]/dir //repo/dir/path //repo/dir/path:foo :foo dir/path dir/path:foo dir:foo Returns: {'repo': '//reponame', 'git_ref': 'a839a38fd...', 'path': 'dir/path', 'target': 'targetname} """ # 'blah' -> ':blah' if not (':' in targetstr or '/' in targetstr): targetstr = ':%s' % targetstr match = re.match( r'^(?://(?P<repo>[\w-]+)(?:\[(?P<git_ref>.*)\])?)?' r'(?:$|/?(?P<path>[\w/-]+)?(?::?(?P<target>[\w-]+)?))', targetstr) try: groups = match.groupdict() if not groups['repo']: groups['repo'] = current_repo if not groups['git_ref']: groups['git_ref'] = 'develop' if not groups['target']: groups['target'] = 'all' if not groups['path']: groups['path'] = '' except AttributeError: raise error.ButcherError('"%s" is not a valid build target.') #log.debug('parse_target: %s -> %s', targetstr, groups) return groups
[ "def", "__parse_target", "(", "targetstr", ",", "current_repo", "=", "None", ")", ":", "# 'blah' -> ':blah'", "if", "not", "(", "':'", "in", "targetstr", "or", "'/'", "in", "targetstr", ")", ":", "targetstr", "=", "':%s'", "%", "targetstr", "match", "=", "...
Parse a build target string. General form: //repo[gitref]/dir/path:target. These are all valid: //repo //repo[a038fi31d9e8bc11582ef1b1b1982d8fc] //repo[a039aa30853298]:foo //repo/dir //repo[a037928734]/dir //repo/dir/path //repo/dir/path:foo :foo dir/path dir/path:foo dir:foo Returns: {'repo': '//reponame', 'git_ref': 'a839a38fd...', 'path': 'dir/path', 'target': 'targetname}
[ "Parse", "a", "build", "target", "string", "." ]
python
train
SpriteLink/NIPAP
nipap/nipap/backend.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap/nipap/backend.py#L2240-L2309
def smart_search_pool(self, auth, query_str, search_options=None, extra_query=None): """ Perform a smart search on pool list. * `auth` [BaseAuth] AAA options. * `query_str` [string] Search string * `search_options` [options_dict] Search options. See :func:`search_pool`. * `extra_query` [dict_to_sql] Extra search terms, will be AND:ed together with what is extracted from the query string. Return a dict with three elements: * :attr:`interpretation` - How the query string was interpreted. * :attr:`search_options` - Various search_options. * :attr:`result` - The search result. The :attr:`interpretation` is given as a list of dicts, each explaining how a part of the search key was interpreted (ie. what pool attribute the search operation was performed on). The :attr:`result` is a list of dicts containing the search result. The smart search function tries to convert the query from a text string to a `query` dict which is passed to the :func:`search_pool` function. If multiple search keys are detected, they are combined with a logical AND. It will basically just take each search term and try to match it against the name or description column with regex match. See the :func:`search_pool` function for an explanation of the `search_options` argument. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_pool` for full understanding. 
""" if search_options is None: search_options = {} self._logger.debug("smart_search_pool query string: %s" % query_str) success, query = self._parse_pool_query(query_str) if not success: return { 'interpretation': query, 'search_options': search_options, 'result': [], 'error': True, 'error_message': 'query interpretation failed' } if extra_query is not None: query = { 'operator': 'and', 'val1': query, 'val2': extra_query } self._logger.debug("smart_search_pool; query expanded to: %s" % unicode(query)) search_result = self.search_pool(auth, query, search_options) search_result['interpretation'] = query search_result['error'] = False return search_result
[ "def", "smart_search_pool", "(", "self", ",", "auth", ",", "query_str", ",", "search_options", "=", "None", ",", "extra_query", "=", "None", ")", ":", "if", "search_options", "is", "None", ":", "search_options", "=", "{", "}", "self", ".", "_logger", ".", ...
Perform a smart search on pool list. * `auth` [BaseAuth] AAA options. * `query_str` [string] Search string * `search_options` [options_dict] Search options. See :func:`search_pool`. * `extra_query` [dict_to_sql] Extra search terms, will be AND:ed together with what is extracted from the query string. Return a dict with three elements: * :attr:`interpretation` - How the query string was interpreted. * :attr:`search_options` - Various search_options. * :attr:`result` - The search result. The :attr:`interpretation` is given as a list of dicts, each explaining how a part of the search key was interpreted (ie. what pool attribute the search operation was performed on). The :attr:`result` is a list of dicts containing the search result. The smart search function tries to convert the query from a text string to a `query` dict which is passed to the :func:`search_pool` function. If multiple search keys are detected, they are combined with a logical AND. It will basically just take each search term and try to match it against the name or description column with regex match. See the :func:`search_pool` function for an explanation of the `search_options` argument. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_pool` for full understanding.
[ "Perform", "a", "smart", "search", "on", "pool", "list", "." ]
python
train
ml4ai/delphi
delphi/apps/rest_api/api.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/apps/rest_api/api.py#L55-L59
def getICMByUUID(uuid: str): """ Fetch an ICM by UUID""" _metadata = ICMMetadata.query.filter_by(id=uuid).first().deserialize() del _metadata["model_id"] return jsonify(_metadata)
[ "def", "getICMByUUID", "(", "uuid", ":", "str", ")", ":", "_metadata", "=", "ICMMetadata", ".", "query", ".", "filter_by", "(", "id", "=", "uuid", ")", ".", "first", "(", ")", ".", "deserialize", "(", ")", "del", "_metadata", "[", "\"model_id\"", "]", ...
Fetch an ICM by UUID
[ "Fetch", "an", "ICM", "by", "UUID" ]
python
train
collectiveacuity/labPack
labpack/events/meetup.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/events/meetup.py#L2040-L2099
def join_event(self, group_url, event_id, additional_guests=0, attendance_answers=None, payment_service='', payment_code=''): ''' a method to create an rsvp for a meetup event :param group_url: string with meetup urlname for group :param event_id: integer with meetup id for event :param additional_guests: [optional] integer with number of additional guests :param attendance_answers: [optional] list with id & answer for event survey questions :param payment_service: [optional] string with name of payment service to use :param payment_code: [optional] string with token to authorize payment :return: dictionary with attendee details inside [json] key attendee_details = self._reconstruct_attendee({}) ''' # https://www.meetup.com/meetup_api/docs/:urlname/events/:event_id/rsvps/ title = '%s.join_event' % self.__class__.__name__ # validate permissions if not 'rsvp' in self.service_scope: raise ValueError('%s requires group_join as part of oauth2 service_scope permissions.' % title) # validate inputs input_fields = { 'group_url': group_url, 'event_id': event_id, 'additional_guests': additional_guests, 'attendance_answers': attendance_answers, 'payment_service': payment_service, 'payment_code': payment_code } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct request fields url = '%s/%s/events/%s/rsvps' % (self.endpoint, group_url, event_id) params = { 'response': 'yes' } if additional_guests: params['guests'] = additional_guests if attendance_answers: for answer in attendance_answers: key = 'answer_%s' % str(answer['question_id']) params[key] = answer['answer_text'] if payment_service: params['agree_to_refund'] = True params['opt_to_pay'] = True # send request response_details = self._post_request(url, params=params) # construct method output if response_details['json']: response_details['json'] = self._reconstruct_attendee(response_details['json']) return 
response_details
[ "def", "join_event", "(", "self", ",", "group_url", ",", "event_id", ",", "additional_guests", "=", "0", ",", "attendance_answers", "=", "None", ",", "payment_service", "=", "''", ",", "payment_code", "=", "''", ")", ":", "# https://www.meetup.com/meetup_api/docs/...
a method to create an rsvp for a meetup event :param group_url: string with meetup urlname for group :param event_id: integer with meetup id for event :param additional_guests: [optional] integer with number of additional guests :param attendance_answers: [optional] list with id & answer for event survey questions :param payment_service: [optional] string with name of payment service to use :param payment_code: [optional] string with token to authorize payment :return: dictionary with attendee details inside [json] key attendee_details = self._reconstruct_attendee({})
[ "a", "method", "to", "create", "an", "rsvp", "for", "a", "meetup", "event", ":", "param", "group_url", ":", "string", "with", "meetup", "urlname", "for", "group", ":", "param", "event_id", ":", "integer", "with", "meetup", "id", "for", "event", ":", "par...
python
train
pyviz/imagen
imagen/image.py
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/image.py#L322-L332
def _process_channels(self,p,**params_to_override): """ Add the channel information to the channel_data attribute. """ orig_image = self._image for i in range(len(self._channel_data)): self._image = self._original_channel_data[i] self._channel_data[i] = self._reduced_call(**params_to_override) self._image = orig_image return self._channel_data
[ "def", "_process_channels", "(", "self", ",", "p", ",", "*", "*", "params_to_override", ")", ":", "orig_image", "=", "self", ".", "_image", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_channel_data", ")", ")", ":", "self", ".", "_image", ...
Add the channel information to the channel_data attribute.
[ "Add", "the", "channel", "information", "to", "the", "channel_data", "attribute", "." ]
python
train
rwl/pylon
pylon/util.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/util.py#L243-L256
def fair_max(x): """ Takes a single iterable as an argument and returns the same output as the built-in function max with two output parameters, except that where the maximum value occurs at more than one position in the vector, the index is chosen randomly from these positions as opposed to just choosing the first occurance. """ value = max(x) # List indexes of max value. i = [x.index(v) for v in x if v == value] # Select index randomly among occurances. idx = random.choice(i) return idx, value
[ "def", "fair_max", "(", "x", ")", ":", "value", "=", "max", "(", "x", ")", "# List indexes of max value.", "i", "=", "[", "x", ".", "index", "(", "v", ")", "for", "v", "in", "x", "if", "v", "==", "value", "]", "# Select index randomly among occurances.",...
Takes a single iterable as an argument and returns the same output as the built-in function max with two output parameters, except that where the maximum value occurs at more than one position in the vector, the index is chosen randomly from these positions as opposed to just choosing the first occurance.
[ "Takes", "a", "single", "iterable", "as", "an", "argument", "and", "returns", "the", "same", "output", "as", "the", "built", "-", "in", "function", "max", "with", "two", "output", "parameters", "except", "that", "where", "the", "maximum", "value", "occurs", ...
python
train
ArtoLabs/SimpleSteem
simplesteem/simplesteem.py
https://github.com/ArtoLabs/SimpleSteem/blob/ce8be0ae81f8878b460bc156693f1957f7dd34a3/simplesteem/simplesteem.py#L117-L135
def steem_instance(self): ''' Returns the steem instance if it already exists otherwise uses the goodnode method to fetch a node and instantiate the Steem class. ''' if self.s: return self.s for num_of_retries in range(default.max_retry): node = self.util.goodnode(self.nodes) try: self.s = Steem(keys=self.keys, nodes=[node]) except Exception as e: self.util.retry("COULD NOT GET STEEM INSTANCE", e, num_of_retries, default.wait_time) self.s = None else: return self.s return False
[ "def", "steem_instance", "(", "self", ")", ":", "if", "self", ".", "s", ":", "return", "self", ".", "s", "for", "num_of_retries", "in", "range", "(", "default", ".", "max_retry", ")", ":", "node", "=", "self", ".", "util", ".", "goodnode", "(", "self...
Returns the steem instance if it already exists otherwise uses the goodnode method to fetch a node and instantiate the Steem class.
[ "Returns", "the", "steem", "instance", "if", "it", "already", "exists", "otherwise", "uses", "the", "goodnode", "method", "to", "fetch", "a", "node", "and", "instantiate", "the", "Steem", "class", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L11017-L11034
def scs2e(sc, sclkch): """ Convert a spacecraft clock string to ephemeris seconds past J2000 (ET). http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scs2e_c.html :param sc: NAIF integer code for a spacecraft. :type sc: int :param sclkch: An SCLK string. :type sclkch: str :return: Ephemeris time, seconds past J2000. :rtype: float """ sc = ctypes.c_int(sc) sclkch = stypes.stringToCharP(sclkch) et = ctypes.c_double() libspice.scs2e_c(sc, sclkch, ctypes.byref(et)) return et.value
[ "def", "scs2e", "(", "sc", ",", "sclkch", ")", ":", "sc", "=", "ctypes", ".", "c_int", "(", "sc", ")", "sclkch", "=", "stypes", ".", "stringToCharP", "(", "sclkch", ")", "et", "=", "ctypes", ".", "c_double", "(", ")", "libspice", ".", "scs2e_c", "(...
Convert a spacecraft clock string to ephemeris seconds past J2000 (ET). http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scs2e_c.html :param sc: NAIF integer code for a spacecraft. :type sc: int :param sclkch: An SCLK string. :type sclkch: str :return: Ephemeris time, seconds past J2000. :rtype: float
[ "Convert", "a", "spacecraft", "clock", "string", "to", "ephemeris", "seconds", "past", "J2000", "(", "ET", ")", "." ]
python
train
manns/pyspread
pyspread/src/actions/_main_window_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_main_window_actions.py#L695-L705
def execute_macros(self): """Executes macros and marks grid as changed""" # Mark content as changed post_command_event(self.main_window, self.ContentChangedMsg) (result, err) = self.grid.code_array.execute_macros() # Post event to macro dialog post_command_event(self.main_window, self.MacroErrorMsg, msg=result, err=err)
[ "def", "execute_macros", "(", "self", ")", ":", "# Mark content as changed", "post_command_event", "(", "self", ".", "main_window", ",", "self", ".", "ContentChangedMsg", ")", "(", "result", ",", "err", ")", "=", "self", ".", "grid", ".", "code_array", ".", ...
Executes macros and marks grid as changed
[ "Executes", "macros", "and", "marks", "grid", "as", "changed" ]
python
train