code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def default(restart_cb=None, restart_func=None, close_fds=True):
    '''Sets up lazarus in default mode.

    See the :py:func:`custom` function for a more powerful mode of use.

    The default mode of lazarus is to watch all modules rooted at
    ``PYTHONPATH`` for changes and restart when they take place.

    Keyword arguments:
    restart_cb -- Callback invoked prior to restarting the process;
        allows for any cleanup to occur prior to restarting. Returning
        anything other than *None* in the callback will cancel the
        restart.
    restart_func -- Function invoked to restart the process. This
        supplants the default behavior of using *sys.executable* and
        *sys.argv*.
    close_fds -- Whether all file descriptors other than *stdin*,
        *stdout*, and *stderr* should be closed

    Raises:
        RuntimeWarning -- if lazarus is already active
        RuntimeError -- if PYTHONPATH is unset or watchdog is missing
        TypeError -- if restart_cb or restart_func is not callable

    A simple example:

    >>> import lazarus
    >>> lazarus.default()
    >>> lazarus.stop()
    '''
    # Guard against double activation of the module-level machinery.
    if _active:
        msg = 'lazarus is already active'
        raise RuntimeWarning(msg)

    # The watched root is taken from the environment, not a parameter.
    _python_path = os.getenv('PYTHONPATH')
    if not _python_path:
        msg = 'PYTHONPATH is not set'
        raise RuntimeError(msg)

    if restart_cb and not callable(restart_cb):
        msg = 'restart_cb keyword argument is not callable'
        raise TypeError(msg)

    if restart_func and not callable(restart_func):
        msg = 'restart_func keyword argument is not callable'
        raise TypeError(msg)

    global _close_fds
    _close_fds = close_fds

    # watchdog is an optional dependency; surface a clear error when absent.
    try:
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler
    except ImportError as ie:
        msg = 'no watchdog support (%s)' % str(ie)
        raise RuntimeError(msg)

    class _Handler(FileSystemEventHandler):
        # Funnels every filesystem event into a single restart check.

        def __init__(self):
            # Once a restart is underway, further events are ignored.
            self.active = True

        def dispatch(self, event):
            if not self.active:
                return
            super(_Handler, self).dispatch(event)

        def all_events(self, event):
            # _restart() returns a truthy value when the restart_cb
            # cancelled the restart; only deactivate when not cancelled.
            if is_restart_event(event):
                cancelled = _restart()
                if not cancelled:
                    self.active = False

        def on_created(self, event):
            self.all_events(event)

        def on_deleted(self, event):
            self.all_events(event)

        def on_modified(self, event):
            self.all_events(event)

        def on_moved(self, event):
            self.all_events(event)

    global _observer
    _observer = Observer()
    handler = _Handler()
    _observer.schedule(handler, _python_path, recursive=True)

    global _restart_cb
    _restart_cb = restart_cb

    global _restart_func
    _restart_func = restart_func

    _activate()
    _observer.start()
Sets up lazarus in default mode. See the :py:func:`custom` function for a more powerful mode of use. The default mode of lazarus is to watch all modules rooted at ``PYTHONPATH`` for changes and restart when they take place. Keyword arguments: restart_cb -- Callback invoked prior to restarting the process; allows for any cleanup to occur prior to restarting. Returning anything other than *None* in the callback will cancel the restart. restart_func -- Function invoked to restart the process. This supplants the default behavior of using *sys.executable* and *sys.argv*. close_fds -- Whether all file descriptors other than *stdin*, *stdout*, and *stderr* should be closed A simple example: >>> import lazarus >>> lazarus.default() >>> lazarus.stop()
Below is the the instruction that describes the task: ### Input: Sets up lazarus in default mode. See the :py:func:`custom` function for a more powerful mode of use. The default mode of lazarus is to watch all modules rooted at ``PYTHONPATH`` for changes and restart when they take place. Keyword arguments: restart_cb -- Callback invoked prior to restarting the process; allows for any cleanup to occur prior to restarting. Returning anything other than *None* in the callback will cancel the restart. restart_func -- Function invoked to restart the process. This supplants the default behavior of using *sys.executable* and *sys.argv*. close_fds -- Whether all file descriptors other than *stdin*, *stdout*, and *stderr* should be closed A simple example: >>> import lazarus >>> lazarus.default() >>> lazarus.stop() ### Response: def default(restart_cb=None, restart_func=None, close_fds=True): '''Sets up lazarus in default mode. See the :py:func:`custom` function for a more powerful mode of use. The default mode of lazarus is to watch all modules rooted at ``PYTHONPATH`` for changes and restart when they take place. Keyword arguments: restart_cb -- Callback invoked prior to restarting the process; allows for any cleanup to occur prior to restarting. Returning anything other than *None* in the callback will cancel the restart. restart_func -- Function invoked to restart the process. This supplants the default behavior of using *sys.executable* and *sys.argv*. 
close_fds -- Whether all file descriptors other than *stdin*, *stdout*, and *stderr* should be closed A simple example: >>> import lazarus >>> lazarus.default() >>> lazarus.stop() ''' if _active: msg = 'lazarus is already active' raise RuntimeWarning(msg) _python_path = os.getenv('PYTHONPATH') if not _python_path: msg = 'PYTHONPATH is not set' raise RuntimeError(msg) if restart_cb and not callable(restart_cb): msg = 'restart_cb keyword argument is not callable' raise TypeError(msg) if restart_func and not callable(restart_func): msg = 'restart_func keyword argument is not callable' raise TypeError(msg) global _close_fds _close_fds = close_fds try: from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler except ImportError as ie: msg = 'no watchdog support (%s)' % str(ie) raise RuntimeError(msg) class _Handler(FileSystemEventHandler): def __init__(self): self.active = True def dispatch(self, event): if not self.active: return super(_Handler, self).dispatch(event) def all_events(self, event): if is_restart_event(event): cancelled = _restart() if not cancelled: self.active = False def on_created(self, event): self.all_events(event) def on_deleted(self, event): self.all_events(event) def on_modified(self, event): self.all_events(event) def on_moved(self, event): self.all_events(event) global _observer _observer = Observer() handler = _Handler() _observer.schedule(handler, _python_path, recursive=True) global _restart_cb _restart_cb = restart_cb global _restart_func _restart_func = restart_func _activate() _observer.start()
def introspect_table(table):
    """Given a SQLAlchemy Table object, return a Shelf description suitable
    for passing to Shelf.from_config.

    String columns become Dimensions; Integer/Float columns become Metrics.
    """
    shelf_config = {}
    for column in table.columns:
        if isinstance(column.type, String):
            shelf_config[column.name] = {
                'kind': 'Dimension',
                'field': column.name,
            }
        if isinstance(column.type, (Integer, Float)):
            shelf_config[column.name] = {
                'kind': 'Metric',
                'field': column.name,
            }
    return shelf_config
Given a SQLAlchemy Table object, return a Shelf description suitable for passing to Shelf.from_config.
Below is the the instruction that describes the task: ### Input: Given a SQLAlchemy Table object, return a Shelf description suitable for passing to Shelf.from_config. ### Response: def introspect_table(table): """Given a SQLAlchemy Table object, return a Shelf description suitable for passing to Shelf.from_config. """ d = {} for c in table.columns: if isinstance(c.type, String): d[c.name] = {'kind': 'Dimension', 'field': c.name} if isinstance(c.type, (Integer, Float)): d[c.name] = {'kind': 'Metric', 'field': c.name} return d
def _check_parameter_dependencies(self, parameters): """Find out in what order parameters should be called.""" # Warning: parameters only provide reverse dependencies; we reverse them into standard dependencies. # deep_revdeps: set of fields a field depend indirectly upon deep_revdeps = collections.defaultdict(set) # Actual, direct dependencies deps = collections.defaultdict(set) for name, parameter in parameters.items(): if isinstance(parameter, declarations.Parameter): field_revdeps = parameter.get_revdeps(parameters) if not field_revdeps: continue deep_revdeps[name] = set.union(*(deep_revdeps[dep] for dep in field_revdeps)) deep_revdeps[name] |= set(field_revdeps) for dep in field_revdeps: deps[dep].add(name) # Check for cyclical dependencies cyclic = [name for name, field_deps in deep_revdeps.items() if name in field_deps] if cyclic: raise errors.CyclicDefinitionError( "Cyclic definition detected on %r; Params around %s" % (self.factory, ', '.join(cyclic))) return deps
Find out in what order parameters should be called.
Below is the the instruction that describes the task: ### Input: Find out in what order parameters should be called. ### Response: def _check_parameter_dependencies(self, parameters): """Find out in what order parameters should be called.""" # Warning: parameters only provide reverse dependencies; we reverse them into standard dependencies. # deep_revdeps: set of fields a field depend indirectly upon deep_revdeps = collections.defaultdict(set) # Actual, direct dependencies deps = collections.defaultdict(set) for name, parameter in parameters.items(): if isinstance(parameter, declarations.Parameter): field_revdeps = parameter.get_revdeps(parameters) if not field_revdeps: continue deep_revdeps[name] = set.union(*(deep_revdeps[dep] for dep in field_revdeps)) deep_revdeps[name] |= set(field_revdeps) for dep in field_revdeps: deps[dep].add(name) # Check for cyclical dependencies cyclic = [name for name, field_deps in deep_revdeps.items() if name in field_deps] if cyclic: raise errors.CyclicDefinitionError( "Cyclic definition detected on %r; Params around %s" % (self.factory, ', '.join(cyclic))) return deps
def _decompress_dicom(dicom_file, output_file):
    """
    This function can be used to convert a jpeg compressed image to an
    uncompressed one for further conversion

    :param dicom_file: single dicom file to decompress
    :param output_file: path the decompressed copy is written to
    """
    # gdcmconv -w rewrites the file with uncompressed (raw) pixel data.
    gdcmconv_executable = _get_gdcmconv()
    subprocess.check_output([gdcmconv_executable, '-w', dicom_file, output_file])
This function can be used to convert a jpeg compressed image to an uncompressed one for further conversion :param input_file: single dicom file to decompress
Below is the the instruction that describes the task: ### Input: This function can be used to convert a jpeg compressed image to an uncompressed one for further conversion :param input_file: single dicom file to decompress ### Response: def _decompress_dicom(dicom_file, output_file): """ This function can be used to convert a jpeg compressed image to an uncompressed one for further conversion :param input_file: single dicom file to decompress """ gdcmconv_executable = _get_gdcmconv() subprocess.check_output([gdcmconv_executable, '-w', dicom_file, output_file])
def iter(self, start=0, stop=-1, withscores=False, reverse=None):
    """ Return a range of values from sorted set name between
        @start and @stop sorted in ascending order unless @reverse or
        :prop:reversed.

        @start and @stop: #int, can be negative, indicating the end of
            the range.
        @withscores: #bool indicates to return the scores along with the
            members, as a list of |(member, score)| pairs
        @reverse: #bool indicating whether to sort the results descendingly

        -> yields members or |(member, score)| #tuple pairs
    """
    # Explicit reverse argument wins; otherwise fall back to the
    # instance-level default ordering.
    reverse = reverse if reverse is not None else self.reversed
    # Hoist the deserializer lookup out of the loop.
    _loads = self._loads
    for member in self._client.zrange(
            self.key_prefix, start=start, end=stop, withscores=withscores,
            desc=reverse, score_cast_func=self.cast):
        if withscores:
            # zrange yields (member, score) pairs in this mode.
            yield (_loads(member[0]), self.cast(member[1]))
        else:
            yield _loads(member)
Return a range of values from sorted set name between @start and @end sorted in ascending order unless @reverse or :prop:reversed. @start and @end: #int, can be negative, indicating the end of the range. @withscores: #bool indicates to return the scores along with the members, as a list of |(member, score)| pairs @reverse: #bool indicating whether to sort the results descendingly -> yields members or |(member, score)| #tuple pairs
Below is the the instruction that describes the task: ### Input: Return a range of values from sorted set name between @start and @end sorted in ascending order unless @reverse or :prop:reversed. @start and @end: #int, can be negative, indicating the end of the range. @withscores: #bool indicates to return the scores along with the members, as a list of |(member, score)| pairs @reverse: #bool indicating whether to sort the results descendingly -> yields members or |(member, score)| #tuple pairs ### Response: def iter(self, start=0, stop=-1, withscores=False, reverse=None): """ Return a range of values from sorted set name between @start and @end sorted in ascending order unless @reverse or :prop:reversed. @start and @end: #int, can be negative, indicating the end of the range. @withscores: #bool indicates to return the scores along with the members, as a list of |(member, score)| pairs @reverse: #bool indicating whether to sort the results descendingly -> yields members or |(member, score)| #tuple pairs """ reverse = reverse if reverse is not None else self.reversed _loads = self._loads for member in self._client.zrange( self.key_prefix, start=start, end=stop, withscores=withscores, desc=reverse, score_cast_func=self.cast): if withscores: yield (_loads(member[0]), self.cast(member[1])) else: yield _loads(member)
def process():
    """Get process overview.

    Returns a dict with the process memory info under 'info' and the
    current threads under 'threads'.
    """
    return {
        'info': ProcessMemoryInfo(),
        'threads': get_current_threads(),
    }
Get process overview.
Below is the instruction that describes the task: ### Input: Get process overview. ### Response: def process(): """Get process overview.""" pmi = ProcessMemoryInfo() threads = get_current_threads() return dict(info=pmi, threads=threads)
def set(self, name, value, **kwargs):
    """Dict-like set() that also supports optional domain and path args in
    order to resolve naming collisions from using one cookie jar over
    multiple domains.
    """
    # Assigning None unsets the cookie rather than storing a None value.
    if value is None:
        remove_cookie_by_name(
            self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
        return None

    cookie = (
        morsel_to_cookie(value)
        if isinstance(value, Morsel)
        else create_cookie(name, value, **kwargs)
    )
    self.set_cookie(cookie)
    return cookie
Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains.
Below is the the instruction that describes the task: ### Input: Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. ### Response: def set(self, name, value, **kwargs): """Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. """ # support client code that unsets cookies by assignment of a None value: if value is None: remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) return if isinstance(value, Morsel): c = morsel_to_cookie(value) else: c = create_cookie(name, value, **kwargs) self.set_cookie(c) return c
def select_as_dict(self, table_name, columns=None, where=None, extra=None):
    """
    Get data in the database and return fetched data as a
    |OrderedDict| list.

    :param str table_name: |arg_select_table_name|
    :param list columns: |arg_select_as_xx_columns|
    :param where: |arg_select_where|
    :type where: |arg_where_type|
    :param str extra: |arg_select_extra|
    :return: Table data as |OrderedDict| instances.
    :rtype: |list| of |OrderedDict|
    :raises simplesqlite.NullDatabaseConnectionError:
        |raises_check_connection|
    :raises simplesqlite.TableNotFoundError:
        |raises_verify_table_existence|
    :raises simplesqlite.OperationalError: |raises_operational_error|

    :Example:
        :ref:`example-select-as-dict`
    """
    # Delegate to the tabledata selector, then pull out this table's rows.
    tabledata = self.select_as_tabledata(table_name, columns, where, extra)
    return tabledata.as_dict().get(table_name)
Get data in the database and return fetched data as a |OrderedDict| list. :param str table_name: |arg_select_table_name| :param list columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as |OrderedDict| instances. :rtype: |list| of |OrderedDict| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-select-as-dict`
Below is the the instruction that describes the task: ### Input: Get data in the database and return fetched data as a |OrderedDict| list. :param str table_name: |arg_select_table_name| :param list columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as |OrderedDict| instances. :rtype: |list| of |OrderedDict| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-select-as-dict` ### Response: def select_as_dict(self, table_name, columns=None, where=None, extra=None): """ Get data in the database and return fetched data as a |OrderedDict| list. :param str table_name: |arg_select_table_name| :param list columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as |OrderedDict| instances. :rtype: |list| of |OrderedDict| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-select-as-dict` """ return self.select_as_tabledata(table_name, columns, where, extra).as_dict().get(table_name)
def __run_pre_all(self):
    """Execute the pre-all.py and pre-all.sql files if they exist"""
    # Pre scripts run newest-delta-first: with dirs [delta1, delta2],
    # the scripts in delta2 execute before those in delta1.
    for delta_dir in reversed(self.dirs):
        py_script = os.path.join(delta_dir, 'pre-all.py')
        if os.path.isfile(py_script):
            print(' Applying pre-all.py...', end=' ')
            self.__run_py_file(py_script, 'pre-all')
            print('OK')

        sql_script = os.path.join(delta_dir, 'pre-all.sql')
        if os.path.isfile(sql_script):
            print(' Applying pre-all.sql...', end=' ')
            self.__run_sql_file(sql_script)
            print('OK')
Execute the pre-all.py and pre-all.sql files if they exist
Below is the the instruction that describes the task: ### Input: Execute the pre-all.py and pre-all.sql files if they exist ### Response: def __run_pre_all(self): """Execute the pre-all.py and pre-all.sql files if they exist""" # if the list of delta dirs is [delta1, delta2] the pre scripts of delta2 are # executed before the pre scripts of delta1 for d in reversed(self.dirs): pre_all_py_path = os.path.join(d, 'pre-all.py') if os.path.isfile(pre_all_py_path): print(' Applying pre-all.py...', end=' ') self.__run_py_file(pre_all_py_path, 'pre-all') print('OK') pre_all_sql_path = os.path.join(d, 'pre-all.sql') if os.path.isfile(pre_all_sql_path): print(' Applying pre-all.sql...', end=' ') self.__run_sql_file(pre_all_sql_path) print('OK')
def _get_error(self, code, errors, indentation=0): """Get error and show the faulty line + some context Other GLIR implementations may omit this. """ # Init results = [] lines = None if code is not None: lines = [line.strip() for line in code.split('\n')] for error in errors.split('\n'): # Strip; skip empy lines error = error.strip() if not error: continue # Separate line number from description (if we can) linenr, error = self._parse_error(error) if None in (linenr, lines): results.append('%s' % error) else: results.append('on line %i: %s' % (linenr, error)) if linenr > 0 and linenr < len(lines): results.append(' %s' % lines[linenr - 1]) # Add indentation and return results = [' ' * indentation + r for r in results] return '\n'.join(results)
Get error and show the faulty line + some context Other GLIR implementations may omit this.
Below is the the instruction that describes the task: ### Input: Get error and show the faulty line + some context Other GLIR implementations may omit this. ### Response: def _get_error(self, code, errors, indentation=0): """Get error and show the faulty line + some context Other GLIR implementations may omit this. """ # Init results = [] lines = None if code is not None: lines = [line.strip() for line in code.split('\n')] for error in errors.split('\n'): # Strip; skip empy lines error = error.strip() if not error: continue # Separate line number from description (if we can) linenr, error = self._parse_error(error) if None in (linenr, lines): results.append('%s' % error) else: results.append('on line %i: %s' % (linenr, error)) if linenr > 0 and linenr < len(lines): results.append(' %s' % lines[linenr - 1]) # Add indentation and return results = [' ' * indentation + r for r in results] return '\n'.join(results)
def do_archive(self, line):
    """archive <identifier> [identifier ...]
    Mark one or more existing Science Objects as archived."""
    # At least one identifier is required; -1 allows any number more.
    pids = self._split_args(line, 1, -1)
    self._command_processor.science_object_archive(pids)
    message = (
        "Added archive operation for identifier(s) {} to write queue".format(
            ", ".join(pids)
        )
    )
    self._print_info_if_verbose(message)
archive <identifier> [identifier ...] Mark one or more existing Science Objects as archived.
Below is the the instruction that describes the task: ### Input: archive <identifier> [identifier ...] Mark one or more existing Science Objects as archived. ### Response: def do_archive(self, line): """archive <identifier> [identifier ...] Mark one or more existing Science Objects as archived.""" pids = self._split_args(line, 1, -1) self._command_processor.science_object_archive(pids) self._print_info_if_verbose( "Added archive operation for identifier(s) {} to write queue".format( ", ".join(pids) ) )
def lexical_parent(self):
    """Return the lexical parent for this cursor."""
    # Lazily compute and cache the parent on first access (EAFP form).
    try:
        return self._lexical_parent
    except AttributeError:
        self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self)
        return self._lexical_parent
Return the lexical parent for this cursor.
Below is the instruction that describes the task: ### Input: Return the lexical parent for this cursor. ### Response: def lexical_parent(self): """Return the lexical parent for this cursor.""" if not hasattr(self, '_lexical_parent'): self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self) return self._lexical_parent
def action_approve(self):
    """Set a change request as approved.

    Only records in 'draft' or 'to approve' state may be approved, and
    only by users belonging to the page's approver groups. On success the
    record moves to 'approved', the page history head is recomputed, and
    both the change request and the page followers are notified.

    Raises:
        UserError -- if the record is in a non-approvable state, or the
            current user is not an authorized approver.
    """
    for rec in self:
        if rec.state not in ['draft', 'to approve']:
            raise UserError(
                _("Can't approve page in '%s' state.") % rec.state)
        if not rec.am_i_approver:
            # Bug fix: the original literal ended with ': ' and had no
            # placeholder, so applying '%' to it raised TypeError instead
            # of the intended UserError. '%s' receives the group names.
            raise UserError(_(
                'You are not authorized to do this.\r\n'
                'Only approvers with these groups can approve this: %s'
            ) % ', '.join(
                [g.display_name for g in rec.page_id.approver_group_ids]))

        # Update state
        rec.write({
            'state': 'approved',
            'approved_date': fields.datetime.now(),
            'approved_uid': self.env.uid,
        })

        # Trigger computed field update
        rec.page_id._compute_history_head()

        # Notify state change
        rec.message_post(
            subtype='mt_comment',
            body=_(
                'Change request has been approved by %s.'
            ) % (self.env.user.name)
        )

        # Notify followers a new version is available
        rec.page_id.message_post(
            subtype='mt_comment',
            body=_(
                'New version of the document %s approved.'
            ) % (rec.page_id.name)
        )
Set a change request as approved.
Below is the the instruction that describes the task: ### Input: Set a change request as approved. ### Response: def action_approve(self): """Set a change request as approved.""" for rec in self: if rec.state not in ['draft', 'to approve']: raise UserError( _("Can't approve page in '%s' state.") % rec.state) if not rec.am_i_approver: raise UserError(_( 'You are not authorized to do this.\r\n' 'Only approvers with these groups can approve this: ' ) % ', '.join( [g.display_name for g in rec.page_id.approver_group_ids])) # Update state rec.write({ 'state': 'approved', 'approved_date': fields.datetime.now(), 'approved_uid': self.env.uid, }) # Trigger computed field update rec.page_id._compute_history_head() # Notify state change rec.message_post( subtype='mt_comment', body=_( 'Change request has been approved by %s.' ) % (self.env.user.name) ) # Notify followers a new version is available rec.page_id.message_post( subtype='mt_comment', body=_( 'New version of the document %s approved.' ) % (rec.page_id.name) )
def _update_message_request(self, message):
    """Add row keys and row range to given request message

    :type message: class:`data_messages_v2_pb2.ReadRowsRequest`
    :param message: The ``ReadRowsRequest`` protobuf
    """
    # Individual keys are byte-encoded before being appended.
    for row_key in self.row_keys:
        message.rows.row_keys.append(_to_bytes(row_key))

    # Each range contributes its own start/end kwargs to the message.
    for row_range in self.row_ranges:
        range_kwargs = row_range.get_range_kwargs()
        message.rows.row_ranges.add(**range_kwargs)
Add row keys and row range to given request message :type message: class:`data_messages_v2_pb2.ReadRowsRequest` :param message: The ``ReadRowsRequest`` protobuf
Below is the the instruction that describes the task: ### Input: Add row keys and row range to given request message :type message: class:`data_messages_v2_pb2.ReadRowsRequest` :param message: The ``ReadRowsRequest`` protobuf ### Response: def _update_message_request(self, message): """Add row keys and row range to given request message :type message: class:`data_messages_v2_pb2.ReadRowsRequest` :param message: The ``ReadRowsRequest`` protobuf """ for each in self.row_keys: message.rows.row_keys.append(_to_bytes(each)) for each in self.row_ranges: r_kwrags = each.get_range_kwargs() message.rows.row_ranges.add(**r_kwrags)
def itertrain(self, train, valid=None, **kwargs):
    '''Train a model using a training and validation set.

    This method yields a series of monitor values to the caller. After every
    iteration, a pair of monitor dictionaries is generated: one evaluated on
    the training dataset, and another evaluated on the validation dataset.
    The validation monitors might not be updated during every training
    iteration; in this case, the most recent validation monitors will be
    yielded along with the training monitors.

    Parameters
    ----------
    train : :class:`Dataset <theanets.dataset.Dataset>`
        A set of training data for computing updates to model parameters.
    valid : :class:`Dataset <theanets.dataset.Dataset>`
        A set of validation data for computing monitor values and
        determining when the loss has stopped improving.

    Yields
    ------
    training : dict
        A dictionary mapping monitor names to values, evaluated on the
        training dataset.
    validation : dict
        A dictionary containing monitor values evaluated on the validation
        dataset.
    '''
    ifci = itertools.chain.from_iterable

    # first/last unwrap (input, ..., output) batches; bare arrays pass
    # through unchanged.
    def first(x):
        return x[0] if isinstance(x, (tuple, list)) else x

    def last(x):
        return x[-1] if isinstance(x, (tuple, list)) else x

    # Infer input/output dimensionality from the last batch seen.
    odim = idim = None
    for t in train:
        idim = first(t).shape[-1]
        odim = last(t).shape[-1]

    rng = kwargs.get('rng')
    if rng is None or isinstance(rng, int):
        rng = np.random.RandomState(rng)

    # set output (decoding) weights on the network.
    samples = ifci(last(t) for t in train)
    for param in self.network.layers[-1].params:
        shape = param.get_value(borrow=True).shape
        if len(shape) == 2 and shape[1] == odim:
            # Row-normalize a reservoir sample of output vectors.
            arr = np.vstack(SampleTrainer.reservoir(samples, shape[0], rng))
            util.log('setting {}: {}', param.name, shape)
            param.set_value(arr / np.sqrt((arr * arr).sum(axis=1))[:, None])

    # set input (encoding) weights on the network.
    samples = ifci(first(t) for t in train)
    for layer in self.network.layers:
        for param in layer.params:
            shape = param.get_value(borrow=True).shape
            if len(shape) == 2 and shape[0] == idim:
                # Column-normalize a reservoir sample of input vectors.
                arr = np.vstack(SampleTrainer.reservoir(samples, shape[1], rng)).T
                util.log('setting {}: {}', param.name, shape)
                param.set_value(arr / np.sqrt((arr * arr).sum(axis=0)))

    # NOTE(review): 'i' is not defined in this scope, so this lazy
    # generator would raise NameError if it were ever consumed; the
    # result is also never used after this assignment. Confirm whether
    # this line is dead code that can be removed.
    samples = ifci(self.network.feed_forward(
        first(t))[i-1] for t in train)

    yield dict(loss=0), dict(loss=0)
Train a model using a training and validation set. This method yields a series of monitor values to the caller. After every iteration, a pair of monitor dictionaries is generated: one evaluated on the training dataset, and another evaluated on the validation dataset. The validation monitors might not be updated during every training iteration; in this case, the most recent validation monitors will be yielded along with the training monitors. Parameters ---------- train : :class:`Dataset <theanets.dataset.Dataset>` A set of training data for computing updates to model parameters. valid : :class:`Dataset <theanets.dataset.Dataset>` A set of validation data for computing monitor values and determining when the loss has stopped improving. Yields ------ training : dict A dictionary mapping monitor names to values, evaluated on the training dataset. validation : dict A dictionary containing monitor values evaluated on the validation dataset.
Below is the the instruction that describes the task: ### Input: Train a model using a training and validation set. This method yields a series of monitor values to the caller. After every iteration, a pair of monitor dictionaries is generated: one evaluated on the training dataset, and another evaluated on the validation dataset. The validation monitors might not be updated during every training iteration; in this case, the most recent validation monitors will be yielded along with the training monitors. Parameters ---------- train : :class:`Dataset <theanets.dataset.Dataset>` A set of training data for computing updates to model parameters. valid : :class:`Dataset <theanets.dataset.Dataset>` A set of validation data for computing monitor values and determining when the loss has stopped improving. Yields ------ training : dict A dictionary mapping monitor names to values, evaluated on the training dataset. validation : dict A dictionary containing monitor values evaluated on the validation dataset. ### Response: def itertrain(self, train, valid=None, **kwargs): '''Train a model using a training and validation set. This method yields a series of monitor values to the caller. After every iteration, a pair of monitor dictionaries is generated: one evaluated on the training dataset, and another evaluated on the validation dataset. The validation monitors might not be updated during every training iteration; in this case, the most recent validation monitors will be yielded along with the training monitors. Parameters ---------- train : :class:`Dataset <theanets.dataset.Dataset>` A set of training data for computing updates to model parameters. valid : :class:`Dataset <theanets.dataset.Dataset>` A set of validation data for computing monitor values and determining when the loss has stopped improving. Yields ------ training : dict A dictionary mapping monitor names to values, evaluated on the training dataset. 
validation : dict A dictionary containing monitor values evaluated on the validation dataset. ''' ifci = itertools.chain.from_iterable def first(x): return x[0] if isinstance(x, (tuple, list)) else x def last(x): return x[-1] if isinstance(x, (tuple, list)) else x odim = idim = None for t in train: idim = first(t).shape[-1] odim = last(t).shape[-1] rng = kwargs.get('rng') if rng is None or isinstance(rng, int): rng = np.random.RandomState(rng) # set output (decoding) weights on the network. samples = ifci(last(t) for t in train) for param in self.network.layers[-1].params: shape = param.get_value(borrow=True).shape if len(shape) == 2 and shape[1] == odim: arr = np.vstack(SampleTrainer.reservoir(samples, shape[0], rng)) util.log('setting {}: {}', param.name, shape) param.set_value(arr / np.sqrt((arr * arr).sum(axis=1))[:, None]) # set input (encoding) weights on the network. samples = ifci(first(t) for t in train) for layer in self.network.layers: for param in layer.params: shape = param.get_value(borrow=True).shape if len(shape) == 2 and shape[0] == idim: arr = np.vstack(SampleTrainer.reservoir(samples, shape[1], rng)).T util.log('setting {}: {}', param.name, shape) param.set_value(arr / np.sqrt((arr * arr).sum(axis=0))) samples = ifci(self.network.feed_forward( first(t))[i-1] for t in train) yield dict(loss=0), dict(loss=0)
def set_window_option(self, option, value): """ Wrapper for ``$ tmux set-window-option <option> <value>``. Parameters ---------- option : str option to set, e.g. 'aggressive-resize' value : str window option value. True/False will turn in 'on' and 'off', also accepts string of 'on' or 'off' directly. Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption` """ self.server._update_windows() if isinstance(value, bool) and value: value = 'on' elif isinstance(value, bool) and not value: value = 'off' cmd = self.cmd( 'set-window-option', '-t%s:%s' % (self.get('session_id'), self.index), # '-t%s' % self.id, option, value, ) if isinstance(cmd.stderr, list) and len(cmd.stderr): handle_option_error(cmd.stderr[0])
Wrapper for ``$ tmux set-window-option <option> <value>``. Parameters ---------- option : str option to set, e.g. 'aggressive-resize' value : str window option value. True/False will turn into 'on' and 'off', also accepts string of 'on' or 'off' directly. Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption`
Below is the the instruction that describes the task: ### Input: Wrapper for ``$ tmux set-window-option <option> <value>``. Parameters ---------- option : str option to set, e.g. 'aggressive-resize' value : str window option value. True/False will turn in 'on' and 'off', also accepts string of 'on' or 'off' directly. Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption` ### Response: def set_window_option(self, option, value): """ Wrapper for ``$ tmux set-window-option <option> <value>``. Parameters ---------- option : str option to set, e.g. 'aggressive-resize' value : str window option value. True/False will turn in 'on' and 'off', also accepts string of 'on' or 'off' directly. Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption` """ self.server._update_windows() if isinstance(value, bool) and value: value = 'on' elif isinstance(value, bool) and not value: value = 'off' cmd = self.cmd( 'set-window-option', '-t%s:%s' % (self.get('session_id'), self.index), # '-t%s' % self.id, option, value, ) if isinstance(cmd.stderr, list) and len(cmd.stderr): handle_option_error(cmd.stderr[0])
def previousExon(self) : """Returns the previous exon of the transcript, or None if there is none""" if self.number == 0 : return None try : return self.transcript.exons[self.number-1] except IndexError : return None
Returns the previous exon of the transcript, or None if there is none
Below is the instruction that describes the task: ### Input: Returns the previous exon of the transcript, or None if there is none ### Response: def previousExon(self) : """Returns the previous exon of the transcript, or None if there is none""" if self.number == 0 : return None try : return self.transcript.exons[self.number-1] except IndexError : return None
def _should_ignore(self, path): """ Return True iff path should be ignored. """ for ignore in self.options.ignores: if fnmatch.fnmatch(path, ignore): return True return False
Return True iff path should be ignored.
Below is the instruction that describes the task: ### Input: Return True iff path should be ignored. ### Response: def _should_ignore(self, path): """ Return True iff path should be ignored. """ for ignore in self.options.ignores: if fnmatch.fnmatch(path, ignore): return True return False
def priority(self, item): """ The priority of the item depends on the number of entries published in the cache divided by the maximum of entries. """ return '%.1f' % max(self.cache[item.pk][0] / self.max_entries, 0.1)
The priority of the item depends on the number of entries published in the cache divided by the maximum of entries.
Below is the instruction that describes the task: ### Input: The priority of the item depends on the number of entries published in the cache divided by the maximum of entries. ### Response: def priority(self, item): """ The priority of the item depends on the number of entries published in the cache divided by the maximum of entries. """ return '%.1f' % max(self.cache[item.pk][0] / self.max_entries, 0.1)
async def read(self, count=-1): """ :py:func:`asyncio.coroutine` :py:meth:`aioftp.StreamIO.read` proxy """ await self.wait("read") start = _now() data = await super().read(count) self.append("read", data, start) return data
:py:func:`asyncio.coroutine` :py:meth:`aioftp.StreamIO.read` proxy
Below is the instruction that describes the task: ### Input: :py:func:`asyncio.coroutine` :py:meth:`aioftp.StreamIO.read` proxy ### Response: async def read(self, count=-1): """ :py:func:`asyncio.coroutine` :py:meth:`aioftp.StreamIO.read` proxy """ await self.wait("read") start = _now() data = await super().read(count) self.append("read", data, start) return data
def vendorize(vendor_requirements): """ This is the main entry point for vendorizing requirements. It expects a list of tuples that should contain the name of the library and the version. For example, a library ``foo`` with version ``0.0.1`` would look like:: vendor_requirements = [ ('foo', '0.0.1'), ] """ for library in vendor_requirements: if len(library) == 2: name, version = library cmd = None elif len(library) == 3: # a possible cmd we need to run name, version, cmd = library vendor_library(name, version, cmd)
This is the main entry point for vendorizing requirements. It expects a list of tuples that should contain the name of the library and the version. For example, a library ``foo`` with version ``0.0.1`` would look like:: vendor_requirements = [ ('foo', '0.0.1'), ]
Below is the the instruction that describes the task: ### Input: This is the main entry point for vendorizing requirements. It expects a list of tuples that should contain the name of the library and the version. For example, a library ``foo`` with version ``0.0.1`` would look like:: vendor_requirements = [ ('foo', '0.0.1'), ] ### Response: def vendorize(vendor_requirements): """ This is the main entry point for vendorizing requirements. It expects a list of tuples that should contain the name of the library and the version. For example, a library ``foo`` with version ``0.0.1`` would look like:: vendor_requirements = [ ('foo', '0.0.1'), ] """ for library in vendor_requirements: if len(library) == 2: name, version = library cmd = None elif len(library) == 3: # a possible cmd we need to run name, version, cmd = library vendor_library(name, version, cmd)
def _search_show_id(self, series, year=None): """Search the show id from the `series` and `year`. :param str series: series of the episode. :param year: year of the series, if any. :type year: int :return: the show id, if found. :rtype: int """ # addic7ed doesn't support search with quotes series = series.replace('\'', ' ') # build the params series_year = '%s %d' % (series, year) if year is not None else series params = {'search': series_year, 'Submit': 'Search'} # make the search logger.info('Searching show ids with %r', params) r = self.session.get(self.server_url + 'search.php', params=params, timeout=10) r.raise_for_status() if r.status_code == 304: raise TooManyRequests() soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) # get the suggestion suggestion = soup.select('span.titulo > a[href^="/show/"]') if not suggestion: logger.warning('Show id not found: no suggestion') return None if not sanitize(suggestion[0].i.text.replace('\'', ' ')) == sanitize(series_year): logger.warning('Show id not found: suggestion does not match') return None show_id = int(suggestion[0]['href'][6:]) logger.debug('Found show id %d', show_id) return show_id
Search the show id from the `series` and `year`. :param str series: series of the episode. :param year: year of the series, if any. :type year: int :return: the show id, if found. :rtype: int
Below is the the instruction that describes the task: ### Input: Search the show id from the `series` and `year`. :param str series: series of the episode. :param year: year of the series, if any. :type year: int :return: the show id, if found. :rtype: int ### Response: def _search_show_id(self, series, year=None): """Search the show id from the `series` and `year`. :param str series: series of the episode. :param year: year of the series, if any. :type year: int :return: the show id, if found. :rtype: int """ # addic7ed doesn't support search with quotes series = series.replace('\'', ' ') # build the params series_year = '%s %d' % (series, year) if year is not None else series params = {'search': series_year, 'Submit': 'Search'} # make the search logger.info('Searching show ids with %r', params) r = self.session.get(self.server_url + 'search.php', params=params, timeout=10) r.raise_for_status() if r.status_code == 304: raise TooManyRequests() soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) # get the suggestion suggestion = soup.select('span.titulo > a[href^="/show/"]') if not suggestion: logger.warning('Show id not found: no suggestion') return None if not sanitize(suggestion[0].i.text.replace('\'', ' ')) == sanitize(series_year): logger.warning('Show id not found: suggestion does not match') return None show_id = int(suggestion[0]['href'][6:]) logger.debug('Found show id %d', show_id) return show_id
def update_xml_element(self): """ Updates the xml element contents to matches the instance contents. :returns: Updated XML element. :rtype: lxml.etree._Element """ super(Description, self).update_xml_element() if hasattr(self, 'lang'): self.xml_element.set( '{http://www.w3.org/XML/1998/namespace}lang', self.lang) if hasattr(self, 'override'): self.xml_element.set('override', str(self.override)) return self.xml_element
Updates the xml element contents to matches the instance contents. :returns: Updated XML element. :rtype: lxml.etree._Element
Below is the the instruction that describes the task: ### Input: Updates the xml element contents to matches the instance contents. :returns: Updated XML element. :rtype: lxml.etree._Element ### Response: def update_xml_element(self): """ Updates the xml element contents to matches the instance contents. :returns: Updated XML element. :rtype: lxml.etree._Element """ super(Description, self).update_xml_element() if hasattr(self, 'lang'): self.xml_element.set( '{http://www.w3.org/XML/1998/namespace}lang', self.lang) if hasattr(self, 'override'): self.xml_element.set('override', str(self.override)) return self.xml_element
def _translate_nd(self, source: mx.nd.NDArray, source_length: int, restrict_lexicon: Optional[lexicon.TopKLexicon], raw_constraints: List[Optional[constrained.RawConstraintList]], raw_avoid_list: List[Optional[constrained.RawConstraintList]], max_output_lengths: mx.nd.NDArray) -> List[Translation]: """ Translates source of source_length, given a bucket_key. :param source: Source ids. Shape: (batch_size, bucket_key, num_factors). :param source_length: Bucket key. :param restrict_lexicon: Lexicon to use for vocabulary restriction. :param raw_constraints: A list of optional constraint lists. :return: Sequence of translations. """ return self._get_best_from_beam(*self._beam_search(source, source_length, restrict_lexicon, raw_constraints, raw_avoid_list, max_output_lengths))
Translates source of source_length, given a bucket_key. :param source: Source ids. Shape: (batch_size, bucket_key, num_factors). :param source_length: Bucket key. :param restrict_lexicon: Lexicon to use for vocabulary restriction. :param raw_constraints: A list of optional constraint lists. :return: Sequence of translations.
Below is the the instruction that describes the task: ### Input: Translates source of source_length, given a bucket_key. :param source: Source ids. Shape: (batch_size, bucket_key, num_factors). :param source_length: Bucket key. :param restrict_lexicon: Lexicon to use for vocabulary restriction. :param raw_constraints: A list of optional constraint lists. :return: Sequence of translations. ### Response: def _translate_nd(self, source: mx.nd.NDArray, source_length: int, restrict_lexicon: Optional[lexicon.TopKLexicon], raw_constraints: List[Optional[constrained.RawConstraintList]], raw_avoid_list: List[Optional[constrained.RawConstraintList]], max_output_lengths: mx.nd.NDArray) -> List[Translation]: """ Translates source of source_length, given a bucket_key. :param source: Source ids. Shape: (batch_size, bucket_key, num_factors). :param source_length: Bucket key. :param restrict_lexicon: Lexicon to use for vocabulary restriction. :param raw_constraints: A list of optional constraint lists. :return: Sequence of translations. """ return self._get_best_from_beam(*self._beam_search(source, source_length, restrict_lexicon, raw_constraints, raw_avoid_list, max_output_lengths))
def show_fabric_trunk_info_input_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_fabric_trunk_info = ET.Element("show_fabric_trunk_info") config = show_fabric_trunk_info input = ET.SubElement(show_fabric_trunk_info, "input") rbridge_id = ET.SubElement(input, "rbridge-id") rbridge_id.text = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def show_fabric_trunk_info_input_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_fabric_trunk_info = ET.Element("show_fabric_trunk_info") config = show_fabric_trunk_info input = ET.SubElement(show_fabric_trunk_info, "input") rbridge_id = ET.SubElement(input, "rbridge-id") rbridge_id.text = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
def addScalarBar3D( obj=None, at=0, pos=(0, 0, 0), normal=(0, 0, 1), sx=0.1, sy=2, nlabels=9, ncols=256, cmap=None, c=None, alpha=1, ): """Draw a 3D scalar bar. ``obj`` input can be: - a list of numbers, - a list of two numbers in the form `(min, max)`, - a ``vtkActor`` already containing a set of scalars associated to vertices or cells, - if ``None`` the last actor in the list of actors will be used. .. hint:: |scalbar| |mesh_coloring.py|_ """ from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk vp = settings.plotter_instance if c is None: # automatic black or white c = (0.8, 0.8, 0.8) if numpy.sum(colors.getColor(vp.backgrcol)) > 1.5: c = (0.2, 0.2, 0.2) c = colors.getColor(c) gap = 0.4 # space btw nrs and scale vtkscalars_name = "" if obj is None: obj = vp.lastActor() if isinstance(obj, vtk.vtkActor): poly = obj.GetMapper().GetInput() vtkscalars = poly.GetPointData().GetScalars() if vtkscalars is None: vtkscalars = poly.GetCellData().GetScalars() if vtkscalars is None: print("Error in addScalarBar3D: actor has no scalar array.", [obj]) exit() npscalars = vtk_to_numpy(vtkscalars) vmin, vmax = numpy.min(npscalars), numpy.max(npscalars) vtkscalars_name = vtkscalars.GetName().split("_")[-1] elif utils.isSequence(obj): vmin, vmax = numpy.min(obj), numpy.max(obj) vtkscalars_name = "jet" else: print("Error in addScalarBar3D(): input must be vtkActor or list.", type(obj)) exit() if cmap is None: cmap = vtkscalars_name # build the color scale part scale = shapes.Grid([-sx * gap, 0, 0], c=c, alpha=alpha, sx=sx, sy=sy, resx=1, resy=ncols) scale.GetProperty().SetRepresentationToSurface() cscals = scale.cellCenters()[:, 1] def _cellColors(scale, scalars, cmap, alpha): mapper = scale.GetMapper() cpoly = mapper.GetInput() n = len(scalars) lut = vtk.vtkLookupTable() lut.SetNumberOfTableValues(n) lut.Build() for i in range(n): r, g, b = colors.colorMap(i, cmap, 0, n) lut.SetTableValue(i, r, g, b, alpha) arr = numpy_to_vtk(numpy.ascontiguousarray(scalars), deep=True) 
vmin, vmax = numpy.min(scalars), numpy.max(scalars) mapper.SetScalarRange(vmin, vmax) mapper.SetLookupTable(lut) mapper.ScalarVisibilityOn() cpoly.GetCellData().SetScalars(arr) _cellColors(scale, cscals, cmap, alpha) # build text nlabels = numpy.min([nlabels, ncols]) tlabs = numpy.linspace(vmin, vmax, num=nlabels, endpoint=True) tacts = [] prec = (vmax - vmin) / abs(vmax + vmin) * 2 prec = int(3 + abs(numpy.log10(prec + 1))) for i, t in enumerate(tlabs): tx = utils.precision(t, prec) y = -sy / 1.98 + sy * i / (nlabels - 1) a = shapes.Text(tx, pos=[sx * gap, y, 0], s=sy / 50, c=c, alpha=alpha, depth=0) a.PickableOff() tacts.append(a) sact = Assembly([scale] + tacts) nax = numpy.linalg.norm(normal) if nax: normal = numpy.array(normal) / nax theta = numpy.arccos(normal[2]) phi = numpy.arctan2(normal[1], normal[0]) sact.RotateZ(phi * 57.3) sact.RotateY(theta * 57.3) sact.SetPosition(pos) if not vp.renderers[at]: save_int = vp.interactive vp.show(interactive=0) vp.interactive = save_int vp.renderers[at].AddActor(sact) vp.renderers[at].Render() sact.PickableOff() vp.scalarbars.append(sact) if isinstance(obj, Actor): obj.scalarbar_actor = sact return sact
Draw a 3D scalar bar. ``obj`` input can be: - a list of numbers, - a list of two numbers in the form `(min, max)`, - a ``vtkActor`` already containing a set of scalars associated to vertices or cells, - if ``None`` the last actor in the list of actors will be used. .. hint:: |scalbar| |mesh_coloring.py|_
Below is the the instruction that describes the task: ### Input: Draw a 3D scalar bar. ``obj`` input can be: - a list of numbers, - a list of two numbers in the form `(min, max)`, - a ``vtkActor`` already containing a set of scalars associated to vertices or cells, - if ``None`` the last actor in the list of actors will be used. .. hint:: |scalbar| |mesh_coloring.py|_ ### Response: def addScalarBar3D( obj=None, at=0, pos=(0, 0, 0), normal=(0, 0, 1), sx=0.1, sy=2, nlabels=9, ncols=256, cmap=None, c=None, alpha=1, ): """Draw a 3D scalar bar. ``obj`` input can be: - a list of numbers, - a list of two numbers in the form `(min, max)`, - a ``vtkActor`` already containing a set of scalars associated to vertices or cells, - if ``None`` the last actor in the list of actors will be used. .. hint:: |scalbar| |mesh_coloring.py|_ """ from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk vp = settings.plotter_instance if c is None: # automatic black or white c = (0.8, 0.8, 0.8) if numpy.sum(colors.getColor(vp.backgrcol)) > 1.5: c = (0.2, 0.2, 0.2) c = colors.getColor(c) gap = 0.4 # space btw nrs and scale vtkscalars_name = "" if obj is None: obj = vp.lastActor() if isinstance(obj, vtk.vtkActor): poly = obj.GetMapper().GetInput() vtkscalars = poly.GetPointData().GetScalars() if vtkscalars is None: vtkscalars = poly.GetCellData().GetScalars() if vtkscalars is None: print("Error in addScalarBar3D: actor has no scalar array.", [obj]) exit() npscalars = vtk_to_numpy(vtkscalars) vmin, vmax = numpy.min(npscalars), numpy.max(npscalars) vtkscalars_name = vtkscalars.GetName().split("_")[-1] elif utils.isSequence(obj): vmin, vmax = numpy.min(obj), numpy.max(obj) vtkscalars_name = "jet" else: print("Error in addScalarBar3D(): input must be vtkActor or list.", type(obj)) exit() if cmap is None: cmap = vtkscalars_name # build the color scale part scale = shapes.Grid([-sx * gap, 0, 0], c=c, alpha=alpha, sx=sx, sy=sy, resx=1, resy=ncols) scale.GetProperty().SetRepresentationToSurface() 
cscals = scale.cellCenters()[:, 1] def _cellColors(scale, scalars, cmap, alpha): mapper = scale.GetMapper() cpoly = mapper.GetInput() n = len(scalars) lut = vtk.vtkLookupTable() lut.SetNumberOfTableValues(n) lut.Build() for i in range(n): r, g, b = colors.colorMap(i, cmap, 0, n) lut.SetTableValue(i, r, g, b, alpha) arr = numpy_to_vtk(numpy.ascontiguousarray(scalars), deep=True) vmin, vmax = numpy.min(scalars), numpy.max(scalars) mapper.SetScalarRange(vmin, vmax) mapper.SetLookupTable(lut) mapper.ScalarVisibilityOn() cpoly.GetCellData().SetScalars(arr) _cellColors(scale, cscals, cmap, alpha) # build text nlabels = numpy.min([nlabels, ncols]) tlabs = numpy.linspace(vmin, vmax, num=nlabels, endpoint=True) tacts = [] prec = (vmax - vmin) / abs(vmax + vmin) * 2 prec = int(3 + abs(numpy.log10(prec + 1))) for i, t in enumerate(tlabs): tx = utils.precision(t, prec) y = -sy / 1.98 + sy * i / (nlabels - 1) a = shapes.Text(tx, pos=[sx * gap, y, 0], s=sy / 50, c=c, alpha=alpha, depth=0) a.PickableOff() tacts.append(a) sact = Assembly([scale] + tacts) nax = numpy.linalg.norm(normal) if nax: normal = numpy.array(normal) / nax theta = numpy.arccos(normal[2]) phi = numpy.arctan2(normal[1], normal[0]) sact.RotateZ(phi * 57.3) sact.RotateY(theta * 57.3) sact.SetPosition(pos) if not vp.renderers[at]: save_int = vp.interactive vp.show(interactive=0) vp.interactive = save_int vp.renderers[at].AddActor(sact) vp.renderers[at].Render() sact.PickableOff() vp.scalarbars.append(sact) if isinstance(obj, Actor): obj.scalarbar_actor = sact return sact
def registry_storage(cls): """ Get registry storage :return: WTaskDependencyRegistryStorage """ if cls.__registry_storage__ is None: raise ValueError('__registry_storage__ must be defined') if isinstance(cls.__registry_storage__, WTaskDependencyRegistryStorage) is False: raise TypeError( "Property '__registry_storage__' is invalid (must derived from WTaskRegistryBase)" ) return cls.__registry_storage__
Get registry storage :return: WTaskDependencyRegistryStorage
Below is the instruction that describes the task: ### Input: Get registry storage :return: WTaskDependencyRegistryStorage ### Response: def registry_storage(cls): """ Get registry storage :return: WTaskDependencyRegistryStorage """ if cls.__registry_storage__ is None: raise ValueError('__registry_storage__ must be defined') if isinstance(cls.__registry_storage__, WTaskDependencyRegistryStorage) is False: raise TypeError( "Property '__registry_storage__' is invalid (must derived from WTaskRegistryBase)" ) return cls.__registry_storage__
def receive_message(self, message, data): # noqa: E501 pylint: disable=too-many-return-statements """ Called when a multizone message is received. """ if data[MESSAGE_TYPE] == TYPE_DEVICE_ADDED: uuid = data['device']['deviceId'] name = data['device']['name'] self._add_member(uuid, name) return True if data[MESSAGE_TYPE] == TYPE_DEVICE_REMOVED: uuid = data['deviceId'] self._remove_member(uuid) return True if data[MESSAGE_TYPE] == TYPE_DEVICE_UPDATED: uuid = data['device']['deviceId'] name = data['device']['name'] self._add_member(uuid, name) return True if data[MESSAGE_TYPE] == TYPE_MULTIZONE_STATUS: members = data['status']['devices'] members = \ {member['deviceId']: member['name'] for member in members} removed_members = \ list(set(self._members.keys())-set(members.keys())) added_members = list(set(members.keys())-set(self._members.keys())) _LOGGER.debug("(%s) Added members %s, Removed members: %s", self._uuid, added_members, removed_members) for uuid in removed_members: self._remove_member(uuid) for uuid in added_members: self._add_member(uuid, members[uuid]) for listener in list(self._status_listeners): listener.multizone_status_received() return True if data[MESSAGE_TYPE] == TYPE_SESSION_UPDATED: # A temporary group has been formed return True if data[MESSAGE_TYPE] == TYPE_CASTING_GROUPS: # Answer to GET_CASTING_GROUPS return True return False
Called when a multizone message is received.
Below is the the instruction that describes the task: ### Input: Called when a multizone message is received. ### Response: def receive_message(self, message, data): # noqa: E501 pylint: disable=too-many-return-statements """ Called when a multizone message is received. """ if data[MESSAGE_TYPE] == TYPE_DEVICE_ADDED: uuid = data['device']['deviceId'] name = data['device']['name'] self._add_member(uuid, name) return True if data[MESSAGE_TYPE] == TYPE_DEVICE_REMOVED: uuid = data['deviceId'] self._remove_member(uuid) return True if data[MESSAGE_TYPE] == TYPE_DEVICE_UPDATED: uuid = data['device']['deviceId'] name = data['device']['name'] self._add_member(uuid, name) return True if data[MESSAGE_TYPE] == TYPE_MULTIZONE_STATUS: members = data['status']['devices'] members = \ {member['deviceId']: member['name'] for member in members} removed_members = \ list(set(self._members.keys())-set(members.keys())) added_members = list(set(members.keys())-set(self._members.keys())) _LOGGER.debug("(%s) Added members %s, Removed members: %s", self._uuid, added_members, removed_members) for uuid in removed_members: self._remove_member(uuid) for uuid in added_members: self._add_member(uuid, members[uuid]) for listener in list(self._status_listeners): listener.multizone_status_received() return True if data[MESSAGE_TYPE] == TYPE_SESSION_UPDATED: # A temporary group has been formed return True if data[MESSAGE_TYPE] == TYPE_CASTING_GROUPS: # Answer to GET_CASTING_GROUPS return True return False
def error(self, message, *args, **kwargs): """Log error with stack trace and locals information. By default, enables stack trace information in logging messages, so that stacktrace and locals appear in Sentry. """ kwargs.setdefault('extra', {}).setdefault('stack', True) return self.logger.error(message, *args, **kwargs)
Log error with stack trace and locals information. By default, enables stack trace information in logging messages, so that stacktrace and locals appear in Sentry.
Below is the instruction that describes the task: ### Input: Log error with stack trace and locals information. By default, enables stack trace information in logging messages, so that stacktrace and locals appear in Sentry. ### Response: def error(self, message, *args, **kwargs): """Log error with stack trace and locals information. By default, enables stack trace information in logging messages, so that stacktrace and locals appear in Sentry. """ kwargs.setdefault('extra', {}).setdefault('stack', True) return self.logger.error(message, *args, **kwargs)
def resize(self): """ Get target size for a cropped image and do the resizing if we got anything usable. """ resized_size = self.get_resized_size() if not resized_size: return self.image = self.image.resize(resized_size, Image.ANTIALIAS)
Get target size for a cropped image and do the resizing if we got anything usable.
Below is the instruction that describes the task: ### Input: Get target size for a cropped image and do the resizing if we got anything usable. ### Response: def resize(self): """ Get target size for a cropped image and do the resizing if we got anything usable. """ resized_size = self.get_resized_size() if not resized_size: return self.image = self.image.resize(resized_size, Image.ANTIALIAS)
def strip_wsgi(request): """Strip WSGI data out of the request META data.""" meta = copy(request.META) for key in meta: if key[:4] == 'wsgi': meta[key] = None return meta
Strip WSGI data out of the request META data.
Below is the instruction that describes the task: ### Input: Strip WSGI data out of the request META data. ### Response: def strip_wsgi(request): """Strip WSGI data out of the request META data.""" meta = copy(request.META) for key in meta: if key[:4] == 'wsgi': meta[key] = None return meta
def _handle_location(self, location): """Return an element located at location with flexible args. Args: location: String xpath to use in an Element.find search OR an Element (which is simply returned). Returns: The found Element. Raises: ValueError if the location is a string that results in a find of None. """ if not isinstance(location, ElementTree.Element): element = self.find(location) if element is None: raise ValueError("Invalid path!") else: element = location return element
Return an element located at location with flexible args. Args: location: String xpath to use in an Element.find search OR an Element (which is simply returned). Returns: The found Element. Raises: ValueError if the location is a string that results in a find of None.
Below is the the instruction that describes the task: ### Input: Return an element located at location with flexible args. Args: location: String xpath to use in an Element.find search OR an Element (which is simply returned). Returns: The found Element. Raises: ValueError if the location is a string that results in a find of None. ### Response: def _handle_location(self, location): """Return an element located at location with flexible args. Args: location: String xpath to use in an Element.find search OR an Element (which is simply returned). Returns: The found Element. Raises: ValueError if the location is a string that results in a find of None. """ if not isinstance(location, ElementTree.Element): element = self.find(location) if element is None: raise ValueError("Invalid path!") else: element = location return element
def log_request_fail(self, method, full_url, body, duration, status_code=None, exception=None): """ Log an unsuccessful API call. """ logger.warning( '%s %s [status:%s request:%.3fs]', method, full_url, status_code or 'N/A', duration, exc_info=exception is not None ) if body and not isinstance(body, dict): body = body.decode('utf-8') logger.debug('> %s', body)
Log an unsuccessful API call.
Below is the the instruction that describes the task: ### Input: Log an unsuccessful API call. ### Response: def log_request_fail(self, method, full_url, body, duration, status_code=None, exception=None): """ Log an unsuccessful API call. """ logger.warning( '%s %s [status:%s request:%.3fs]', method, full_url, status_code or 'N/A', duration, exc_info=exception is not None ) if body and not isinstance(body, dict): body = body.decode('utf-8') logger.debug('> %s', body)
def re_flags_str(flags, custom_flags): """Convert regexp flags to string. Parameters ---------- flags : `int` Flags. custom_flags : `int` Custom flags. Returns ------- `str` Flag string. """ res = '' for flag in RE_FLAGS: if flags & getattr(re, flag): res += flag for flag in RE_CUSTOM_FLAGS: if custom_flags & getattr(ReFlags, flag): res += flag return res
Convert regexp flags to string. Parameters ---------- flags : `int` Flags. custom_flags : `int` Custom flags. Returns ------- `str` Flag string.
Below is the the instruction that describes the task: ### Input: Convert regexp flags to string. Parameters ---------- flags : `int` Flags. custom_flags : `int` Custom flags. Returns ------- `str` Flag string. ### Response: def re_flags_str(flags, custom_flags): """Convert regexp flags to string. Parameters ---------- flags : `int` Flags. custom_flags : `int` Custom flags. Returns ------- `str` Flag string. """ res = '' for flag in RE_FLAGS: if flags & getattr(re, flag): res += flag for flag in RE_CUSTOM_FLAGS: if custom_flags & getattr(ReFlags, flag): res += flag return res
def _collect_data(directory, input_ext, transcription_ext): """Traverses directory collecting input and target files.""" # Directory from string to tuple pair of strings # key: the filepath to a datafile including the datafile's basename. Example, # if the datafile was "/path/to/datafile.wav" then the key would be # "/path/to/datafile" # value: a pair of strings (media_filepath, label) data_files = {} for root, _, filenames in os.walk(directory): transcripts = [filename for filename in filenames if transcription_ext in filename] for transcript in transcripts: transcript_path = os.path.join(root, transcript) with open(transcript_path, "r") as transcript_file: for transcript_line in transcript_file: line_contents = transcript_line.strip().split(" ", 1) media_base, label = line_contents key = os.path.join(root, media_base) assert key not in data_files media_name = "%s.%s"%(media_base, input_ext) media_path = os.path.join(root, media_name) data_files[key] = (media_base, media_path, label) return data_files
Traverses directory collecting input and target files.
Below is the the instruction that describes the task: ### Input: Traverses directory collecting input and target files. ### Response: def _collect_data(directory, input_ext, transcription_ext): """Traverses directory collecting input and target files.""" # Directory from string to tuple pair of strings # key: the filepath to a datafile including the datafile's basename. Example, # if the datafile was "/path/to/datafile.wav" then the key would be # "/path/to/datafile" # value: a pair of strings (media_filepath, label) data_files = {} for root, _, filenames in os.walk(directory): transcripts = [filename for filename in filenames if transcription_ext in filename] for transcript in transcripts: transcript_path = os.path.join(root, transcript) with open(transcript_path, "r") as transcript_file: for transcript_line in transcript_file: line_contents = transcript_line.strip().split(" ", 1) media_base, label = line_contents key = os.path.join(root, media_base) assert key not in data_files media_name = "%s.%s"%(media_base, input_ext) media_path = os.path.join(root, media_name) data_files[key] = (media_base, media_path, label) return data_files
def add_compound(self, compound):
    """Append a compound variant's information to this variant.

    The compound dict is added to ``self['compounds']``.

    Args:
        compound (dict): A compound dictionary
    """
    logger.debug(
        "Adding compound {0} to variant {1}".format(compound, self['variant_id'])
    )
    self['compounds'].append(compound)
Add the information of a compound variant This adds a compound dict to variant['compounds'] Args: compound (dict): A compound dictionary
Below is the the instruction that describes the task: ### Input: Add the information of a compound variant This adds a compound dict to variant['compounds'] Args: compound (dict): A compound dictionary ### Response: def add_compound(self, compound): """Add the information of a compound variant This adds a compound dict to variant['compounds'] Args: compound (dict): A compound dictionary """ logger.debug("Adding compound {0} to variant {1}".format( compound, self['variant_id'])) self['compounds'].append(compound)
def service_table(format='simple', authenticated=False):
    """
    Returns a string depicting all services currently installed.
    """
    # Optionally restrict to services the user can authenticate against.
    if authenticated:
        services = ExchangeUniverse.get_authenticated_services()
    else:
        services = ALL_SERVICES

    # HTML output wraps the API URL in an anchor tag; other formats keep it raw.
    if format == 'html':
        def linkify(url):
            return "<a href='{0}' target='_blank'>{0}</a>".format(url)
    else:
        def linkify(url):
            return url

    rows = [
        [
            service.service_id,
            service.__name__,
            linkify(service.api_homepage.format(
                domain=service.domain, protocol=service.protocol
            )),
            ", ".join(service.supported_cryptos or []),
        ]
        for service in sorted(services, key=lambda s: s.service_id)
    ]
    return tabulate(rows, headers=['ID', 'Name', 'URL', 'Supported Currencies'],
                    tablefmt=format)
Returns a string depicting all services currently installed.
Below is the the instruction that describes the task: ### Input: Returns a string depicting all services currently installed. ### Response: def service_table(format='simple', authenticated=False): """ Returns a string depicting all services currently installed. """ if authenticated: all_services = ExchangeUniverse.get_authenticated_services() else: all_services = ALL_SERVICES if format == 'html': linkify = lambda x: "<a href='{0}' target='_blank'>{0}</a>".format(x) else: linkify = lambda x: x ret = [] for service in sorted(all_services, key=lambda x: x.service_id): ret.append([ service.service_id, service.__name__, linkify(service.api_homepage.format( domain=service.domain, protocol=service.protocol )), ", ".join(service.supported_cryptos or []) ]) return tabulate(ret, headers=['ID', 'Name', 'URL', 'Supported Currencies'], tablefmt=format)
def POST(self, courseid):  # pylint: disable=arguments-differ
    """ POST request """
    # Resolve the course (get_course handles unknown/invalid ids).
    course = self.get_course(courseid)
    user_input = web.input()
    # Only honour the unregister action when the course allows it.
    if "unregister" in user_input and course.allow_unregister():
        self.user_manager.course_unregister_user(course, self.user_manager.session_username())
        # web.py redirects by raising seeother; send the user back to their course list.
        raise web.seeother(self.app.get_homepath() + '/mycourses')
    return self.show_page(course)
POST request
Below is the the instruction that describes the task: ### Input: POST request ### Response: def POST(self, courseid): # pylint: disable=arguments-differ """ POST request """ course = self.get_course(courseid) user_input = web.input() if "unregister" in user_input and course.allow_unregister(): self.user_manager.course_unregister_user(course, self.user_manager.session_username()) raise web.seeother(self.app.get_homepath() + '/mycourses') return self.show_page(course)
def is_list_of_dict_like(obj, attr=('keys', 'items')):
    """Test whether *obj* is a non-empty sequence whose items are all dict-like.

    Returns False for empty sequences, for objects without a length, and
    whenever any item fails the ``is_dict_like`` check.
    """
    try:
        if not len(obj):
            return False
        return all(is_dict_like(item, attr) for item in obj)
    except Exception:
        # Anything without a usable length (or a failing item check) is not
        # a list of dict-likes.
        return False
test if object is a list only containing dict like items
Below is the the instruction that describes the task: ### Input: test if object is a list only containing dict like items ### Response: def is_list_of_dict_like(obj, attr=('keys', 'items')): """test if object is a list only containing dict like items """ try: if len(obj) == 0: return False return all([is_dict_like(i, attr) for i in obj]) except Exception: return False
def rotate(obj, axis, angle, origin=None):
    '''
    Rotation around unit vector following the right hand rule

    Parameters:
        obj : obj to be rotated (e.g. neurite, neuron).
            Must implement a transform method.
        axis : unit vector for the axis of rotation
        angle : rotation angle in rads
        origin : optional pivot point handed to PivotRotation (None means
            the default pivot used by PivotRotation)

    Returns:
        A copy of the object with the applied translation.

    Raises:
        NotImplementedError: if obj does not implement a transform method.
    '''
    # Build the rotation (direction-cosine) matrix via Rodrigues' formula.
    R = _rodrigues_to_dcm(axis, angle)
    try:
        return obj.transform(PivotRotation(R, origin))
    except AttributeError:
        # NOTE(review): this also masks AttributeErrors raised *inside*
        # obj.transform itself — confirm that is acceptable.
        raise NotImplementedError
Rotation around unit vector following the right hand rule Parameters: obj : obj to be rotated (e.g. neurite, neuron). Must implement a transform method. axis : unit vector for the axis of rotation angle : rotation angle in rads Returns: A copy of the object with the applied translation.
Below is the the instruction that describes the task: ### Input: Rotation around unit vector following the right hand rule Parameters: obj : obj to be rotated (e.g. neurite, neuron). Must implement a transform method. axis : unit vector for the axis of rotation angle : rotation angle in rads Returns: A copy of the object with the applied translation. ### Response: def rotate(obj, axis, angle, origin=None): ''' Rotation around unit vector following the right hand rule Parameters: obj : obj to be rotated (e.g. neurite, neuron). Must implement a transform method. axis : unit vector for the axis of rotation angle : rotation angle in rads Returns: A copy of the object with the applied translation. ''' R = _rodrigues_to_dcm(axis, angle) try: return obj.transform(PivotRotation(R, origin)) except AttributeError: raise NotImplementedError
def get(self, key_use, key_type="", owner="", kid=None, **kwargs):
    """
    Get all keys that matches a set of search criteria

    :param key_use: A key useful for this usage (enc, dec, sig, ver)
    :param key_type: Type of key (rsa, ec, oct, ..)
    :param owner: Who is the owner of the keys, "" == me (default)
    :param kid: A Key Identifier
    :return: A possibly empty list of keys
    """
    # Normalise the four usages down to the two values stored on keys:
    # decryption/encryption map to "enc", signing/verification to "sig".
    if key_use in ["dec", "enc"]:
        use = "enc"
    else:
        use = "sig"

    _kj = None
    if owner != "":
        # Look up the owner's key bundles, tolerating a trailing-slash
        # mismatch between the stored issuer id and the one requested.
        try:
            _kj = self.issuer_keys[owner]
        except KeyError:
            if owner.endswith("/"):
                try:
                    _kj = self.issuer_keys[owner[:-1]]
                except KeyError:
                    pass
            else:
                try:
                    _kj = self.issuer_keys[owner + "/"]
                except KeyError:
                    pass
    else:
        # owner == "" means my own keys.
        try:
            _kj = self.issuer_keys[owner]
        except KeyError:
            pass

    if _kj is None:
        return []

    lst = []
    for bundle in _kj:
        if key_type:
            # For verification/decryption also consider inactive keys:
            # material signed/encrypted earlier may still arrive.
            if key_use in ['ver', 'dec']:
                _bkeys = bundle.get(key_type, only_active=False)
            else:
                _bkeys = bundle.get(key_type)
        else:
            _bkeys = bundle.keys()
        for key in _bkeys:
            if key.inactive_since and key_use != "sig":
                # Skip inactive keys unless for signature verification
                continue
            # A key with no declared use matches any requested use.
            if not key.use or use == key.use:
                if kid:
                    # A specific kid was asked for: first match wins.
                    if key.kid == kid:
                        lst.append(key)
                        break
                    else:
                        continue
                else:
                    lst.append(key)

    # if elliptic curve, have to check if I have a key of the right curve
    if key_type == "EC" and "alg" in kwargs:
        # e.g. alg "ES256" -> curve name "P-256".
        name = "P-{}".format(kwargs["alg"][2:])  # the type
        _lst = []
        for key in lst:
            if name != key.crv:
                continue
            _lst.append(key)
        lst = _lst

    if use == 'enc' and key_type == 'oct' and owner != '':
        # Add my symmetric keys
        for kb in self.issuer_keys['']:
            for key in kb.get(key_type):
                if key.inactive_since:
                    continue
                if not key.use or key.use == use:
                    lst.append(key)

    return lst
Get all keys that matches a set of search criteria :param key_use: A key useful for this usage (enc, dec, sig, ver) :param key_type: Type of key (rsa, ec, oct, ..) :param owner: Who is the owner of the keys, "" == me (default) :param kid: A Key Identifier :return: A possibly empty list of keys
Below is the the instruction that describes the task: ### Input: Get all keys that matches a set of search criteria :param key_use: A key useful for this usage (enc, dec, sig, ver) :param key_type: Type of key (rsa, ec, oct, ..) :param owner: Who is the owner of the keys, "" == me (default) :param kid: A Key Identifier :return: A possibly empty list of keys ### Response: def get(self, key_use, key_type="", owner="", kid=None, **kwargs): """ Get all keys that matches a set of search criteria :param key_use: A key useful for this usage (enc, dec, sig, ver) :param key_type: Type of key (rsa, ec, oct, ..) :param owner: Who is the owner of the keys, "" == me (default) :param kid: A Key Identifier :return: A possibly empty list of keys """ if key_use in ["dec", "enc"]: use = "enc" else: use = "sig" _kj = None if owner != "": try: _kj = self.issuer_keys[owner] except KeyError: if owner.endswith("/"): try: _kj = self.issuer_keys[owner[:-1]] except KeyError: pass else: try: _kj = self.issuer_keys[owner + "/"] except KeyError: pass else: try: _kj = self.issuer_keys[owner] except KeyError: pass if _kj is None: return [] lst = [] for bundle in _kj: if key_type: if key_use in ['ver', 'dec']: _bkeys = bundle.get(key_type, only_active=False) else: _bkeys = bundle.get(key_type) else: _bkeys = bundle.keys() for key in _bkeys: if key.inactive_since and key_use != "sig": # Skip inactive keys unless for signature verification continue if not key.use or use == key.use: if kid: if key.kid == kid: lst.append(key) break else: continue else: lst.append(key) # if elliptic curve, have to check if I have a key of the right curve if key_type == "EC" and "alg" in kwargs: name = "P-{}".format(kwargs["alg"][2:]) # the type _lst = [] for key in lst: if name != key.crv: continue _lst.append(key) lst = _lst if use == 'enc' and key_type == 'oct' and owner != '': # Add my symmetric keys for kb in self.issuer_keys['']: for key in kb.get(key_type): if key.inactive_since: continue if not key.use or 
key.use == use: lst.append(key) return lst
def _to_relative_path(self, path):
    """:return: Version of path relative to our git directory or raise ValueError
    if it is not within our git directory"""
    # Relative paths are assumed to already be relative to the working tree.
    if not osp.isabs(path):
        return path
    if self.repo.bare:
        raise InvalidGitRepositoryError("require non-bare repository")
    # NOTE(review): plain string-prefix check — a sibling path such as
    # "/repo2/x" would pass for a working tree at "/repo"; confirm callers
    # never hand in such paths.
    if not path.startswith(self.repo.working_tree_dir):
        raise ValueError("Absolute path %r is not in git repository at %r" % (path, self.repo.working_tree_dir))
    return os.path.relpath(path, self.repo.working_tree_dir)
:return: Version of path relative to our git directory or raise ValueError if it is not within our git direcotory
Below is the the instruction that describes the task: ### Input: :return: Version of path relative to our git directory or raise ValueError if it is not within our git direcotory ### Response: def _to_relative_path(self, path): """:return: Version of path relative to our git directory or raise ValueError if it is not within our git direcotory""" if not osp.isabs(path): return path if self.repo.bare: raise InvalidGitRepositoryError("require non-bare repository") if not path.startswith(self.repo.working_tree_dir): raise ValueError("Absolute path %r is not in git repository at %r" % (path, self.repo.working_tree_dir)) return os.path.relpath(path, self.repo.working_tree_dir)
def _hardware_count(self): """ Amount of hardware resources. :return: integer """ return self._counts.get("hardware") + self._counts.get("serial") + self._counts.get("mbed")
Amount of hardware resources. :return: integer
Below is the instruction that describes the task: ### Input: Amount of hardware resources. :return: integer ### Response: def _hardware_count(self): """ Amount of hardware resources. :return: integer """ return self._counts.get("hardware") + self._counts.get("serial") + self._counts.get("mbed")
def _CreateFeedMapping(client, feed_details): """Creates the feed mapping for DSA page feeds. Args: client: an AdWordsClient instance. feed_details: a _DSAFeedDetails instance. """ # Get the FeedMappingService. feed_mapping_service = client.GetService('FeedMappingService', version='v201809') # Create the operation. operation = { # Create the feed mapping. 'operand': { 'criterionType': DSA_PAGE_FEED_CRITERION_TYPE, 'feedId': feed_details.feed_id, # Map the feedAttributeIds to the fieldId constants. 'attributeFieldMappings': [ { 'feedAttributeId': feed_details.url_attribute_id, 'fieldId': DSA_PAGE_URLS_FIELD_ID }, { 'feedAttributeId': feed_details.label_attribute_id, 'fieldId': DSA_LABEL_FIELD_ID } ] }, 'operator': 'ADD' } # Add the feed mapping. feed_mapping_service.mutate([operation])
Creates the feed mapping for DSA page feeds. Args: client: an AdWordsClient instance. feed_details: a _DSAFeedDetails instance.
Below is the the instruction that describes the task: ### Input: Creates the feed mapping for DSA page feeds. Args: client: an AdWordsClient instance. feed_details: a _DSAFeedDetails instance. ### Response: def _CreateFeedMapping(client, feed_details): """Creates the feed mapping for DSA page feeds. Args: client: an AdWordsClient instance. feed_details: a _DSAFeedDetails instance. """ # Get the FeedMappingService. feed_mapping_service = client.GetService('FeedMappingService', version='v201809') # Create the operation. operation = { # Create the feed mapping. 'operand': { 'criterionType': DSA_PAGE_FEED_CRITERION_TYPE, 'feedId': feed_details.feed_id, # Map the feedAttributeIds to the fieldId constants. 'attributeFieldMappings': [ { 'feedAttributeId': feed_details.url_attribute_id, 'fieldId': DSA_PAGE_URLS_FIELD_ID }, { 'feedAttributeId': feed_details.label_attribute_id, 'fieldId': DSA_LABEL_FIELD_ID } ] }, 'operator': 'ADD' } # Add the feed mapping. feed_mapping_service.mutate([operation])
def list_contributors(self, project_id=None, language_code=None):
    """Return the list of contributors for a project/language.

    Both arguments are optional and are forwarded to the API call.
    Returns an empty list when the response carries no contributors.
    """
    response = self._run(
        url_path="contributors/list",
        id=project_id,
        language=language_code
    )
    return response['result'].get('contributors', [])
Returns the list of contributors
Below is the instruction that describes the task: ### Input: Returns the list of contributors ### Response: def list_contributors(self, project_id=None, language_code=None): """ Returns the list of contributors """ data = self._run( url_path="contributors/list", id=project_id, language=language_code ) return data['result'].get('contributors', [])
def InitializeDownload(self, http_request, http=None, client=None):
    """Initialize this download by making a request.

    Args:
      http_request: The HttpRequest to use to initialize this download.
      http: The httplib2.Http instance for this request.
      client: If provided, let this client process the final URL before
          sending any additional requests. If client is provided and
          http is not, client.http will be used instead.
    """
    self.EnsureUninitialized()
    if http is None and client is None:
        raise exceptions.UserError('Must provide client or http.')
    http = http or client.http
    if client is not None:
        http_request.url = client.FinalizeTransferUrl(http_request.url)
    url = http_request.url
    if self.auto_transfer:
        # Fetch the first chunk as part of initialization.
        end_byte = self.__ComputeEndByte(0)
        self.__SetRangeHeader(http_request, 0, end_byte)
        response = http_wrapper.MakeRequest(
            self.bytes_http or http, http_request)
        if response.status_code not in self._ACCEPTABLE_STATUSES:
            raise exceptions.HttpError.FromResponse(response)
        self.__initial_response = response
        self.__SetTotal(response.info)
        # Prefer the server-provided content location for later requests.
        url = response.info.get('content-location', response.request_url)
    if client is not None:
        url = client.FinalizeTransferUrl(url)
    self._Initialize(http, url)
    # Unless the user has requested otherwise, we want to just
    # go ahead and pump the bytes now.
    if self.auto_transfer:
        self.StreamInChunks()
Initialize this download by making a request. Args: http_request: The HttpRequest to use to initialize this download. http: The httplib2.Http instance for this request. client: If provided, let this client process the final URL before sending any additional requests. If client is provided and http is not, client.http will be used instead.
Below is the the instruction that describes the task: ### Input: Initialize this download by making a request. Args: http_request: The HttpRequest to use to initialize this download. http: The httplib2.Http instance for this request. client: If provided, let this client process the final URL before sending any additional requests. If client is provided and http is not, client.http will be used instead. ### Response: def InitializeDownload(self, http_request, http=None, client=None): """Initialize this download by making a request. Args: http_request: The HttpRequest to use to initialize this download. http: The httplib2.Http instance for this request. client: If provided, let this client process the final URL before sending any additional requests. If client is provided and http is not, client.http will be used instead. """ self.EnsureUninitialized() if http is None and client is None: raise exceptions.UserError('Must provide client or http.') http = http or client.http if client is not None: http_request.url = client.FinalizeTransferUrl(http_request.url) url = http_request.url if self.auto_transfer: end_byte = self.__ComputeEndByte(0) self.__SetRangeHeader(http_request, 0, end_byte) response = http_wrapper.MakeRequest( self.bytes_http or http, http_request) if response.status_code not in self._ACCEPTABLE_STATUSES: raise exceptions.HttpError.FromResponse(response) self.__initial_response = response self.__SetTotal(response.info) url = response.info.get('content-location', response.request_url) if client is not None: url = client.FinalizeTransferUrl(url) self._Initialize(http, url) # Unless the user has requested otherwise, we want to just # go ahead and pump the bytes now. if self.auto_transfer: self.StreamInChunks()
def lookup_family_by_name(name):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L106.

    Positional arguments:
    name -- string.

    Returns:
    genl_ops class instance or None.
    """
    # First registered ops whose name matches, or None if there is none.
    matches = (ops for ops in nl_list_for_each_entry(genl_ops(), genl_ops_list, 'o_list')
               if ops.o_name == name)
    return next(matches, None)
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L106. Positional arguments: name -- string. Returns: genl_ops class instance or None.
Below is the the instruction that describes the task: ### Input: https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L106. Positional arguments: name -- string. Returns: genl_ops class instance or None. ### Response: def lookup_family_by_name(name): """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L106. Positional arguments: name -- string. Returns: genl_ops class instance or None. """ for ops in nl_list_for_each_entry(genl_ops(), genl_ops_list, 'o_list'): if ops.o_name == name: return ops return None
def is_unitless(ds, variable):
    '''
    Returns true if the variable is unitless

    Note units of '1' are considered whole numbers or parts but still
    represent physical units and not the absence of units.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param str variable: Name of the variable
    '''
    var = ds.variables[variable]
    # A missing ``units`` attribute counts the same as an explicit None.
    units = getattr(var, 'units', None)
    return units is None or units == ''
Returns true if the variable is unitless Note units of '1' are considered whole numbers or parts but still represent physical units and not the absence of units. :param netCDF4.Dataset ds: An open netCDF dataset :param str variable: Name of the variable
Below is the the instruction that describes the task: ### Input: Returns true if the variable is unitless Note units of '1' are considered whole numbers or parts but still represent physical units and not the absence of units. :param netCDF4.Dataset ds: An open netCDF dataset :param str variable: Name of the variable ### Response: def is_unitless(ds, variable): ''' Returns true if the variable is unitless Note units of '1' are considered whole numbers or parts but still represent physical units and not the absence of units. :param netCDF4.Dataset ds: An open netCDF dataset :param str variable: Name of the variable ''' units = getattr(ds.variables[variable], 'units', None) return units is None or units == ''
def __getChannelId(self):
    """
    Obtain channel id for channel name, if present in ``self.search_params``.

    Mutates ``self.search_params['channelId']`` in place: replaces the
    channel *name* with the resolved id, or deletes the key entirely when
    no channel can be found. Performs up to two network requests.
    """
    if not self.search_params.get("channelId"):
        return
    # First attempt: resolve the name via the channels endpoint (forUsername).
    api_fixed_url = "https://www.googleapis.com/youtube/v3/channels?part=id&maxResults=1&fields=items%2Fid&"
    url = api_fixed_url + urlencode({"key": self.api_key, "forUsername": self.search_params["channelId"]})
    get = requests.get(url).json()
    try:
        self.search_params["channelId"] = get['items'][0]['id']
        return  # got it
    except IndexError:
        pass  # try searching now...
    # Fallback: a channel search using the name as a free-text query.
    api_fixed_url = "https://www.googleapis.com/youtube/v3/search?part=snippet&type=channel&fields=items%2Fid&"
    url = api_fixed_url + urlencode({"key": self.api_key, "q": self.search_params['channelId']})
    get = requests.get(url).json()
    try:
        self.search_params["channelId"] = get['items'][0]['id']['channelId']
    except IndexError:
        # No channel found at all: drop the filter so the search proceeds
        # without a channel restriction.
        del self.search_params["channelId"]
Obtain channel id for channel name, if present in ``self.search_params``.
Below is the the instruction that describes the task: ### Input: Obtain channel id for channel name, if present in ``self.search_params``. ### Response: def __getChannelId(self): """ Obtain channel id for channel name, if present in ``self.search_params``. """ if not self.search_params.get("channelId"): return api_fixed_url = "https://www.googleapis.com/youtube/v3/channels?part=id&maxResults=1&fields=items%2Fid&" url = api_fixed_url + urlencode({"key": self.api_key, "forUsername": self.search_params["channelId"]}) get = requests.get(url).json() try: self.search_params["channelId"] = get['items'][0]['id'] return # got it except IndexError: pass # try searching now... api_fixed_url = "https://www.googleapis.com/youtube/v3/search?part=snippet&type=channel&fields=items%2Fid&" url = api_fixed_url + urlencode({"key": self.api_key, "q": self.search_params['channelId']}) get = requests.get(url).json() try: self.search_params["channelId"] = get['items'][0]['id']['channelId'] except IndexError: del self.search_params["channelId"]
def _add_to_publish_stack(self, exchange, routing_key, message, properties):
    """Temporarily add the message to the stack to publish to RabbitMQ

    :param str exchange: The exchange to publish to
    :param str routing_key: The routing key to publish with
    :param str message: The message body
    :param pika.BasicProperties properties: The message properties
    """
    # NOTE(review): the stack is module-global, so it is shared by every
    # instance and is not thread-safe — confirm that is intended.
    global message_stack
    message_stack.append((exchange, routing_key, message, properties))
Temporarily add the message to the stack to publish to RabbitMQ :param str exchange: The exchange to publish to :param str routing_key: The routing key to publish with :param str message: The message body :param pika.BasicProperties: The message properties
Below is the the instruction that describes the task: ### Input: Temporarily add the message to the stack to publish to RabbitMQ :param str exchange: The exchange to publish to :param str routing_key: The routing key to publish with :param str message: The message body :param pika.BasicProperties: The message properties ### Response: def _add_to_publish_stack(self, exchange, routing_key, message, properties): """Temporarily add the message to the stack to publish to RabbitMQ :param str exchange: The exchange to publish to :param str routing_key: The routing key to publish with :param str message: The message body :param pika.BasicProperties: The message properties """ global message_stack message_stack.append((exchange, routing_key, message, properties))
def plot_embedding(coordinates, partition=None, add_sphere=False, point_size=8,
                   colours=None, labels=None, legend=True, outfile=False,
                   **kwargs):
    """
    Plot a 2D / 3D scatterplot of coordinates, optionally coloured by
    group membership.

    Args:
        coordinates: numpy array or treeCl.CoordinateMatrix -
            The coordinates of the points to plot. The number of columns
            determines the number of dimensions in the plot.
        add_sphere: bool -
            Add a wireframe sphere to a 3D plot. Spectral clustering places
            points on the surface of a unit sphere.
        colours: list of rgb hexes, or 'auto', or None -
            Colours to use to colour the points, as a list of RGB hex values.
            If None, defaults (colorbrewer set3). If 'auto', generates a set
            of colours equally spaced from the colour wheel.
        labels: Tuple(xlab, ylab, title, zlab) -
            Plot labels. Must be given in the above order. Missing options
            will be replaced by None. E.g. to set the title:
            (None, None, "Some points")
        outfile: str -
            Save figure to this filename

    Returns:
        The matplotlib Figure that was drawn.
    """
    if isinstance(coordinates, CoordinateMatrix):
        coordinates = coordinates.values
    # At most three dimensions can be drawn.
    dimensions = min(3, coordinates.shape[1])

    # Default: a single group containing every point.
    partition = (partition or Partition(tuple([0] * len(coordinates))))
    ngrp = partition.num_groups()

    if colours is None:
        colours = SET2
    elif colours == 'auto':
        colours = ggColorSlice(ngrp)
    # Cycle the palette so any number of groups gets a colour.
    colour_cycle = itertools.cycle(colours)
    colours = np.array([hex2color(c) for c in itertools.islice(colour_cycle, ngrp)])

    if labels is None:
        xlab, ylab, zlab, title = None, None, None, None
    else:
        # Pad short tuples with None, then unpack in (x, y, title, z) order.
        if isinstance(labels, (tuple, list)):
            labels = list(labels[:4])
            labels.extend([None]*(4-len(labels)))
        xlab, ylab, title, zlab = labels

    fig = plt.figure()
    if dimensions == 3:
        ax = fig.add_subplot(111, projection='3d')
        if add_sphere:
            ax = _add_sphere(ax)
    else:
        ax = fig.add_subplot(111)

    # One scatter call per group so each gets its own colour and legend entry.
    members = partition.get_membership()
    for grp in range(ngrp):
        index = np.array(members[grp])
        points = coordinates[index, :dimensions].T
        ax.scatter(*points, s=point_size, c=colours[grp], edgecolor=None,
                   label='Group {}'.format(grp+1), **kwargs)

    if xlab:
        ax.set_xlabel(xlab)
    if ylab:
        ax.set_ylabel(ylab)
    if zlab:
        ax.set_zlabel(zlab)
    if title:
        ax.set_title(title)
    if legend:
        plt.legend()
    if outfile:
        # NOTE(review): outfile is used as a bare basename — ".pdf" is always
        # appended, even if the caller already included an extension.
        fig.savefig('{0}.pdf'.format(outfile))
    return fig
plot_embedding(coordinates, partition=None, add_sphere=False, point_size=8, colours=None, labels=None, legend=True, outfile=False, **kwargs): Plot a 2D / 3D scatterplot of coordinates, optionally coloured by group membership. Args: coordinates: numpy array or treeCl.CoordinateMatrix - The coordinates of the points to plot. The number of columns determines the number of dimensions in the plot. add_sphere: bool - Add a wireframe sphere to a 3D plot. Spectral clustering places points on the surface of a unit sphere. colours: list of rgb hexes, or 'auto', or None - Colours to use to colour the points, as a list of RGB hex values. If None, defaults (colorbrewer set3). If 'auto', generates a set of colours equally spaced from the colour wheel. labels: Tuple(xlab, ylab, title, zlab) - Plot labels. Must be given in the above order. Missing options will be replaced by None. E.g. to set the title: (None, None, "Some points") outfile: str - Save figure to this filename
Below is the the instruction that describes the task: ### Input: plot_embedding(coordinates, partition=None, add_sphere=False, point_size=8, colours=None, labels=None, legend=True, outfile=False, **kwargs): Plot a 2D / 3D scatterplot of coordinates, optionally coloured by group membership. Args: coordinates: numpy array or treeCl.CoordinateMatrix - The coordinates of the points to plot. The number of columns determines the number of dimensions in the plot. add_sphere: bool - Add a wireframe sphere to a 3D plot. Spectral clustering places points on the surface of a unit sphere. colours: list of rgb hexes, or 'auto', or None - Colours to use to colour the points, as a list of RGB hex values. If None, defaults (colorbrewer set3). If 'auto', generates a set of colours equally spaced from the colour wheel. labels: Tuple(xlab, ylab, title, zlab) - Plot labels. Must be given in the above order. Missing options will be replaced by None. E.g. to set the title: (None, None, "Some points") outfile: str - Save figure to this filename ### Response: def plot_embedding(coordinates, partition=None, add_sphere=False, point_size=8, colours=None, labels=None, legend=True, outfile=False, **kwargs): """ plot_embedding(coordinates, partition=None, add_sphere=False, point_size=8, colours=None, labels=None, legend=True, outfile=False, **kwargs): Plot a 2D / 3D scatterplot of coordinates, optionally coloured by group membership. Args: coordinates: numpy array or treeCl.CoordinateMatrix - The coordinates of the points to plot. The number of columns determines the number of dimensions in the plot. add_sphere: bool - Add a wireframe sphere to a 3D plot. Spectral clustering places points on the surface of a unit sphere. colours: list of rgb hexes, or 'auto', or None - Colours to use to colour the points, as a list of RGB hex values. If None, defaults (colorbrewer set3). If 'auto', generates a set of colours equally spaced from the colour wheel. 
labels: Tuple(xlab, ylab, title, zlab) - Plot labels. Must be given in the above order. Missing options will be replaced by None. E.g. to set the title: (None, None, "Some points") outfile: str - Save figure to this filename """ if isinstance(coordinates, CoordinateMatrix): coordinates = coordinates.values dimensions = min(3, coordinates.shape[1]) partition = (partition or Partition(tuple([0] * len(coordinates)))) ngrp = partition.num_groups() if colours is None: colours = SET2 elif colours == 'auto': colours = ggColorSlice(ngrp) colour_cycle = itertools.cycle(colours) colours = np.array([hex2color(c) for c in itertools.islice(colour_cycle, ngrp)]) if labels is None: xlab, ylab, zlab, title = None, None, None, None else: if isinstance(labels, (tuple, list)): labels = list(labels[:4]) labels.extend([None]*(4-len(labels))) xlab, ylab, title, zlab = labels fig = plt.figure() if dimensions == 3: ax = fig.add_subplot(111, projection='3d') if add_sphere: ax = _add_sphere(ax) else: ax = fig.add_subplot(111) members = partition.get_membership() for grp in range(ngrp): index = np.array(members[grp]) points = coordinates[index,:dimensions].T ax.scatter(*points, s=point_size, c=colours[grp], edgecolor=None, label='Group {}'.format(grp+1), **kwargs) if xlab: ax.set_xlabel(xlab) if ylab: ax.set_ylabel(ylab) if zlab: ax.set_zlabel(zlab) if title: ax.set_title(title) if legend: plt.legend() if outfile: fig.savefig('{0}.pdf'.format(outfile)) return fig
def nack(self, delivery_tag, multiple=False, requeue=False):
    '''Send a nack to the broker.'''
    # Method frame class 60 / method 120 is basic.nack.
    frame_args = Writer()
    frame_args.write_longlong(delivery_tag).write_bits(multiple, requeue)
    self.send_frame(MethodFrame(self.channel_id, 60, 120, frame_args))
Send a nack to the broker.
Below is the the instruction that describes the task: ### Input: Send a nack to the broker. ### Response: def nack(self, delivery_tag, multiple=False, requeue=False): '''Send a nack to the broker.''' args = Writer() args.write_longlong(delivery_tag).\ write_bits(multiple, requeue) self.send_frame(MethodFrame(self.channel_id, 60, 120, args))
def init_app(self, app):
    """Initialize actions with the app or blueprint.

    :param app: the Flask application or blueprint object
    :type app: :class:`~flask.Flask` or :class:`~flask.Blueprint`

    Examples::

        api = Api()
        api.add_resource(...)
        api.init_app(blueprint)
    """
    try:
        # Assume this is a blueprint and defer initialization
        if app._got_registered_once is True:
            raise ValueError("""Blueprint is already registered with an app.""")
        app.record(self._deferred_blueprint_init)
    except AttributeError:
        # Not a blueprint (no _got_registered_once / record): it must be a
        # full Flask app, so initialize immediately.
        self._init_app(app)
    else:
        # Deferred registration succeeded: remember the blueprint.
        self.blueprint = app
Initialize actions with the app or blueprint. :param app: the Flask application or blueprint object :type app: :class:`~flask.Flask` or :class:`~flask.Blueprint` Examples:: api = Api() api.add_resource(...) api.init_app(blueprint)
Below is the the instruction that describes the task: ### Input: Initialize actions with the app or blueprint. :param app: the Flask application or blueprint object :type app: :class:`~flask.Flask` or :class:`~flask.Blueprint` Examples:: api = Api() api.add_resource(...) api.init_app(blueprint) ### Response: def init_app(self, app): """Initialize actions with the app or blueprint. :param app: the Flask application or blueprint object :type app: :class:`~flask.Flask` or :class:`~flask.Blueprint` Examples:: api = Api() api.add_resource(...) api.init_app(blueprint) """ try: # Assume this is a blueprint and defer initialization if app._got_registered_once is True: raise ValueError("""Blueprint is already registered with an app.""") app.record(self._deferred_blueprint_init) except AttributeError: self._init_app(app) else: self.blueprint = app
def retweeted_tweet(self): """ The retweeted Tweet as a Tweet object If the Tweet is not a Retweet, return None If the Retweet payload cannot be loaded as a Tweet, this will raise a `NotATweetError` Returns: Tweet: A Tweet representing the retweeted status (or None) (see tweet_embeds.get_retweet, this is that value as a Tweet) Raises: NotATweetError: if retweeted tweet is malformed """ retweet = tweet_embeds.get_retweeted_tweet(self) if retweet is not None: try: return Tweet(retweet) except NotATweetError as nate: raise(NotATweetError("The retweet payload appears malformed." + " Failed with '{}'".format(nate))) else: return None
The retweeted Tweet as a Tweet object If the Tweet is not a Retweet, return None If the Retweet payload cannot be loaded as a Tweet, this will raise a `NotATweetError` Returns: Tweet: A Tweet representing the retweeted status (or None) (see tweet_embeds.get_retweet, this is that value as a Tweet) Raises: NotATweetError: if retweeted tweet is malformed
Below is the the instruction that describes the task: ### Input: The retweeted Tweet as a Tweet object If the Tweet is not a Retweet, return None If the Retweet payload cannot be loaded as a Tweet, this will raise a `NotATweetError` Returns: Tweet: A Tweet representing the retweeted status (or None) (see tweet_embeds.get_retweet, this is that value as a Tweet) Raises: NotATweetError: if retweeted tweet is malformed ### Response: def retweeted_tweet(self): """ The retweeted Tweet as a Tweet object If the Tweet is not a Retweet, return None If the Retweet payload cannot be loaded as a Tweet, this will raise a `NotATweetError` Returns: Tweet: A Tweet representing the retweeted status (or None) (see tweet_embeds.get_retweet, this is that value as a Tweet) Raises: NotATweetError: if retweeted tweet is malformed """ retweet = tweet_embeds.get_retweeted_tweet(self) if retweet is not None: try: return Tweet(retweet) except NotATweetError as nate: raise(NotATweetError("The retweet payload appears malformed." + " Failed with '{}'".format(nate))) else: return None
def get_healthcheck(value): """ Converts input into a :class:`HealthCheck` tuple. Input can be passed as string, tuple, list, or a dictionary. If set to ``None``, the health check will be set to ``NONE``, i.e. override an existing configuration from the image. :param value: Health check input. :type value: unicode | str | tuple | list | NoneType :return: HealthCheck tuple :rtype: HealthCheck """ if isinstance(value, HealthCheck): return value elif isinstance(value, six.string_types + (lazy_type,)) or uses_type_registry(value): return HealthCheck(value) elif isinstance(value, (tuple, list)): return HealthCheck(*value) elif isinstance(value, dict): return HealthCheck(**value) raise ValueError( "Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__))
Converts input into a :class:`HealthCheck` tuple. Input can be passed as string, tuple, list, or a dictionary. If set to ``None``, the health check will be set to ``NONE``, i.e. override an existing configuration from the image. :param value: Health check input. :type value: unicode | str | tuple | list | NoneType :return: HealthCheck tuple :rtype: HealthCheck
Below is the the instruction that describes the task: ### Input: Converts input into a :class:`HealthCheck` tuple. Input can be passed as string, tuple, list, or a dictionary. If set to ``None``, the health check will be set to ``NONE``, i.e. override an existing configuration from the image. :param value: Health check input. :type value: unicode | str | tuple | list | NoneType :return: HealthCheck tuple :rtype: HealthCheck ### Response: def get_healthcheck(value): """ Converts input into a :class:`HealthCheck` tuple. Input can be passed as string, tuple, list, or a dictionary. If set to ``None``, the health check will be set to ``NONE``, i.e. override an existing configuration from the image. :param value: Health check input. :type value: unicode | str | tuple | list | NoneType :return: HealthCheck tuple :rtype: HealthCheck """ if isinstance(value, HealthCheck): return value elif isinstance(value, six.string_types + (lazy_type,)) or uses_type_registry(value): return HealthCheck(value) elif isinstance(value, (tuple, list)): return HealthCheck(*value) elif isinstance(value, dict): return HealthCheck(**value) raise ValueError( "Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__))
def create_subscription( self, topic_name, subscription_name, lock_duration=30, requires_session=None, default_message_time_to_live=None, dead_lettering_on_message_expiration=None, dead_lettering_on_filter_evaluation_exceptions=None, enable_batched_operations=None, max_delivery_count=None): """Create a subscription entity. :param topic_name: The name of the topic under which to create the subscription. :param subscription_name: The name of the new subscription. :type subscription_name: str :param lock_duration: The lock durection in seconds for each message in the subscription. :type lock_duration: int :param requires_session: Whether the subscription will be sessionful, and therefore require all message to have a Session ID and be received by a sessionful receiver. Default value is False. :type requires_session: bool :param default_message_time_to_live: The length of time a message will remain in the subscription before it is either discarded or moved to the dead letter queue. :type default_message_time_to_live: ~datetime.timedelta :param dead_lettering_on_message_expiration: Whether to move expired messages to the dead letter queue. Default value is False. :type dead_lettering_on_message_expiration: bool :param dead_lettering_on_filter_evaluation_exceptions: Whether to move messages that error on filtering into the dead letter queue. Default is False, and the messages will be discarded. :type dead_lettering_on_filter_evaluation_exceptions: bool :param max_delivery_count: The maximum number of times a message will attempt to be delivered before it is moved to the dead letter queue. :type max_delivery_count: int :param enable_batched_operations: :type: enable_batched_operations: bool :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found. :raises: ~azure.common.AzureConflictHttpError if a queue of the same name already exists. 
""" sub_properties = Subscription( lock_duration="PT{}S".format(int(lock_duration)), requires_session=requires_session, default_message_time_to_live=default_message_time_to_live, dead_lettering_on_message_expiration=dead_lettering_on_message_expiration, dead_lettering_on_filter_evaluation_exceptions=dead_lettering_on_filter_evaluation_exceptions, max_delivery_count=max_delivery_count, enable_batched_operations=enable_batched_operations) try: return self.mgmt_client.create_subscription( topic_name, subscription_name, subscription=sub_properties, fail_on_exist=True) except requests.exceptions.ConnectionError as e: raise ServiceBusConnectionError("Namespace: {} not found".format(self.service_namespace), e)
Create a subscription entity. :param topic_name: The name of the topic under which to create the subscription. :param subscription_name: The name of the new subscription. :type subscription_name: str :param lock_duration: The lock durection in seconds for each message in the subscription. :type lock_duration: int :param requires_session: Whether the subscription will be sessionful, and therefore require all message to have a Session ID and be received by a sessionful receiver. Default value is False. :type requires_session: bool :param default_message_time_to_live: The length of time a message will remain in the subscription before it is either discarded or moved to the dead letter queue. :type default_message_time_to_live: ~datetime.timedelta :param dead_lettering_on_message_expiration: Whether to move expired messages to the dead letter queue. Default value is False. :type dead_lettering_on_message_expiration: bool :param dead_lettering_on_filter_evaluation_exceptions: Whether to move messages that error on filtering into the dead letter queue. Default is False, and the messages will be discarded. :type dead_lettering_on_filter_evaluation_exceptions: bool :param max_delivery_count: The maximum number of times a message will attempt to be delivered before it is moved to the dead letter queue. :type max_delivery_count: int :param enable_batched_operations: :type: enable_batched_operations: bool :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found. :raises: ~azure.common.AzureConflictHttpError if a queue of the same name already exists.
Below is the the instruction that describes the task: ### Input: Create a subscription entity. :param topic_name: The name of the topic under which to create the subscription. :param subscription_name: The name of the new subscription. :type subscription_name: str :param lock_duration: The lock durection in seconds for each message in the subscription. :type lock_duration: int :param requires_session: Whether the subscription will be sessionful, and therefore require all message to have a Session ID and be received by a sessionful receiver. Default value is False. :type requires_session: bool :param default_message_time_to_live: The length of time a message will remain in the subscription before it is either discarded or moved to the dead letter queue. :type default_message_time_to_live: ~datetime.timedelta :param dead_lettering_on_message_expiration: Whether to move expired messages to the dead letter queue. Default value is False. :type dead_lettering_on_message_expiration: bool :param dead_lettering_on_filter_evaluation_exceptions: Whether to move messages that error on filtering into the dead letter queue. Default is False, and the messages will be discarded. :type dead_lettering_on_filter_evaluation_exceptions: bool :param max_delivery_count: The maximum number of times a message will attempt to be delivered before it is moved to the dead letter queue. :type max_delivery_count: int :param enable_batched_operations: :type: enable_batched_operations: bool :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found. :raises: ~azure.common.AzureConflictHttpError if a queue of the same name already exists. 
### Response: def create_subscription( self, topic_name, subscription_name, lock_duration=30, requires_session=None, default_message_time_to_live=None, dead_lettering_on_message_expiration=None, dead_lettering_on_filter_evaluation_exceptions=None, enable_batched_operations=None, max_delivery_count=None): """Create a subscription entity. :param topic_name: The name of the topic under which to create the subscription. :param subscription_name: The name of the new subscription. :type subscription_name: str :param lock_duration: The lock durection in seconds for each message in the subscription. :type lock_duration: int :param requires_session: Whether the subscription will be sessionful, and therefore require all message to have a Session ID and be received by a sessionful receiver. Default value is False. :type requires_session: bool :param default_message_time_to_live: The length of time a message will remain in the subscription before it is either discarded or moved to the dead letter queue. :type default_message_time_to_live: ~datetime.timedelta :param dead_lettering_on_message_expiration: Whether to move expired messages to the dead letter queue. Default value is False. :type dead_lettering_on_message_expiration: bool :param dead_lettering_on_filter_evaluation_exceptions: Whether to move messages that error on filtering into the dead letter queue. Default is False, and the messages will be discarded. :type dead_lettering_on_filter_evaluation_exceptions: bool :param max_delivery_count: The maximum number of times a message will attempt to be delivered before it is moved to the dead letter queue. :type max_delivery_count: int :param enable_batched_operations: :type: enable_batched_operations: bool :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found. :raises: ~azure.common.AzureConflictHttpError if a queue of the same name already exists. 
""" sub_properties = Subscription( lock_duration="PT{}S".format(int(lock_duration)), requires_session=requires_session, default_message_time_to_live=default_message_time_to_live, dead_lettering_on_message_expiration=dead_lettering_on_message_expiration, dead_lettering_on_filter_evaluation_exceptions=dead_lettering_on_filter_evaluation_exceptions, max_delivery_count=max_delivery_count, enable_batched_operations=enable_batched_operations) try: return self.mgmt_client.create_subscription( topic_name, subscription_name, subscription=sub_properties, fail_on_exist=True) except requests.exceptions.ConnectionError as e: raise ServiceBusConnectionError("Namespace: {} not found".format(self.service_namespace), e)
def nekmin(omega_in,q,x0=0.5,z0=0.5): '''Computes the position of the neck (minimal radius) in an contact_binary star1''' def Omega_xz(q,x,z): return 1./np.sqrt(x**2+z**2)+q/np.sqrt((1-x)**2+z**2)+(q+1)*x**2/2.-q*x def Omega_xy(q,x,y): return 1./np.sqrt(x**2+y**2)+q/np.sqrt((1-x)**2+y**2)+(q+1)*(x**2+y**2)/2.-q*x def dOmegadx_z(q,x,z): return -x/(x**2+z**2)**(3./2)+q*(1-x)/((1-x)**2+z**2)**(3./2.)+(q+1)*x-q def dOmegadx_y(q,x,y): return -x/(x**2+y**2)**(3./2)+q*(1-x)/((1-x)**2+y**2)**(3./2.)+(q+1)*x-q def dOmegadz(q,x,z): return -z/(x**2+z**2)**(3./2)-q*z/((1-x)**2+z**2)**(3./2.) def dOmegady(q,x,y): return -y/(x**2+y**2)**(3./2)-q*y/((1-x)**2+y**2)**(3./2.)+(q+1)*y def d2Omegadx2_z(q,x,z): return (2*x**2-z**2)/(x**2+z**2)**(5./2)+q*(2*(1-x)**2-z**2)/((1-x)**2+z**2)**(5./2)+(q+1) def d2Omegadx2_y(q,x,y): return (2*x**2-y**2)/(x**2+y**2)**(5./2)+q*(2*(1-x)**2-y**2)/((1-x)**2+y**2)**(5./2)+(q+1) def d2Omegadxdz(q,x,z): return 3*x*z/(x**2+z**2)**(5./2)-3*q*x*(1-x)/((1-x)**2+z**2)**(5./2) def d2Omegadxdy(q,x,y): return 3*x*y/(x**2+y**2)**(5./2)-3*q*x*(1-x)/((1-x)**2+y**2)**(5./2) xz,z = x0,z0 dxz, dz = 1.,1. # find solution in xz plane while abs(dxz)>1e-8 and abs(dz)>1e-8: delz = 1. z=0.05 while abs(delz) > 0.000001: delom = omega_in - Omega_xz(q,xz,z) delz = delom/dOmegadz(q,xz,z) z = abs(z+delz) DN = np.array([[dOmegadx_z(q,xz,z),dOmegadz(q,xz,z)],[d2Omegadx2_z(q,xz,z),d2Omegadxdz(q,xz,z)]]) EN = np.array([omega_in-Omega_xz(q,xz,z),(-1)*dOmegadx_z(q,xz,z)]) a,b,c,d = DN[0][0],DN[0][1],DN[1][0],DN[1][1] if (a*d-b*c)!=0.: DNINV = 1./(a*d-b*c)*np.array([[d,(-1)*b],[(-1)*c,d]]) #DNINV = inv(DN) dd = np.dot(DNINV,EN) dxz,dz = dd[0],dd[1] xz=xz+dxz z=z+dz else: xz = xz+0.5 z = z+0.5 dxz = 1. dz = 1. return xz,z
Computes the position of the neck (minimal radius) in an contact_binary star1
Below is the the instruction that describes the task: ### Input: Computes the position of the neck (minimal radius) in an contact_binary star1 ### Response: def nekmin(omega_in,q,x0=0.5,z0=0.5): '''Computes the position of the neck (minimal radius) in an contact_binary star1''' def Omega_xz(q,x,z): return 1./np.sqrt(x**2+z**2)+q/np.sqrt((1-x)**2+z**2)+(q+1)*x**2/2.-q*x def Omega_xy(q,x,y): return 1./np.sqrt(x**2+y**2)+q/np.sqrt((1-x)**2+y**2)+(q+1)*(x**2+y**2)/2.-q*x def dOmegadx_z(q,x,z): return -x/(x**2+z**2)**(3./2)+q*(1-x)/((1-x)**2+z**2)**(3./2.)+(q+1)*x-q def dOmegadx_y(q,x,y): return -x/(x**2+y**2)**(3./2)+q*(1-x)/((1-x)**2+y**2)**(3./2.)+(q+1)*x-q def dOmegadz(q,x,z): return -z/(x**2+z**2)**(3./2)-q*z/((1-x)**2+z**2)**(3./2.) def dOmegady(q,x,y): return -y/(x**2+y**2)**(3./2)-q*y/((1-x)**2+y**2)**(3./2.)+(q+1)*y def d2Omegadx2_z(q,x,z): return (2*x**2-z**2)/(x**2+z**2)**(5./2)+q*(2*(1-x)**2-z**2)/((1-x)**2+z**2)**(5./2)+(q+1) def d2Omegadx2_y(q,x,y): return (2*x**2-y**2)/(x**2+y**2)**(5./2)+q*(2*(1-x)**2-y**2)/((1-x)**2+y**2)**(5./2)+(q+1) def d2Omegadxdz(q,x,z): return 3*x*z/(x**2+z**2)**(5./2)-3*q*x*(1-x)/((1-x)**2+z**2)**(5./2) def d2Omegadxdy(q,x,y): return 3*x*y/(x**2+y**2)**(5./2)-3*q*x*(1-x)/((1-x)**2+y**2)**(5./2) xz,z = x0,z0 dxz, dz = 1.,1. # find solution in xz plane while abs(dxz)>1e-8 and abs(dz)>1e-8: delz = 1. z=0.05 while abs(delz) > 0.000001: delom = omega_in - Omega_xz(q,xz,z) delz = delom/dOmegadz(q,xz,z) z = abs(z+delz) DN = np.array([[dOmegadx_z(q,xz,z),dOmegadz(q,xz,z)],[d2Omegadx2_z(q,xz,z),d2Omegadxdz(q,xz,z)]]) EN = np.array([omega_in-Omega_xz(q,xz,z),(-1)*dOmegadx_z(q,xz,z)]) a,b,c,d = DN[0][0],DN[0][1],DN[1][0],DN[1][1] if (a*d-b*c)!=0.: DNINV = 1./(a*d-b*c)*np.array([[d,(-1)*b],[(-1)*c,d]]) #DNINV = inv(DN) dd = np.dot(DNINV,EN) dxz,dz = dd[0],dd[1] xz=xz+dxz z=z+dz else: xz = xz+0.5 z = z+0.5 dxz = 1. dz = 1. return xz,z
def install_board_with_programmer(mcu, programmer, f_cpu=16000000, core='arduino', replace_existing=False, ): """install board with programmer.""" bunch = AutoBunch() board_id = '{mcu}_{f_cpu}_{programmer}'.format(f_cpu=f_cpu, mcu=mcu, programmer=programmer, ) bunch.name = '{mcu}@{f} Prog:{programmer}'.format(f=strfreq(f_cpu), mcu=mcu, programmer=programmer, ) bunch.upload.using = programmer bunch.build.mcu = mcu bunch.build.f_cpu = str(f_cpu) + 'L' bunch.build.core = core install_board(board_id, bunch, replace_existing=replace_existing)
install board with programmer.
Below is the the instruction that describes the task: ### Input: install board with programmer. ### Response: def install_board_with_programmer(mcu, programmer, f_cpu=16000000, core='arduino', replace_existing=False, ): """install board with programmer.""" bunch = AutoBunch() board_id = '{mcu}_{f_cpu}_{programmer}'.format(f_cpu=f_cpu, mcu=mcu, programmer=programmer, ) bunch.name = '{mcu}@{f} Prog:{programmer}'.format(f=strfreq(f_cpu), mcu=mcu, programmer=programmer, ) bunch.upload.using = programmer bunch.build.mcu = mcu bunch.build.f_cpu = str(f_cpu) + 'L' bunch.build.core = core install_board(board_id, bunch, replace_existing=replace_existing)
def p_annotation_type(self, p): """annotation_type : ANNOTATION_TYPE ID NL \ INDENT docsection field_list DEDENT""" p[0] = AstAnnotationTypeDef( path=self.path, lineno=p.lineno(1), lexpos=p.lexpos(1), name=p[2], doc=p[5], params=p[6])
annotation_type : ANNOTATION_TYPE ID NL \ INDENT docsection field_list DEDENT
Below is the the instruction that describes the task: ### Input: annotation_type : ANNOTATION_TYPE ID NL \ INDENT docsection field_list DEDENT ### Response: def p_annotation_type(self, p): """annotation_type : ANNOTATION_TYPE ID NL \ INDENT docsection field_list DEDENT""" p[0] = AstAnnotationTypeDef( path=self.path, lineno=p.lineno(1), lexpos=p.lexpos(1), name=p[2], doc=p[5], params=p[6])
def qn(phi, *n): """ Calculate the complex flow vector `Q_n`. :param array-like phi: Azimuthal angles. :param int n: One or more harmonics to calculate. :returns: A single complex number if only one ``n`` was given or a complex array for multiple ``n``. """ phi = np.ravel(phi) n = np.asarray(n) i_n_phi = np.zeros((n.size, phi.size), dtype=complex) np.outer(n, phi, out=i_n_phi.imag) qn = np.exp(i_n_phi, out=i_n_phi).sum(axis=1) if qn.size == 1: qn = qn[0] return qn
Calculate the complex flow vector `Q_n`. :param array-like phi: Azimuthal angles. :param int n: One or more harmonics to calculate. :returns: A single complex number if only one ``n`` was given or a complex array for multiple ``n``.
Below is the the instruction that describes the task: ### Input: Calculate the complex flow vector `Q_n`. :param array-like phi: Azimuthal angles. :param int n: One or more harmonics to calculate. :returns: A single complex number if only one ``n`` was given or a complex array for multiple ``n``. ### Response: def qn(phi, *n): """ Calculate the complex flow vector `Q_n`. :param array-like phi: Azimuthal angles. :param int n: One or more harmonics to calculate. :returns: A single complex number if only one ``n`` was given or a complex array for multiple ``n``. """ phi = np.ravel(phi) n = np.asarray(n) i_n_phi = np.zeros((n.size, phi.size), dtype=complex) np.outer(n, phi, out=i_n_phi.imag) qn = np.exp(i_n_phi, out=i_n_phi).sum(axis=1) if qn.size == 1: qn = qn[0] return qn
def element_info(cls_or_slf, node, siblings, level, value_dims): """ Return the information summary for an Element. This consists of the dotted name followed by an value dimension names. """ info = cls_or_slf.component_type(node) if len(node.kdims) >= 1: info += cls_or_slf.tab + '[%s]' % ','.join(d.name for d in node.kdims) if value_dims and len(node.vdims) >= 1: info += cls_or_slf.tab + '(%s)' % ','.join(d.name for d in node.vdims) return level, [(level, info)]
Return the information summary for an Element. This consists of the dotted name followed by an value dimension names.
Below is the the instruction that describes the task: ### Input: Return the information summary for an Element. This consists of the dotted name followed by an value dimension names. ### Response: def element_info(cls_or_slf, node, siblings, level, value_dims): """ Return the information summary for an Element. This consists of the dotted name followed by an value dimension names. """ info = cls_or_slf.component_type(node) if len(node.kdims) >= 1: info += cls_or_slf.tab + '[%s]' % ','.join(d.name for d in node.kdims) if value_dims and len(node.vdims) >= 1: info += cls_or_slf.tab + '(%s)' % ','.join(d.name for d in node.vdims) return level, [(level, info)]
def wr_data(self, xlsx_data, row_i, worksheet): """Write data into xlsx worksheet.""" fld2fmt = self.vars.fld2fmt # User may specify to skip rows based on values in row prt_if = self.vars.prt_if # User may specify a subset of columns to print or # a column ordering different from the _fields seen in the namedtuple prt_flds = self.wbfmtobj.get_prt_flds() get_wbfmt = self.wbfmtobj.get_wbfmt if self.vars.sort_by is not None: xlsx_data = sorted(xlsx_data, key=self.vars.sort_by) try: for data_nt in xlsx_data: if prt_if is None or prt_if(data_nt): wbfmt = get_wbfmt(data_nt) # xlsxwriter.format.Format created w/add_format # Print an xlsx row by printing each column in order. for col_i, fld in enumerate(prt_flds): try: # If fld "format_txt" present, use val for formatting, but don't print. val = getattr(data_nt, fld, "") # Optional user-formatting of specific fields, eg, pval: "{:8.2e}" # If field value is empty (""), don't use fld2fmt if fld2fmt is not None and fld in fld2fmt and val != "" and val != "*": val = fld2fmt[fld].format(val) worksheet.write(row_i, col_i, val, wbfmt) except: raise RuntimeError(self._get_err_msg(row_i, col_i, fld, val, prt_flds)) row_i += 1 except RuntimeError as inst: import traceback traceback.print_exc() sys.stderr.write("\n **FATAL in wr_data: {MSG}\n\n".format(MSG=str(inst))) sys.exit(1) return row_i
Write data into xlsx worksheet.
Below is the the instruction that describes the task: ### Input: Write data into xlsx worksheet. ### Response: def wr_data(self, xlsx_data, row_i, worksheet): """Write data into xlsx worksheet.""" fld2fmt = self.vars.fld2fmt # User may specify to skip rows based on values in row prt_if = self.vars.prt_if # User may specify a subset of columns to print or # a column ordering different from the _fields seen in the namedtuple prt_flds = self.wbfmtobj.get_prt_flds() get_wbfmt = self.wbfmtobj.get_wbfmt if self.vars.sort_by is not None: xlsx_data = sorted(xlsx_data, key=self.vars.sort_by) try: for data_nt in xlsx_data: if prt_if is None or prt_if(data_nt): wbfmt = get_wbfmt(data_nt) # xlsxwriter.format.Format created w/add_format # Print an xlsx row by printing each column in order. for col_i, fld in enumerate(prt_flds): try: # If fld "format_txt" present, use val for formatting, but don't print. val = getattr(data_nt, fld, "") # Optional user-formatting of specific fields, eg, pval: "{:8.2e}" # If field value is empty (""), don't use fld2fmt if fld2fmt is not None and fld in fld2fmt and val != "" and val != "*": val = fld2fmt[fld].format(val) worksheet.write(row_i, col_i, val, wbfmt) except: raise RuntimeError(self._get_err_msg(row_i, col_i, fld, val, prt_flds)) row_i += 1 except RuntimeError as inst: import traceback traceback.print_exc() sys.stderr.write("\n **FATAL in wr_data: {MSG}\n\n".format(MSG=str(inst))) sys.exit(1) return row_i
def stetson_jindex(ftimes, fmags, ferrs, weightbytimediff=False): '''This calculates the Stetson index for the magseries, based on consecutive pairs of observations. Based on Nicole Loncke's work for her Planets and Life certificate at Princeton in 2014. Parameters ---------- ftimes,fmags,ferrs : np.array The input mag/flux time-series with all non-finite elements removed. weightbytimediff : bool If this is True, the Stetson index for any pair of mags will be reweighted by the difference in times between them using the scheme in Fruth+ 2012 and Zhange+ 2003 (as seen in Sokolovsky+ 2017):: w_i = exp(- (t_i+1 - t_i)/ delta_t ) Returns ------- float The calculated Stetson J variability index. ''' ndet = len(fmags) if ndet > 9: # get the median and ndet medmag = npmedian(fmags) # get the stetson index elements delta_prefactor = (ndet/(ndet - 1)) sigma_i = delta_prefactor*(fmags - medmag)/ferrs # Nicole's clever trick to advance indices by 1 and do x_i*x_(i+1) sigma_j = nproll(sigma_i,1) if weightbytimediff: difft = npdiff(ftimes) deltat = npmedian(difft) weights_i = npexp(- difft/deltat ) products = (weights_i*sigma_i[1:]*sigma_j[1:]) else: # ignore first elem since it's actually x_0*x_n products = (sigma_i*sigma_j)[1:] stetsonj = ( npsum(npsign(products) * npsqrt(npabs(products))) ) / ndet return stetsonj else: LOGERROR('not enough detections in this magseries ' 'to calculate stetson J index') return npnan
This calculates the Stetson index for the magseries, based on consecutive pairs of observations. Based on Nicole Loncke's work for her Planets and Life certificate at Princeton in 2014. Parameters ---------- ftimes,fmags,ferrs : np.array The input mag/flux time-series with all non-finite elements removed. weightbytimediff : bool If this is True, the Stetson index for any pair of mags will be reweighted by the difference in times between them using the scheme in Fruth+ 2012 and Zhange+ 2003 (as seen in Sokolovsky+ 2017):: w_i = exp(- (t_i+1 - t_i)/ delta_t ) Returns ------- float The calculated Stetson J variability index.
Below is the the instruction that describes the task: ### Input: This calculates the Stetson index for the magseries, based on consecutive pairs of observations. Based on Nicole Loncke's work for her Planets and Life certificate at Princeton in 2014. Parameters ---------- ftimes,fmags,ferrs : np.array The input mag/flux time-series with all non-finite elements removed. weightbytimediff : bool If this is True, the Stetson index for any pair of mags will be reweighted by the difference in times between them using the scheme in Fruth+ 2012 and Zhange+ 2003 (as seen in Sokolovsky+ 2017):: w_i = exp(- (t_i+1 - t_i)/ delta_t ) Returns ------- float The calculated Stetson J variability index. ### Response: def stetson_jindex(ftimes, fmags, ferrs, weightbytimediff=False): '''This calculates the Stetson index for the magseries, based on consecutive pairs of observations. Based on Nicole Loncke's work for her Planets and Life certificate at Princeton in 2014. Parameters ---------- ftimes,fmags,ferrs : np.array The input mag/flux time-series with all non-finite elements removed. weightbytimediff : bool If this is True, the Stetson index for any pair of mags will be reweighted by the difference in times between them using the scheme in Fruth+ 2012 and Zhange+ 2003 (as seen in Sokolovsky+ 2017):: w_i = exp(- (t_i+1 - t_i)/ delta_t ) Returns ------- float The calculated Stetson J variability index. 
''' ndet = len(fmags) if ndet > 9: # get the median and ndet medmag = npmedian(fmags) # get the stetson index elements delta_prefactor = (ndet/(ndet - 1)) sigma_i = delta_prefactor*(fmags - medmag)/ferrs # Nicole's clever trick to advance indices by 1 and do x_i*x_(i+1) sigma_j = nproll(sigma_i,1) if weightbytimediff: difft = npdiff(ftimes) deltat = npmedian(difft) weights_i = npexp(- difft/deltat ) products = (weights_i*sigma_i[1:]*sigma_j[1:]) else: # ignore first elem since it's actually x_0*x_n products = (sigma_i*sigma_j)[1:] stetsonj = ( npsum(npsign(products) * npsqrt(npabs(products))) ) / ndet return stetsonj else: LOGERROR('not enough detections in this magseries ' 'to calculate stetson J index') return npnan
def get_client(self, name): """Like :meth:`.get`, but only mechanisms inheriting :class:`ClientMechanism` will be returned. Args: name: The SASL mechanism name. Returns: The mechanism object or ``None`` """ mech = self.get(name) return mech if isinstance(mech, ClientMechanism) else None
Like :meth:`.get`, but only mechanisms inheriting :class:`ClientMechanism` will be returned. Args: name: The SASL mechanism name. Returns: The mechanism object or ``None``
Below is the the instruction that describes the task: ### Input: Like :meth:`.get`, but only mechanisms inheriting :class:`ClientMechanism` will be returned. Args: name: The SASL mechanism name. Returns: The mechanism object or ``None`` ### Response: def get_client(self, name): """Like :meth:`.get`, but only mechanisms inheriting :class:`ClientMechanism` will be returned. Args: name: The SASL mechanism name. Returns: The mechanism object or ``None`` """ mech = self.get(name) return mech if isinstance(mech, ClientMechanism) else None
def save_b26_file(filename, instruments=None, scripts=None, probes=None, overwrite=False, verbose=False): """ save instruments, scripts and probes as a json file Args: filename: instruments: scripts: probes: dictionary of the form {instrument_name : probe_1_of_intrument, probe_2_of_intrument, ...} Returns: """ # if overwrite is false load existing data and append to new instruments if os.path.isfile(filename) and overwrite == False: data_dict = load_b26_file(filename) else: data_dict = {} if instruments is not None: if 'instruments' in data_dict: data_dict['instruments'].update(instruments) else: data_dict['instruments'] = instruments if scripts is not None: if 'scripts' in data_dict: data_dict['scripts'].update(scripts) else: data_dict['scripts'] = scripts if probes is not None: probe_instruments = list(probes.keys()) if 'probes' in data_dict: # all the instruments required for old and new probes probe_instruments= set(probe_instruments + list(data_dict['probes'].keys())) else: data_dict.update({'probes':{}}) for instrument in probe_instruments: if instrument in data_dict['probes'] and instrument in probes: # update the data_dict data_dict['probes'][instrument] = ','.join(set(data_dict['probes'][instrument].split(',') + probes[instrument].split(','))) else: data_dict['probes'].update(probes) if verbose: print(('writing ', filename)) if data_dict != {}: # if platform == 'Windows': # # windows can't deal with long filenames so we have to use the prefix '\\\\?\\' # if len(filename.split('\\\\?\\')) == 1: # filename = '\\\\?\\'+ filename # create folder if it doesn't exist if verbose: print(('filename', filename)) print(('exists', os.path.exists(os.path.dirname(filename)))) if os.path.exists(os.path.dirname(filename)) is False: # print(('creating', os.path.dirname(filename))) os.makedirs(os.path.dirname(filename)) with open(filename, 'w') as outfile: tmp = json.dump(data_dict, outfile, indent=4)
save instruments, scripts and probes as a json file Args: filename: instruments: scripts: probes: dictionary of the form {instrument_name : probe_1_of_intrument, probe_2_of_intrument, ...} Returns:
Below is the the instruction that describes the task: ### Input: save instruments, scripts and probes as a json file Args: filename: instruments: scripts: probes: dictionary of the form {instrument_name : probe_1_of_intrument, probe_2_of_intrument, ...} Returns: ### Response: def save_b26_file(filename, instruments=None, scripts=None, probes=None, overwrite=False, verbose=False): """ save instruments, scripts and probes as a json file Args: filename: instruments: scripts: probes: dictionary of the form {instrument_name : probe_1_of_intrument, probe_2_of_intrument, ...} Returns: """ # if overwrite is false load existing data and append to new instruments if os.path.isfile(filename) and overwrite == False: data_dict = load_b26_file(filename) else: data_dict = {} if instruments is not None: if 'instruments' in data_dict: data_dict['instruments'].update(instruments) else: data_dict['instruments'] = instruments if scripts is not None: if 'scripts' in data_dict: data_dict['scripts'].update(scripts) else: data_dict['scripts'] = scripts if probes is not None: probe_instruments = list(probes.keys()) if 'probes' in data_dict: # all the instruments required for old and new probes probe_instruments= set(probe_instruments + list(data_dict['probes'].keys())) else: data_dict.update({'probes':{}}) for instrument in probe_instruments: if instrument in data_dict['probes'] and instrument in probes: # update the data_dict data_dict['probes'][instrument] = ','.join(set(data_dict['probes'][instrument].split(',') + probes[instrument].split(','))) else: data_dict['probes'].update(probes) if verbose: print(('writing ', filename)) if data_dict != {}: # if platform == 'Windows': # # windows can't deal with long filenames so we have to use the prefix '\\\\?\\' # if len(filename.split('\\\\?\\')) == 1: # filename = '\\\\?\\'+ filename # create folder if it doesn't exist if verbose: print(('filename', filename)) print(('exists', os.path.exists(os.path.dirname(filename)))) if 
os.path.exists(os.path.dirname(filename)) is False: # print(('creating', os.path.dirname(filename))) os.makedirs(os.path.dirname(filename)) with open(filename, 'w') as outfile: tmp = json.dump(data_dict, outfile, indent=4)
def pw( ctx, key_pattern, user_pattern, mode, strict_flag, user_flag, file, edit_subcommand, gen_subcommand, ): """Search for USER and KEY in GPG-encrypted password file.""" # install silent Ctrl-C handler def handle_sigint(*_): click.echo() ctx.exit(1) signal.signal(signal.SIGINT, handle_sigint) # invoke a subcommand? if gen_subcommand: length = int(key_pattern) if key_pattern else None generate_password(mode, length) return elif edit_subcommand: launch_editor(ctx, file) return # verify that database file is present if not os.path.exists(file): click.echo("error: password store not found at '%s'" % file, err=True) ctx.exit(1) # load database store = Store.load(file) # if no user query provided, split key query according to right-most "@" sign (since usernames are typically email addresses) if not user_pattern: user_pattern, _, key_pattern = key_pattern.rpartition("@") # search database results = store.search(key_pattern, user_pattern) results = list(results) # if strict flag is enabled, check that precisely a single record was found if strict_flag and len(results) != 1: click.echo( "error: multiple or no records found (but using --strict flag)", err=True ) ctx.exit(2) # raw mode? 
if mode == Mode.RAW: for entry in results: click.echo(entry.user if user_flag else entry.password) return # print results for idx, entry in enumerate(results): # start with key and user line = highlight_match(key_pattern, entry.key) if entry.user: line += ": " + highlight_match(user_pattern, entry.user) # add password or copy&paste sucess message if mode == Mode.ECHO and not user_flag: line += " | " + style_password(entry.password) elif mode == Mode.COPY and idx == 0: try: import pyperclip pyperclip.copy(entry.user if user_flag else entry.password) result = style_success( "*** %s COPIED TO CLIPBOARD ***" % ("USERNAME" if user_flag else "PASSWORD") ) except ImportError: result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***') line += " | " + result # add notes if entry.notes: if idx == 0: line += "\n" line += "\n".join(" " + line for line in entry.notes.splitlines()) else: lines = entry.notes.splitlines() line += " | " + lines[0] if len(lines) > 1: line += " (...)" click.echo(line)
Search for USER and KEY in GPG-encrypted password file.
Below is the the instruction that describes the task: ### Input: Search for USER and KEY in GPG-encrypted password file. ### Response: def pw( ctx, key_pattern, user_pattern, mode, strict_flag, user_flag, file, edit_subcommand, gen_subcommand, ): """Search for USER and KEY in GPG-encrypted password file.""" # install silent Ctrl-C handler def handle_sigint(*_): click.echo() ctx.exit(1) signal.signal(signal.SIGINT, handle_sigint) # invoke a subcommand? if gen_subcommand: length = int(key_pattern) if key_pattern else None generate_password(mode, length) return elif edit_subcommand: launch_editor(ctx, file) return # verify that database file is present if not os.path.exists(file): click.echo("error: password store not found at '%s'" % file, err=True) ctx.exit(1) # load database store = Store.load(file) # if no user query provided, split key query according to right-most "@" sign (since usernames are typically email addresses) if not user_pattern: user_pattern, _, key_pattern = key_pattern.rpartition("@") # search database results = store.search(key_pattern, user_pattern) results = list(results) # if strict flag is enabled, check that precisely a single record was found if strict_flag and len(results) != 1: click.echo( "error: multiple or no records found (but using --strict flag)", err=True ) ctx.exit(2) # raw mode? 
if mode == Mode.RAW: for entry in results: click.echo(entry.user if user_flag else entry.password) return # print results for idx, entry in enumerate(results): # start with key and user line = highlight_match(key_pattern, entry.key) if entry.user: line += ": " + highlight_match(user_pattern, entry.user) # add password or copy&paste sucess message if mode == Mode.ECHO and not user_flag: line += " | " + style_password(entry.password) elif mode == Mode.COPY and idx == 0: try: import pyperclip pyperclip.copy(entry.user if user_flag else entry.password) result = style_success( "*** %s COPIED TO CLIPBOARD ***" % ("USERNAME" if user_flag else "PASSWORD") ) except ImportError: result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***') line += " | " + result # add notes if entry.notes: if idx == 0: line += "\n" line += "\n".join(" " + line for line in entry.notes.splitlines()) else: lines = entry.notes.splitlines() line += " | " + lines[0] if len(lines) > 1: line += " (...)" click.echo(line)
def do_ams_post(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"): '''Do a AMS HTTP POST request and return JSON. Args: endpoint (str): Azure Media Services Initial Endpoint. path (str): Azure Media Services Endpoint Path. body (str): Azure Media Services Content Body. access_token (str): A valid Azure authentication token. rformat (str): A required JSON Accept Format. ds_min_version (str): A required DS MIN Version. Returns: HTTP response. JSON body. ''' min_ds = dsversion_min content_acceptformat = json_acceptformat acceptformat = json_acceptformat if rformat == "json_only": min_ds = ds_min_version content_acceptformat = json_only_acceptformat if rformat == "xml": content_acceptformat = xml_acceptformat acceptformat = xml_acceptformat + ",application/xml" headers = {"Content-Type": content_acceptformat, "DataServiceVersion": min_ds, "MaxDataServiceVersion": dsversion_max, "Accept": acceptformat, "Accept-Charset" : charset, "Authorization": "Bearer " + access_token, "x-ms-version" : xmsversion} response = requests.post(endpoint, data=body, headers=headers, allow_redirects=False) # AMS response to the first call can be a redirect, # so we handle it here to make it transparent for the caller... if response.status_code == 301: redirected_url = ''.join([response.headers['location'], path]) response = requests.post(redirected_url, data=body, headers=headers) return response
Do a AMS HTTP POST request and return JSON. Args: endpoint (str): Azure Media Services Initial Endpoint. path (str): Azure Media Services Endpoint Path. body (str): Azure Media Services Content Body. access_token (str): A valid Azure authentication token. rformat (str): A required JSON Accept Format. ds_min_version (str): A required DS MIN Version. Returns: HTTP response. JSON body.
Below is the the instruction that describes the task: ### Input: Do a AMS HTTP POST request and return JSON. Args: endpoint (str): Azure Media Services Initial Endpoint. path (str): Azure Media Services Endpoint Path. body (str): Azure Media Services Content Body. access_token (str): A valid Azure authentication token. rformat (str): A required JSON Accept Format. ds_min_version (str): A required DS MIN Version. Returns: HTTP response. JSON body. ### Response: def do_ams_post(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"): '''Do a AMS HTTP POST request and return JSON. Args: endpoint (str): Azure Media Services Initial Endpoint. path (str): Azure Media Services Endpoint Path. body (str): Azure Media Services Content Body. access_token (str): A valid Azure authentication token. rformat (str): A required JSON Accept Format. ds_min_version (str): A required DS MIN Version. Returns: HTTP response. JSON body. ''' min_ds = dsversion_min content_acceptformat = json_acceptformat acceptformat = json_acceptformat if rformat == "json_only": min_ds = ds_min_version content_acceptformat = json_only_acceptformat if rformat == "xml": content_acceptformat = xml_acceptformat acceptformat = xml_acceptformat + ",application/xml" headers = {"Content-Type": content_acceptformat, "DataServiceVersion": min_ds, "MaxDataServiceVersion": dsversion_max, "Accept": acceptformat, "Accept-Charset" : charset, "Authorization": "Bearer " + access_token, "x-ms-version" : xmsversion} response = requests.post(endpoint, data=body, headers=headers, allow_redirects=False) # AMS response to the first call can be a redirect, # so we handle it here to make it transparent for the caller... if response.status_code == 301: redirected_url = ''.join([response.headers['location'], path]) response = requests.post(redirected_url, data=body, headers=headers) return response
def split_bodies(dataset, label=False): """Find, label, and split connected bodies/volumes. This splits different connected bodies into blocks in a MultiBlock dataset. Parameters ---------- label : bool A flag on whether to keep the ID arrays given by the ``connectivity`` filter. """ # Get the connectivity and label different bodies labeled = dataset.connectivity() classifier = labeled.cell_arrays['RegionId'] bodies = vtki.MultiBlock() for vid in np.unique(classifier): # Now extract it: b = labeled.threshold([vid-0.5, vid+0.5], scalars='RegionId') if not label: # strange behavior: # must use this method rather than deleting from the point_arrays # or else object is collected. b._remove_cell_scalar('RegionId') b._remove_point_scalar('RegionId') bodies.append(b) return bodies
Find, label, and split connected bodies/volumes. This splits different connected bodies into blocks in a MultiBlock dataset. Parameters ---------- label : bool A flag on whether to keep the ID arrays given by the ``connectivity`` filter.
Below is the the instruction that describes the task: ### Input: Find, label, and split connected bodies/volumes. This splits different connected bodies into blocks in a MultiBlock dataset. Parameters ---------- label : bool A flag on whether to keep the ID arrays given by the ``connectivity`` filter. ### Response: def split_bodies(dataset, label=False): """Find, label, and split connected bodies/volumes. This splits different connected bodies into blocks in a MultiBlock dataset. Parameters ---------- label : bool A flag on whether to keep the ID arrays given by the ``connectivity`` filter. """ # Get the connectivity and label different bodies labeled = dataset.connectivity() classifier = labeled.cell_arrays['RegionId'] bodies = vtki.MultiBlock() for vid in np.unique(classifier): # Now extract it: b = labeled.threshold([vid-0.5, vid+0.5], scalars='RegionId') if not label: # strange behavior: # must use this method rather than deleting from the point_arrays # or else object is collected. b._remove_cell_scalar('RegionId') b._remove_point_scalar('RegionId') bodies.append(b) return bodies
def _raw_aspera_metadata(self, bucket): ''' get the Aspera connection details on Aspera enabled buckets ''' response = self._client.get_bucket_aspera(Bucket=bucket) # Parse metadata from response aspera_access_key = response['AccessKey']['Id'] aspera_secret_key = response['AccessKey']['Secret'] ats_endpoint = response['ATSEndpoint'] return aspera_access_key, aspera_secret_key, ats_endpoint
get the Aspera connection details on Aspera enabled buckets
Below is the the instruction that describes the task: ### Input: get the Aspera connection details on Aspera enabled buckets ### Response: def _raw_aspera_metadata(self, bucket): ''' get the Aspera connection details on Aspera enabled buckets ''' response = self._client.get_bucket_aspera(Bucket=bucket) # Parse metadata from response aspera_access_key = response['AccessKey']['Id'] aspera_secret_key = response['AccessKey']['Secret'] ats_endpoint = response['ATSEndpoint'] return aspera_access_key, aspera_secret_key, ats_endpoint
def positions_to_contigs(positions): """Label contigs according to relative positions Given a list of positions, return an ordered list of labels reflecting where the positions array started over (and presumably a new contig began). Parameters ---------- positions : list or array_like A piece-wise ordered list of integers representing positions Returns ------- contig_labels : numpy.ndarray The list of contig labels """ contig_labels = np.zeros_like(positions) contig_index = 0 for i, p in enumerate(positions): if p == 0: contig_index += 1 contig_labels[i] = contig_index return contig_labels
Label contigs according to relative positions Given a list of positions, return an ordered list of labels reflecting where the positions array started over (and presumably a new contig began). Parameters ---------- positions : list or array_like A piece-wise ordered list of integers representing positions Returns ------- contig_labels : numpy.ndarray The list of contig labels
Below is the the instruction that describes the task: ### Input: Label contigs according to relative positions Given a list of positions, return an ordered list of labels reflecting where the positions array started over (and presumably a new contig began). Parameters ---------- positions : list or array_like A piece-wise ordered list of integers representing positions Returns ------- contig_labels : numpy.ndarray The list of contig labels ### Response: def positions_to_contigs(positions): """Label contigs according to relative positions Given a list of positions, return an ordered list of labels reflecting where the positions array started over (and presumably a new contig began). Parameters ---------- positions : list or array_like A piece-wise ordered list of integers representing positions Returns ------- contig_labels : numpy.ndarray The list of contig labels """ contig_labels = np.zeros_like(positions) contig_index = 0 for i, p in enumerate(positions): if p == 0: contig_index += 1 contig_labels[i] = contig_index return contig_labels
def write_case_data(self, file): """ Writes case data to file. """ change_code = 0 s_base = self.case.base_mva timestr = time.strftime("%Y%m%d%H%M", time.gmtime()) file.write("%d, %8.2f, 30 / PSS(tm)E-30 RAW created by Pylon (%s).\n" % (change_code, s_base, timestr)) file.write("Modified by Hantao Cui, CURENT, UTK\n ") file.write("%s, %d BUSES, %d BRANCHES\n" % (self.case.name, len(self.case.buses), len(self.case.branches)))
Writes case data to file.
Below is the the instruction that describes the task: ### Input: Writes case data to file. ### Response: def write_case_data(self, file): """ Writes case data to file. """ change_code = 0 s_base = self.case.base_mva timestr = time.strftime("%Y%m%d%H%M", time.gmtime()) file.write("%d, %8.2f, 30 / PSS(tm)E-30 RAW created by Pylon (%s).\n" % (change_code, s_base, timestr)) file.write("Modified by Hantao Cui, CURENT, UTK\n ") file.write("%s, %d BUSES, %d BRANCHES\n" % (self.case.name, len(self.case.buses), len(self.case.branches)))
def import_dotted_path(path): """ Takes a dotted path to a member name in a module, and returns the member after importing it. """ # stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path) try: module_path, member_name = path.rsplit(".", 1) module = import_module(module_path) return getattr(module, member_name) except (ValueError, ImportError, AttributeError) as e: raise ImportError('Could not import the name: {}: {}'.format(path, e))
Takes a dotted path to a member name in a module, and returns the member after importing it.
Below is the the instruction that describes the task: ### Input: Takes a dotted path to a member name in a module, and returns the member after importing it. ### Response: def import_dotted_path(path): """ Takes a dotted path to a member name in a module, and returns the member after importing it. """ # stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path) try: module_path, member_name = path.rsplit(".", 1) module = import_module(module_path) return getattr(module, member_name) except (ValueError, ImportError, AttributeError) as e: raise ImportError('Could not import the name: {}: {}'.format(path, e))
def create_from_resource_class(cls, resource_class): """ Creates a new representer for the given resource class. The representer obtains a reference to the (freshly created or looked up) mapping for the resource class. """ mp_reg = get_mapping_registry(cls.content_type) mp = mp_reg.find_or_create_mapping(resource_class) return cls(resource_class, mp)
Creates a new representer for the given resource class. The representer obtains a reference to the (freshly created or looked up) mapping for the resource class.
Below is the the instruction that describes the task: ### Input: Creates a new representer for the given resource class. The representer obtains a reference to the (freshly created or looked up) mapping for the resource class. ### Response: def create_from_resource_class(cls, resource_class): """ Creates a new representer for the given resource class. The representer obtains a reference to the (freshly created or looked up) mapping for the resource class. """ mp_reg = get_mapping_registry(cls.content_type) mp = mp_reg.find_or_create_mapping(resource_class) return cls(resource_class, mp)
def merge_model_instances(self, primary_object, alias_objects): """ Merge several model instances into one, the `primary_object`. Use this function to merge model objects and migrate all of the related fields from the alias objects the primary object. """ generic_fields = get_generic_fields() # get related fields related_fields = list(filter( lambda x: x.is_relation is True, primary_object._meta.get_fields())) many_to_many_fields = list(filter( lambda x: x.many_to_many is True, related_fields)) related_fields = list(filter( lambda x: x.many_to_many is False, related_fields)) # Loop through all alias objects and migrate their references to the # primary object deleted_objects = [] deleted_objects_count = 0 for alias_object in alias_objects: # Migrate all foreign key references from alias object to primary # object. for many_to_many_field in many_to_many_fields: alias_varname = many_to_many_field.name related_objects = getattr(alias_object, alias_varname) for obj in related_objects.all(): try: # Handle regular M2M relationships. getattr(alias_object, alias_varname).remove(obj) getattr(primary_object, alias_varname).add(obj) except AttributeError: # Handle M2M relationships with a 'through' model. # This does not delete the 'through model. # TODO: Allow the user to delete a duplicate 'through' model. 
through_model = getattr(alias_object, alias_varname).through kwargs = { many_to_many_field.m2m_reverse_field_name(): obj, many_to_many_field.m2m_field_name(): alias_object, } through_model_instances = through_model.objects.filter(**kwargs) for instance in through_model_instances: # Re-attach the through model to the primary_object setattr( instance, many_to_many_field.m2m_field_name(), primary_object) instance.save() # TODO: Here, try to delete duplicate instances that are # disallowed by a unique_together constraint for related_field in related_fields: if related_field.one_to_many: alias_varname = related_field.get_accessor_name() related_objects = getattr(alias_object, alias_varname) for obj in related_objects.all(): field_name = related_field.field.name setattr(obj, field_name, primary_object) obj.save() elif related_field.one_to_one or related_field.many_to_one: alias_varname = related_field.name related_object = getattr(alias_object, alias_varname) primary_related_object = getattr(primary_object, alias_varname) if primary_related_object is None: setattr(primary_object, alias_varname, related_object) primary_object.save() elif related_field.one_to_one: self.stdout.write("Deleted {} with id {}\n".format( related_object, related_object.id)) related_object.delete() for field in generic_fields: filter_kwargs = {} filter_kwargs[field.fk_field] = alias_object._get_pk_val() filter_kwargs[field.ct_field] = field.get_content_type(alias_object) related_objects = field.model.objects.filter(**filter_kwargs) for generic_related_object in related_objects: setattr(generic_related_object, field.name, primary_object) generic_related_object.save() if alias_object.id: deleted_objects += [alias_object] self.stdout.write("Deleted {} with id {}\n".format( alias_object, alias_object.id)) alias_object.delete() deleted_objects_count += 1 return primary_object, deleted_objects, deleted_objects_count
Merge several model instances into one, the `primary_object`. Use this function to merge model objects and migrate all of the related fields from the alias objects the primary object.
Below is the the instruction that describes the task: ### Input: Merge several model instances into one, the `primary_object`. Use this function to merge model objects and migrate all of the related fields from the alias objects the primary object. ### Response: def merge_model_instances(self, primary_object, alias_objects): """ Merge several model instances into one, the `primary_object`. Use this function to merge model objects and migrate all of the related fields from the alias objects the primary object. """ generic_fields = get_generic_fields() # get related fields related_fields = list(filter( lambda x: x.is_relation is True, primary_object._meta.get_fields())) many_to_many_fields = list(filter( lambda x: x.many_to_many is True, related_fields)) related_fields = list(filter( lambda x: x.many_to_many is False, related_fields)) # Loop through all alias objects and migrate their references to the # primary object deleted_objects = [] deleted_objects_count = 0 for alias_object in alias_objects: # Migrate all foreign key references from alias object to primary # object. for many_to_many_field in many_to_many_fields: alias_varname = many_to_many_field.name related_objects = getattr(alias_object, alias_varname) for obj in related_objects.all(): try: # Handle regular M2M relationships. getattr(alias_object, alias_varname).remove(obj) getattr(primary_object, alias_varname).add(obj) except AttributeError: # Handle M2M relationships with a 'through' model. # This does not delete the 'through model. # TODO: Allow the user to delete a duplicate 'through' model. 
through_model = getattr(alias_object, alias_varname).through kwargs = { many_to_many_field.m2m_reverse_field_name(): obj, many_to_many_field.m2m_field_name(): alias_object, } through_model_instances = through_model.objects.filter(**kwargs) for instance in through_model_instances: # Re-attach the through model to the primary_object setattr( instance, many_to_many_field.m2m_field_name(), primary_object) instance.save() # TODO: Here, try to delete duplicate instances that are # disallowed by a unique_together constraint for related_field in related_fields: if related_field.one_to_many: alias_varname = related_field.get_accessor_name() related_objects = getattr(alias_object, alias_varname) for obj in related_objects.all(): field_name = related_field.field.name setattr(obj, field_name, primary_object) obj.save() elif related_field.one_to_one or related_field.many_to_one: alias_varname = related_field.name related_object = getattr(alias_object, alias_varname) primary_related_object = getattr(primary_object, alias_varname) if primary_related_object is None: setattr(primary_object, alias_varname, related_object) primary_object.save() elif related_field.one_to_one: self.stdout.write("Deleted {} with id {}\n".format( related_object, related_object.id)) related_object.delete() for field in generic_fields: filter_kwargs = {} filter_kwargs[field.fk_field] = alias_object._get_pk_val() filter_kwargs[field.ct_field] = field.get_content_type(alias_object) related_objects = field.model.objects.filter(**filter_kwargs) for generic_related_object in related_objects: setattr(generic_related_object, field.name, primary_object) generic_related_object.save() if alias_object.id: deleted_objects += [alias_object] self.stdout.write("Deleted {} with id {}\n".format( alias_object, alias_object.id)) alias_object.delete() deleted_objects_count += 1 return primary_object, deleted_objects, deleted_objects_count
def read_property_to_result_element(obj, propertyIdentifier, propertyArrayIndex=None): """Read the specified property of the object, with the optional array index, and cast the result into an Any object.""" if _debug: read_property_to_result_element._debug("read_property_to_result_element %s %r %r", obj, propertyIdentifier, propertyArrayIndex) # save the result in the property value read_result = ReadAccessResultElementChoice() try: if not obj: raise ExecutionError(errorClass='object', errorCode='unknownObject') read_result.propertyValue = read_property_to_any(obj, propertyIdentifier, propertyArrayIndex) if _debug: read_property_to_result_element._debug(" - success") except PropertyError as error: if _debug: read_property_to_result_element._debug(" - error: %r", error) read_result.propertyAccessError = ErrorType(errorClass='property', errorCode='unknownProperty') except ExecutionError as error: if _debug: read_property_to_result_element._debug(" - error: %r", error) read_result.propertyAccessError = ErrorType(errorClass=error.errorClass, errorCode=error.errorCode) # make an element for this value read_access_result_element = ReadAccessResultElement( propertyIdentifier=propertyIdentifier, propertyArrayIndex=propertyArrayIndex, readResult=read_result, ) if _debug: read_property_to_result_element._debug(" - read_access_result_element: %r", read_access_result_element) # fini return read_access_result_element
Read the specified property of the object, with the optional array index, and cast the result into an Any object.
Below is the the instruction that describes the task: ### Input: Read the specified property of the object, with the optional array index, and cast the result into an Any object. ### Response: def read_property_to_result_element(obj, propertyIdentifier, propertyArrayIndex=None): """Read the specified property of the object, with the optional array index, and cast the result into an Any object.""" if _debug: read_property_to_result_element._debug("read_property_to_result_element %s %r %r", obj, propertyIdentifier, propertyArrayIndex) # save the result in the property value read_result = ReadAccessResultElementChoice() try: if not obj: raise ExecutionError(errorClass='object', errorCode='unknownObject') read_result.propertyValue = read_property_to_any(obj, propertyIdentifier, propertyArrayIndex) if _debug: read_property_to_result_element._debug(" - success") except PropertyError as error: if _debug: read_property_to_result_element._debug(" - error: %r", error) read_result.propertyAccessError = ErrorType(errorClass='property', errorCode='unknownProperty') except ExecutionError as error: if _debug: read_property_to_result_element._debug(" - error: %r", error) read_result.propertyAccessError = ErrorType(errorClass=error.errorClass, errorCode=error.errorCode) # make an element for this value read_access_result_element = ReadAccessResultElement( propertyIdentifier=propertyIdentifier, propertyArrayIndex=propertyArrayIndex, readResult=read_result, ) if _debug: read_property_to_result_element._debug(" - read_access_result_element: %r", read_access_result_element) # fini return read_access_result_element
def _truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): """ Truncates a colormap to use. Code originall from http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib """ new_cmap = LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), cmap(numpy.linspace(minval, maxval, n)) ) return new_cmap
Truncates a colormap to use. Code originall from http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
Below is the the instruction that describes the task: ### Input: Truncates a colormap to use. Code originall from http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib ### Response: def _truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): """ Truncates a colormap to use. Code originall from http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib """ new_cmap = LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), cmap(numpy.linspace(minval, maxval, n)) ) return new_cmap
def safe_tag(self, tag, errors='strict'): """URL Encode and truncate tag to match limit (128 characters) of ThreatConnect API. Args: tag (string): The tag to be truncated Returns: (string): The truncated tag """ if tag is not None: try: # handle unicode characters and url encode tag value tag = quote(self.s(tag, errors=errors), safe='~')[:128] except KeyError as e: warn = 'Failed converting tag to safetag ({})'.format(e) self.log.warning(warn) return tag
URL Encode and truncate tag to match limit (128 characters) of ThreatConnect API. Args: tag (string): The tag to be truncated Returns: (string): The truncated tag
Below is the the instruction that describes the task: ### Input: URL Encode and truncate tag to match limit (128 characters) of ThreatConnect API. Args: tag (string): The tag to be truncated Returns: (string): The truncated tag ### Response: def safe_tag(self, tag, errors='strict'): """URL Encode and truncate tag to match limit (128 characters) of ThreatConnect API. Args: tag (string): The tag to be truncated Returns: (string): The truncated tag """ if tag is not None: try: # handle unicode characters and url encode tag value tag = quote(self.s(tag, errors=errors), safe='~')[:128] except KeyError as e: warn = 'Failed converting tag to safetag ({})'.format(e) self.log.warning(warn) return tag
def current_consumption(self): """Get the current power consumption in Watt.""" res = 'N/A' if self.use_legacy_protocol: # Use /my_cgi.cgi to retrieve current consumption try: res = self.fetchMyCgi()['Meter Watt'] except: return 'N/A' else: try: res = self.SOAPAction('GetCurrentPowerConsumption', 'CurrentConsumption', self.moduleParameters("2")) except: return 'N/A' if res is None: return 'N/A' try: res = float(res) except ValueError: _LOGGER.error("Failed to retrieve current power consumption from SmartPlug") return res
Get the current power consumption in Watt.
Below is the the instruction that describes the task: ### Input: Get the current power consumption in Watt. ### Response: def current_consumption(self): """Get the current power consumption in Watt.""" res = 'N/A' if self.use_legacy_protocol: # Use /my_cgi.cgi to retrieve current consumption try: res = self.fetchMyCgi()['Meter Watt'] except: return 'N/A' else: try: res = self.SOAPAction('GetCurrentPowerConsumption', 'CurrentConsumption', self.moduleParameters("2")) except: return 'N/A' if res is None: return 'N/A' try: res = float(res) except ValueError: _LOGGER.error("Failed to retrieve current power consumption from SmartPlug") return res
def make_worker_pool(
        processes=None,
        initializer=None,
        initializer_kwargs_per_process=None,
        max_tasks_per_worker=None):
    """Build a multiprocessing.Pool, optionally with per-worker init kwargs.

    The multiprocessing module has no native support for giving each worker
    its own initializer arguments (useful e.g. to pin each worker to a
    different GPU). This wrapper implements it with two queues: a primary
    queue each worker pops its kwargs from (and pushes back on shutdown),
    and a backup queue that always retains the full set, so a replacement
    worker can still initialize itself if a predecessor crashed without
    returning its kwargs.

    Parameters
    ----------
    processes : int, optional
        Number of workers; defaults to the CPU count.
    initializer : callable, optional
        Init function invoked in each worker.
    initializer_kwargs_per_process : list of dict, optional
        One kwargs dict per worker; length must equal ``processes``.
    max_tasks_per_worker : int, optional
        Recycle a worker after this many tasks (Python >= 3.2).

    Returns
    -------
    multiprocessing.Pool
    """
    worker_count = processes if processes else cpu_count()

    pool_kwargs = {'processes': worker_count}
    if max_tasks_per_worker:
        pool_kwargs["maxtasksperchild"] = max_tasks_per_worker

    if initializer:
        if initializer_kwargs_per_process:
            # Exactly one kwargs dict per worker is required.
            assert len(initializer_kwargs_per_process) == worker_count
            primary_queue = Queue()
            backup_queue = Queue()
            for kwargs in initializer_kwargs_per_process:
                primary_queue.put(kwargs)
                backup_queue.put(kwargs)
            pool_kwargs["initializer"] = worker_init_entry_point
            pool_kwargs["initargs"] = (initializer, primary_queue, backup_queue)
        else:
            pool_kwargs["initializer"] = initializer

    worker_pool = Pool(**pool_kwargs)
    print("Started pool: %s" % str(worker_pool))
    pprint(pool_kwargs)
    return worker_pool
Convenience wrapper to create a multiprocessing.Pool. This function adds support for per-worker initializer arguments, which are not natively supported by the multiprocessing module. The motivation for this feature is to support allocating each worker to a (different) GPU. IMPLEMENTATION NOTE: The per-worker initializer arguments are implemented using a Queue. Each worker reads its arguments from this queue when it starts. When it terminates, it adds its initializer arguments back to the queue, so a future process can initialize itself using these arguments. There is one issue with this approach, however. If a worker crashes, it never repopulates the queue of initializer arguments. This will prevent any future worker from re-using those arguments. To deal with this issue we add a second 'backup queue'. This queue always contains the full set of initializer arguments: whenever a worker reads from it, it always pushes the pop'd args back to the end of the queue immediately. If the primary arg queue is ever empty, then workers will read from this backup queue. Parameters ---------- processes : int Number of workers. Default: num CPUs. initializer : function, optional Init function to call in each worker initializer_kwargs_per_process : list of dict, optional Arguments to pass to initializer function for each worker. Length of list must equal the number of workers. max_tasks_per_worker : int, optional Restart workers after this many tasks. Requires Python >=3.2. Returns ------- multiprocessing.Pool
Below is the the instruction that describes the task: ### Input: Convenience wrapper to create a multiprocessing.Pool. This function adds support for per-worker initializer arguments, which are not natively supported by the multiprocessing module. The motivation for this feature is to support allocating each worker to a (different) GPU. IMPLEMENTATION NOTE: The per-worker initializer arguments are implemented using a Queue. Each worker reads its arguments from this queue when it starts. When it terminates, it adds its initializer arguments back to the queue, so a future process can initialize itself using these arguments. There is one issue with this approach, however. If a worker crashes, it never repopulates the queue of initializer arguments. This will prevent any future worker from re-using those arguments. To deal with this issue we add a second 'backup queue'. This queue always contains the full set of initializer arguments: whenever a worker reads from it, it always pushes the pop'd args back to the end of the queue immediately. If the primary arg queue is ever empty, then workers will read from this backup queue. Parameters ---------- processes : int Number of workers. Default: num CPUs. initializer : function, optional Init function to call in each worker initializer_kwargs_per_process : list of dict, optional Arguments to pass to initializer function for each worker. Length of list must equal the number of workers. max_tasks_per_worker : int, optional Restart workers after this many tasks. Requires Python >=3.2. Returns ------- multiprocessing.Pool ### Response: def make_worker_pool( processes=None, initializer=None, initializer_kwargs_per_process=None, max_tasks_per_worker=None): """ Convenience wrapper to create a multiprocessing.Pool. This function adds support for per-worker initializer arguments, which are not natively supported by the multiprocessing module. The motivation for this feature is to support allocating each worker to a (different) GPU. 
IMPLEMENTATION NOTE: The per-worker initializer arguments are implemented using a Queue. Each worker reads its arguments from this queue when it starts. When it terminates, it adds its initializer arguments back to the queue, so a future process can initialize itself using these arguments. There is one issue with this approach, however. If a worker crashes, it never repopulates the queue of initializer arguments. This will prevent any future worker from re-using those arguments. To deal with this issue we add a second 'backup queue'. This queue always contains the full set of initializer arguments: whenever a worker reads from it, it always pushes the pop'd args back to the end of the queue immediately. If the primary arg queue is ever empty, then workers will read from this backup queue. Parameters ---------- processes : int Number of workers. Default: num CPUs. initializer : function, optional Init function to call in each worker initializer_kwargs_per_process : list of dict, optional Arguments to pass to initializer function for each worker. Length of list must equal the number of workers. max_tasks_per_worker : int, optional Restart workers after this many tasks. Requires Python >=3.2. Returns ------- multiprocessing.Pool """ if not processes: processes = cpu_count() pool_kwargs = { 'processes': processes, } if max_tasks_per_worker: pool_kwargs["maxtasksperchild"] = max_tasks_per_worker if initializer: if initializer_kwargs_per_process: assert len(initializer_kwargs_per_process) == processes kwargs_queue = Queue() kwargs_queue_backup = Queue() for kwargs in initializer_kwargs_per_process: kwargs_queue.put(kwargs) kwargs_queue_backup.put(kwargs) pool_kwargs["initializer"] = worker_init_entry_point pool_kwargs["initargs"] = ( initializer, kwargs_queue, kwargs_queue_backup) else: pool_kwargs["initializer"] = initializer worker_pool = Pool(**pool_kwargs) print("Started pool: %s" % str(worker_pool)) pprint(pool_kwargs) return worker_pool
def get_words(self):
    """Get words matching the request search terms.

    Returns:
        A dict mapping each search term to a dict that maps every
        matching word to the number of times it occurs in the Storage::

            {<search term>: {<matching word>: <count>, ...}, ...}
    """
    results = {}
    for word_list in self._content.findall('list'):
        term = word_list.attrib['to']
        results[term] = {word.text: word.attrib['count']
                         for word in word_list.findall('word')}
    return results
Get words matching the request search terms. Returns: A dict in form: {<search term>: {<matching word>: <number of times this word is found in the Storage> } // Repeated for every matching word. } // Repeated for every search term.
Below is the the instruction that describes the task: ### Input: Get words matching the request search terms. Returns: A dict in form: {<search term>: {<matching word>: <number of times this word is found in the Storage> } // Repeated for every matching word. } // Repeated for every search term. ### Response: def get_words(self): """ Get words matching the request search terms. Returns: A dict in form: {<search term>: {<matching word>: <number of times this word is found in the Storage> } // Repeated for every matching word. } // Repeated for every search term. """ return dict([(word_list.attrib['to'], dict([(word.text, word.attrib['count']) for word in word_list.findall('word')])) for word_list in self._content.findall('list')])
def send(self, s):
    """Send all of *s* to the socket, looping over partial writes.

    Aliases: write, put, sendall, send_all

    Returns the total number of items (bytes/chars) handed to ``_send``.
    """
    self._print_header('======== Sending ({0}) ========'.format(len(s)))
    self._log_send(s)
    total = len(s)
    pending = s
    while pending:
        written = self._send(pending)
        pending = pending[written:]
    return total
Sends all the given data to the socket. Aliases: write, put, sendall, send_all
Below is the the instruction that describes the task: ### Input: Sends all the given data to the socket. Aliases: write, put, sendall, send_all ### Response: def send(self, s): """ Sends all the given data to the socket. Aliases: write, put, sendall, send_all """ self._print_header('======== Sending ({0}) ========'.format(len(s))) self._log_send(s) out = len(s) while s: s = s[self._send(s):] return out
def _harvest_validate(self, userkwargs):
    """Validate and Plant user provided arguments

    - Go through and plants the seedlings for any user arguments provided.
    - Validate the arguments, cleaning and adapting (valideer wise)
    - Extract negatives "!" arguments

    Returns a pair ``(operators, validated)``: the extracted comparison
    operators per key, and the valideer-validated/adapted kwargs (see the
    example table at the end of the function body).

    NOTE(review): uses ``dict.iteritems`` — Python 2 only.
    """
    # the valideer to parse the
    # user arguemnts when watering
    parser = {}
    userkwargs.update(self.network_kwargs)

    # a simple set of original provided argument keys (used in IGNORES)
    # NOTE(review): for "agg_base" style keys only the "base" part is kept.
    original_kwargs = set(map(lambda k: k.split('_')[1] if k.find('_')>-1 else k, userkwargs.keys()))

    # list of columns that are required from seeds
    requires = []

    # -------------
    # Clean up Aggs
    # -------------
    # Fold aggregate-prefixed keys ("avg_total") into the base key as
    # (agg, value) tuples, so "total" collects all its aggregates.
    for key in userkwargs.keys():
        # agg example: "avg_total", "max_tax"
        if key.find('_') > 0:
            agg, base = tuple(key.split('_'))
            if base in userkwargs:
                if type(userkwargs[base]) is not list:
                    userkwargs[base] = [(None, userkwargs[base])]
                userkwargs[base].append( (agg, userkwargs.pop(key)) )
            else:
                userkwargs[base] = [(agg, userkwargs.pop(key))]

    # -----------------
    # Process Arguments
    # -----------------
    for key, seed in self.arguments.iteritems():
        # --------------
        # Argument Alias
        # --------------
        if seed.get('alias') and key in userkwargs:
            # pop the value form the user kwargs (to change the key later)
            value = userkwargs.pop(key) if key in userkwargs else NotImplemented
            # for duplicate keys
            oldkey = key+""
            # change the key
            key = seed.get('alias')
            # change the seed
            seed = get(self.arguments, seed.get('alias'))
            # set the new key:value
            if value is not NotImplemented:
                if key in userkwargs:
                    raise valideer.ValidationError("Argument alias already specified for `%s` via `%s`" % (oldkey, key), oldkey)
                userkwargs[key] = value

        # can provide multiple arguments
        if key.endswith('[]'):
            multi = True
            key = key[:-2]
        else:
            multi = False

        # get value(s) from user
        if key in userkwargs:
            value = userkwargs.pop(key)
        elif seed.get('copy'):
            value = userkwargs.get(seed.get('copy'))
        else:
            value = seed.get('default')

        # no argument provided, lets continue)
        if value is None or value == []:
            if seed.get('required'):
                raise valideer.ValidationError("missing required property: %s" % key, key)
            else:
                continue

        # add requires
        requires.extend(array(get(seed, 'requires', [])))

        # -----------
        # Inheritance
        # -----------
        # not permited from arguements yet. would need to happen above the ""PROCESS ARGUMENT"" block
        # self._inherit(*array(get(seed, 'inherit', [])))

        if type(value) is list and type(value[0]) is tuple:
            # complex: list of (agg, value) tuples built by the Aggs pass above
            for v in value:
                ud, pd = self._harvest_args(key, seed, v, multi)
                userkwargs.update(ud)
                parser.update(pd)
        else:
            ud, pd = self._harvest_args(key, seed, value, multi)
            userkwargs.update(ud)
            parser.update(pd)

    # ------------
    # Ignored Keys
    # ------------
    for seed in self.seeds:
        ignores = set(array(get(seed, 'ignore')))
        if ignores:
            if ignores & original_kwargs:
                if not get(seed, 'silent'):
                    additionals = ignores & original_kwargs
                    raise valideer.ValidationError("additional properties: %s" % ",".join(additionals), additionals)
            # drop ignored keys (list comprehension used for its side effect)
            [userkwargs.pop(key) for key in ignores if key in userkwargs]

    # -------------------------
    # Custom Operators (part 1)
    # -------------------------
    # Strip the comparison operator ("!", "<", ...) off each value so the
    # bare value can be validated; remember the operator per key.
    operators = {}
    for key, value in userkwargs.items():
        rk = key
        agg = None
        if key.find('_')>-1:
            agg, rk = tuple(key.split('_'))
        seed = self.arguments.get(rk, self.arguments.get(rk+'[]'))
        if seed:
            if type(value) is list:
                operators[key] = []
                # need to remove the operator for validating
                new_values = []
                for v in value:
                    operator, v = self._operator(v, *seed.get('column', "").rsplit("::", 1))
                    new_values.append(v)
                    operators[key].append((agg, operator) if agg else operator)
                userkwargs[key] = new_values
            else:
                operator, value = self._operator(value, *seed.get('column', "").rsplit("::", 1))
                operators[key] = (agg, operator) if agg else operator
                userkwargs[key] = value

    # -----------------
    # Plant Sort Method
    # -----------------
    if 'sortby' in userkwargs:
        seed = self.arguments.get(userkwargs['sortby'].lower(), self.arguments.get(userkwargs['sortby'].lower()+'[]'))
        if seed:
            seed['id'] = str(userkwargs['sortby'].lower())

    # every column required by a used seed must actually be present
    for r in set(requires):
        if userkwargs.get(r) is None:
            raise valideer.ValidationError("required property not set: %s" % r, r)

    # --------
    # Validate
    # --------
    parser = valideer.parse(parser, additional_properties=False)
    validated = parser.validate(userkwargs, adapt=self.navigator.adapter())
    validated.update(self.network_kwargs)

    #       operators                            validated
    # --------------------------- | --------------------------------
    # {                             {
    #   "type": ["!", "!"],           "type": ['a', 'b'],
    #   "total": "<",                 "total": "50",
    #   "tax": ("avg, ">"),           "tax": "1",
    #   "time": None                  "time": "2014"
    # }                             }
    return operators, validated
Validate and Plant user provided arguments - Go through and plants the seedlings for any user arguments provided. - Validate the arguments, cleaning and adapting (valideer wise) - Extract negatives "!" arguments
Below is the the instruction that describes the task: ### Input: Validate and Plant user provided arguments - Go through and plants the seedlings for any user arguments provided. - Validate the arguments, cleaning and adapting (valideer wise) - Extract negatives "!" arguments ### Response: def _harvest_validate(self, userkwargs): """Validate and Plant user provided arguments - Go through and plants the seedlings for any user arguments provided. - Validate the arguments, cleaning and adapting (valideer wise) - Extract negatives "!" arguments """ # the valideer to parse the # user arguemnts when watering parser = {} userkwargs.update(self.network_kwargs) # a simple set of original provided argument keys (used in IGNORES) original_kwargs = set(map(lambda k: k.split('_')[1] if k.find('_')>-1 else k, userkwargs.keys())) # list of columns that are required from seeds requires = [] # ------------- # Clean up Aggs # ------------- for key in userkwargs.keys(): # agg example: "avg_total", "max_tax" if key.find('_') > 0: agg, base = tuple(key.split('_')) if base in userkwargs: if type(userkwargs[base]) is not list: userkwargs[base] = [(None, userkwargs[base])] userkwargs[base].append( (agg, userkwargs.pop(key)) ) else: userkwargs[base] = [(agg, userkwargs.pop(key))] # ----------------- # Process Arguments # ----------------- for key, seed in self.arguments.iteritems(): # -------------- # Argument Alias # -------------- if seed.get('alias') and key in userkwargs: # pop the value form the user kwargs (to change the key later) value = userkwargs.pop(key) if key in userkwargs else NotImplemented # for duplicate keys oldkey = key+"" # change the key key = seed.get('alias') # change the seed seed = get(self.arguments, seed.get('alias')) # set the new key:value if value is not NotImplemented: if key in userkwargs: raise valideer.ValidationError("Argument alias already specified for `%s` via `%s`" % (oldkey, key), oldkey) userkwargs[key] = value # can provide multiple arguments if 
key.endswith('[]'): multi = True key = key[:-2] else: multi = False # get value(s) from user if key in userkwargs: value = userkwargs.pop(key) elif seed.get('copy'): value = userkwargs.get(seed.get('copy')) else: value = seed.get('default') # no argument provided, lets continue) if value is None or value == []: if seed.get('required'): raise valideer.ValidationError("missing required property: %s" % key, key) else: continue # add requires requires.extend(array(get(seed, 'requires', []))) # ----------- # Inheritance # ----------- # not permited from arguements yet. would need to happen above the ""PROCESS ARGUMENT"" block # self._inherit(*array(get(seed, 'inherit', []))) if type(value) is list and type(value[0]) is tuple: # complex for v in value: ud, pd = self._harvest_args(key, seed, v, multi) userkwargs.update(ud) parser.update(pd) else: ud, pd = self._harvest_args(key, seed, value, multi) userkwargs.update(ud) parser.update(pd) # ------------ # Ignored Keys # ------------ for seed in self.seeds: ignores = set(array(get(seed, 'ignore'))) if ignores: if ignores & original_kwargs: if not get(seed, 'silent'): additionals = ignores & original_kwargs raise valideer.ValidationError("additional properties: %s" % ",".join(additionals), additionals) [userkwargs.pop(key) for key in ignores if key in userkwargs] # ------------------------- # Custom Operators (part 1) # ------------------------- operators = {} for key, value in userkwargs.items(): rk = key agg = None if key.find('_')>-1: agg, rk = tuple(key.split('_')) seed = self.arguments.get(rk, self.arguments.get(rk+'[]')) if seed: if type(value) is list: operators[key] = [] # need to remove the operator for validating new_values = [] for v in value: operator, v = self._operator(v, *seed.get('column', "").rsplit("::", 1)) new_values.append(v) operators[key].append((agg, operator) if agg else operator) userkwargs[key] = new_values else: operator, value = self._operator(value, *seed.get('column', "").rsplit("::", 1)) 
operators[key] = (agg, operator) if agg else operator userkwargs[key] = value # ----------------- # Plant Sort Method # ----------------- if 'sortby' in userkwargs: seed = self.arguments.get(userkwargs['sortby'].lower(), self.arguments.get(userkwargs['sortby'].lower()+'[]')) if seed: seed['id'] = str(userkwargs['sortby'].lower()) for r in set(requires): if userkwargs.get(r) is None: raise valideer.ValidationError("required property not set: %s" % r, r) # -------- # Validate # -------- parser = valideer.parse(parser, additional_properties=False) validated = parser.validate(userkwargs, adapt=self.navigator.adapter()) validated.update(self.network_kwargs) # operators validated # --------------------------- | -------------------------------- # { { # "type": ["!", "!"], "type": ['a', 'b'], # "total": "<", "total": "50", # "tax": ("avg, ">"), "tax": "1", # "time": None "time": "2014" # } } return operators, validated
def get_grad_zmat(self, construction_table, as_function=True):
    r"""Return the gradient for the transformation to a Zmatrix.

    With ``as_function=True`` (default) the result is
    :func:`~chemcoord.xyz_functions.apply_grad_zmat_tensor` with the
    gradient tensor and ``construction_table`` already bound, so it can be
    applied directly to :class:`~Cartesian` instances carrying distortions
    in cartesian space. With ``as_function=False`` the raw
    ``(3, n, n, 3)`` derivative tensor is returned, indexed numerator-first
    with the denominator indices swapped and appended:

    .. math::

        \left(
            \frac{\partial \mathbf{C}}{\partial \mathbf{X}}
        \right)_{i, j, k, l}
        =
        \frac{\partial \mathbf{C}_{i, j}}{\partial \mathbf{X}_{l, k}}

    where the coordinates are assumed column-aligned as a ``3 * n`` array
    (:math:`\mathbf{X}` cartesian, :math:`\mathbf{C}` Zmatrix space).

    Args:
        construction_table (pandas.DataFrame): must use this molecule's
            index.
        as_function (bool): choose between the partially-applied function
            and the bare tensor.

    Returns:
        (func, np.array): Depending on ``as_function`` a function or the
        gradient tensor.
    """
    if (construction_table.index != self.index).any():
        message = "construction_table and self must use the same index"
        raise ValueError(message)

    # Map the b/a/d reference labels to positional indices for the C layer.
    refs = construction_table.loc[:, ['b', 'a', 'd']]
    refs = refs.replace(constants.int_label)
    label_to_pos = {label: pos for pos, label in enumerate(refs.index)}
    c_table = refs.replace(label_to_pos).values.T

    coords = self.loc[:, ['x', 'y', 'z']].values.T
    if coords.dtype == np.dtype('i8'):
        # The gradient routine needs floating-point coordinates.
        coords = coords.astype('f8')

    err, row, grad_C = transformation.get_grad_C(coords, c_table)
    if err == ERR_CODE_InvalidReference:
        # Translate the failing positional row back to the user's label.
        pos_to_label = dict(enumerate(self.index))
        i = pos_to_label[row]
        b, a, d = construction_table.loc[i, ['b', 'a', 'd']]
        raise InvalidReference(i=i, b=b, a=a, d=d)

    if as_function:
        return partial(xyz_functions.apply_grad_zmat_tensor,
                       grad_C, construction_table)
    return grad_C
r"""Return the gradient for the transformation to a Zmatrix. If ``as_function`` is True, a function is returned that can be directly applied onto instances of :class:`~Cartesian`, which contain the applied distortions in cartesian space. In this case the user does not have to worry about indexing and correct application of the tensor product. Basically this is the function :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. If ``as_function`` is False, a ``(3, n, n, 3)`` tensor is returned, which contains the values of the derivatives. Since a ``n * 3`` matrix is deriven after a ``n * 3`` matrix, it is important to specify the used rules for indexing the resulting tensor. The rule is very simple: The indices of the numerator are used first then the indices of the denominator get swapped and appended: .. math:: \left( \frac{\partial \mathbf{Y}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{Y}_{i, j}}{\partial \mathbf{X}_{l, k}} Applying this rule to an example function: .. math:: f \colon \mathbb{R}^3 \rightarrow \mathbb{R} Gives as derivative the known row-vector gradient: .. math:: (\nabla f)_{1, i} = \frac{\partial f}{\partial x_i} \qquad i \in \{1, 2, 3\} .. note:: The row wise alignment of the XYZ files makes sense for these CSV like files. But it is mathematically advantageous and sometimes (depending on the memory layout) numerically better to use a column wise alignment of the coordinates. In this function the resulting tensor assumes a ``3 * n`` array for the coordinates. If .. math:: \mathbf{X}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n \\ \mathbf{C}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n denote the positions in cartesian and Zmatrix space, The complete tensor may be written as: .. 
math:: \left( \frac{\partial \mathbf{C}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{C}_{i, j}}{\partial \mathbf{X}_{l, k}} Args: construction_table (pandas.DataFrame): as_function (bool): Return a tensor or :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. Returns: (func, np.array): Depending on ``as_function`` return a tensor or :func:`~chemcoord.xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments.
Below is the the instruction that describes the task: ### Input: r"""Return the gradient for the transformation to a Zmatrix. If ``as_function`` is True, a function is returned that can be directly applied onto instances of :class:`~Cartesian`, which contain the applied distortions in cartesian space. In this case the user does not have to worry about indexing and correct application of the tensor product. Basically this is the function :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. If ``as_function`` is False, a ``(3, n, n, 3)`` tensor is returned, which contains the values of the derivatives. Since a ``n * 3`` matrix is deriven after a ``n * 3`` matrix, it is important to specify the used rules for indexing the resulting tensor. The rule is very simple: The indices of the numerator are used first then the indices of the denominator get swapped and appended: .. math:: \left( \frac{\partial \mathbf{Y}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{Y}_{i, j}}{\partial \mathbf{X}_{l, k}} Applying this rule to an example function: .. math:: f \colon \mathbb{R}^3 \rightarrow \mathbb{R} Gives as derivative the known row-vector gradient: .. math:: (\nabla f)_{1, i} = \frac{\partial f}{\partial x_i} \qquad i \in \{1, 2, 3\} .. note:: The row wise alignment of the XYZ files makes sense for these CSV like files. But it is mathematically advantageous and sometimes (depending on the memory layout) numerically better to use a column wise alignment of the coordinates. In this function the resulting tensor assumes a ``3 * n`` array for the coordinates. If .. math:: \mathbf{X}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n \\ \mathbf{C}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n denote the positions in cartesian and Zmatrix space, The complete tensor may be written as: .. 
math:: \left( \frac{\partial \mathbf{C}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{C}_{i, j}}{\partial \mathbf{X}_{l, k}} Args: construction_table (pandas.DataFrame): as_function (bool): Return a tensor or :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. Returns: (func, np.array): Depending on ``as_function`` return a tensor or :func:`~chemcoord.xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. ### Response: def get_grad_zmat(self, construction_table, as_function=True): r"""Return the gradient for the transformation to a Zmatrix. If ``as_function`` is True, a function is returned that can be directly applied onto instances of :class:`~Cartesian`, which contain the applied distortions in cartesian space. In this case the user does not have to worry about indexing and correct application of the tensor product. Basically this is the function :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. If ``as_function`` is False, a ``(3, n, n, 3)`` tensor is returned, which contains the values of the derivatives. Since a ``n * 3`` matrix is deriven after a ``n * 3`` matrix, it is important to specify the used rules for indexing the resulting tensor. The rule is very simple: The indices of the numerator are used first then the indices of the denominator get swapped and appended: .. math:: \left( \frac{\partial \mathbf{Y}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{Y}_{i, j}}{\partial \mathbf{X}_{l, k}} Applying this rule to an example function: .. math:: f \colon \mathbb{R}^3 \rightarrow \mathbb{R} Gives as derivative the known row-vector gradient: .. math:: (\nabla f)_{1, i} = \frac{\partial f}{\partial x_i} \qquad i \in \{1, 2, 3\} .. note:: The row wise alignment of the XYZ files makes sense for these CSV like files. 
But it is mathematically advantageous and sometimes (depending on the memory layout) numerically better to use a column wise alignment of the coordinates. In this function the resulting tensor assumes a ``3 * n`` array for the coordinates. If .. math:: \mathbf{X}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n \\ \mathbf{C}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n denote the positions in cartesian and Zmatrix space, The complete tensor may be written as: .. math:: \left( \frac{\partial \mathbf{C}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{C}_{i, j}}{\partial \mathbf{X}_{l, k}} Args: construction_table (pandas.DataFrame): as_function (bool): Return a tensor or :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. Returns: (func, np.array): Depending on ``as_function`` return a tensor or :func:`~chemcoord.xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. """ if (construction_table.index != self.index).any(): message = "construction_table and self must use the same index" raise ValueError(message) c_table = construction_table.loc[:, ['b', 'a', 'd']] c_table = c_table.replace(constants.int_label) c_table = c_table.replace({k: v for v, k in enumerate(c_table.index)}) c_table = c_table.values.T X = self.loc[:, ['x', 'y', 'z']].values.T if X.dtype == np.dtype('i8'): X = X.astype('f8') err, row, grad_C = transformation.get_grad_C(X, c_table) if err == ERR_CODE_InvalidReference: rename = dict(enumerate(self.index)) i = rename[row] b, a, d = construction_table.loc[i, ['b', 'a', 'd']] raise InvalidReference(i=i, b=b, a=a, d=d) if as_function: return partial(xyz_functions.apply_grad_zmat_tensor, grad_C, construction_table) else: return grad_C
def get_name(key):
    """Given a keyword, try to get the name of it.

    .. versionadded:: 4.2

    Definition dicts are defined in keywords.py. The definition's
    ``name`` entry is returned when present; otherwise the keyword
    itself is returned unchanged.

        keyword = 'layer_purpose'
        kio = safe.utilities.keyword_io.Keyword_IO()
        name = kio.get_name(keyword)
        print name

    :param key: A keyword key.
    :type key: str

    :returns: The name of the keyword
    :rtype: str
    """
    definition_dict = definition(key)
    if not definition_dict:
        # No definition known for this key: fall back to the keyword.
        return key
    return definition_dict.get('name', key)
Given a keyword, try to get the name of it. .. versionadded:: 4.2 Definition dicts are defined in keywords.py. We try to return the name if present, otherwise we return none. keyword = 'layer_purpose' kio = safe.utilities.keyword_io.Keyword_IO() name = kio.get_name(keyword) print name :param key: A keyword key. :type key: str :returns: The name of the keyword :rtype: str
Below is the the instruction that describes the task: ### Input: Given a keyword, try to get the name of it. .. versionadded:: 4.2 Definition dicts are defined in keywords.py. We try to return the name if present, otherwise we return none. keyword = 'layer_purpose' kio = safe.utilities.keyword_io.Keyword_IO() name = kio.get_name(keyword) print name :param key: A keyword key. :type key: str :returns: The name of the keyword :rtype: str ### Response: def get_name(key): """Given a keyword, try to get the name of it. .. versionadded:: 4.2 Definition dicts are defined in keywords.py. We try to return the name if present, otherwise we return none. keyword = 'layer_purpose' kio = safe.utilities.keyword_io.Keyword_IO() name = kio.get_name(keyword) print name :param key: A keyword key. :type key: str :returns: The name of the keyword :rtype: str """ definition_dict = definition(key) if definition_dict: return definition_dict.get('name', key) # Else, return the keyword return key
def _names(lexer): """Return a tuple of names.""" first = _expect_token(lexer, {NameToken}).value rest = _zom_name(lexer) rnames = (first, ) + rest return rnames[::-1]
Return a tuple of names.
Below is the the instruction that describes the task: ### Input: Return a tuple of names. ### Response: def _names(lexer): """Return a tuple of names.""" first = _expect_token(lexer, {NameToken}).value rest = _zom_name(lexer) rnames = (first, ) + rest return rnames[::-1]
def waveform_image(mediafile, xy_size, outdir=None, center_color=None, outer_color=None, bg_color=None): """ Create waveform image from audio data. Return path to created image file. """ try: import waveform except ImportError, exc: raise ImportError("%s [get it at https://github.com/superjoe30/PyWaveform]" % exc) outdir = outdir or os.path.dirname(mediafile) outfile = os.path.join(outdir, os.path.splitext(os.path.basename(mediafile))[0] + ".png") with transcode.to_wav(mediafile) as wavfile: # Draw using a gradient waveform.draw(wavfile, outfile, xy_size, bgColor=bg_color or WAVE_BG_COLOR, fgGradientCenter=center_color or WAVE_CENTER_COLOR, fgGradientOuter=outer_color or WAVE_OUTER_COLOR) return outfile
Create waveform image from audio data. Return path to created image file.
Below is the the instruction that describes the task: ### Input: Create waveform image from audio data. Return path to created image file. ### Response: def waveform_image(mediafile, xy_size, outdir=None, center_color=None, outer_color=None, bg_color=None): """ Create waveform image from audio data. Return path to created image file. """ try: import waveform except ImportError, exc: raise ImportError("%s [get it at https://github.com/superjoe30/PyWaveform]" % exc) outdir = outdir or os.path.dirname(mediafile) outfile = os.path.join(outdir, os.path.splitext(os.path.basename(mediafile))[0] + ".png") with transcode.to_wav(mediafile) as wavfile: # Draw using a gradient waveform.draw(wavfile, outfile, xy_size, bgColor=bg_color or WAVE_BG_COLOR, fgGradientCenter=center_color or WAVE_CENTER_COLOR, fgGradientOuter=outer_color or WAVE_OUTER_COLOR) return outfile
def update_processcount(self, plist): """Update the global process count from the current processes list""" # Update the maximum process ID (pid) number self.processcount['pid_max'] = self.pid_max # For each key in the processcount dict # count the number of processes with the same status for k in iterkeys(self.processcount): self.processcount[k] = len(list(filter(lambda v: v['status'] is k, plist))) # Compute thread self.processcount['thread'] = sum(i['num_threads'] for i in plist if i['num_threads'] is not None) # Compute total self.processcount['total'] = len(plist)
Update the global process count from the current processes list
Below is the the instruction that describes the task: ### Input: Update the global process count from the current processes list ### Response: def update_processcount(self, plist): """Update the global process count from the current processes list""" # Update the maximum process ID (pid) number self.processcount['pid_max'] = self.pid_max # For each key in the processcount dict # count the number of processes with the same status for k in iterkeys(self.processcount): self.processcount[k] = len(list(filter(lambda v: v['status'] is k, plist))) # Compute thread self.processcount['thread'] = sum(i['num_threads'] for i in plist if i['num_threads'] is not None) # Compute total self.processcount['total'] = len(plist)
def UpdateLease(self, duration): """Updates the lease and flushes the object. The lease is set to expire after the "duration" time from the present moment. This method is supposed to be used when operation that requires locking may run for a time that exceeds the lease time specified in OpenWithLock(). See flows/hunts locking for an example. Args: duration: Integer number of seconds. Lease expiry time will be set to "time.time() + duration". Raises: LockError: if the object is not currently locked or the lease has expired. """ if not self.locked: raise LockError("Object must be locked to update the lease: %s." % self.urn) if self.CheckLease() == 0: self._RaiseLockError("UpdateLease") self.transaction.UpdateLease(duration)
Updates the lease and flushes the object. The lease is set to expire after the "duration" time from the present moment. This method is supposed to be used when operation that requires locking may run for a time that exceeds the lease time specified in OpenWithLock(). See flows/hunts locking for an example. Args: duration: Integer number of seconds. Lease expiry time will be set to "time.time() + duration". Raises: LockError: if the object is not currently locked or the lease has expired.
Below is the the instruction that describes the task: ### Input: Updates the lease and flushes the object. The lease is set to expire after the "duration" time from the present moment. This method is supposed to be used when operation that requires locking may run for a time that exceeds the lease time specified in OpenWithLock(). See flows/hunts locking for an example. Args: duration: Integer number of seconds. Lease expiry time will be set to "time.time() + duration". Raises: LockError: if the object is not currently locked or the lease has expired. ### Response: def UpdateLease(self, duration): """Updates the lease and flushes the object. The lease is set to expire after the "duration" time from the present moment. This method is supposed to be used when operation that requires locking may run for a time that exceeds the lease time specified in OpenWithLock(). See flows/hunts locking for an example. Args: duration: Integer number of seconds. Lease expiry time will be set to "time.time() + duration". Raises: LockError: if the object is not currently locked or the lease has expired. """ if not self.locked: raise LockError("Object must be locked to update the lease: %s." % self.urn) if self.CheckLease() == 0: self._RaiseLockError("UpdateLease") self.transaction.UpdateLease(duration)
def prepare_args(model_matrix, response, model_coefficients, predicted_linear_response, offset, name=None): """Helper to `fit` which sanitizes input args. Args: model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row represents a sample's features. response: (Batch of) vector-shaped `Tensor` where each element represents a sample's observed response (to the corresponding row of features). Must have same `dtype` as `model_matrix`. model_coefficients: Optional (batch of) vector-shaped `Tensor` representing the model coefficients, one for each column in `model_matrix`. Must have same `dtype` as `model_matrix`. Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`. predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching `response`; represents `offset` shifted initial linear predictions based on current `model_coefficients`. Default value: `offset` if `model_coefficients is None`, and `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset` otherwise. offset: Optional `Tensor` with `shape`, `dtype` matching `response`; represents constant shift applied to `predicted_linear_response`. Default value: `None` (i.e., `tf.zeros_like(response)`). name: Python `str` used as name prefix to ops created by this function. Default value: `"prepare_args"`. Returns: model_matrix: A `Tensor` with `shape`, `dtype` and values of the `model_matrix` argument. response: A `Tensor` with `shape`, `dtype` and values of the `response` argument. model_coefficients_start: A `Tensor` with `shape`, `dtype` and values of the `model_coefficients_start` argument if specified. A (batch of) vector-shaped `Tensors` with `dtype` matching `model_matrix` containing the default starting point otherwise. predicted_linear_response: A `Tensor` with `shape`, `dtype` and values of the `predicted_linear_response` argument if specified. A `Tensor` with `shape`, `dtype` matching `response` containing the default value otherwise. 
offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument if specified or `None` otherwise. """ graph_deps = [model_matrix, response, model_coefficients, predicted_linear_response, offset] with tf.compat.v1.name_scope(name, 'prepare_args', graph_deps): dtype = dtype_util.common_dtype(graph_deps, np.float32) model_matrix = tf.convert_to_tensor( value=model_matrix, dtype=dtype, name='model_matrix') if offset is not None: offset = tf.convert_to_tensor(value=offset, dtype=dtype, name='offset') response = tf.convert_to_tensor( value=response, dtype=dtype, name='response') use_default_model_coefficients = model_coefficients is None if use_default_model_coefficients: # User did not supply model coefficients; assume they're all zero. batch_shape = tf.shape(input=model_matrix)[:-2] num_columns = tf.shape(input=model_matrix)[-1] model_coefficients = tf.zeros( shape=tf.concat([batch_shape, [num_columns]], axis=0), dtype=dtype, name='model_coefficients') else: # User did supply model coefficients; convert to Tensor in case it's # numpy or literal. model_coefficients = tf.convert_to_tensor( value=model_coefficients, dtype=dtype, name='model_coefficients') if predicted_linear_response is None: if use_default_model_coefficients: # Since we're using zeros for model_coefficients, we know the predicted # linear response will also be all zeros. if offset is None: predicted_linear_response = tf.zeros_like( response, dtype, name='predicted_linear_response') else: predicted_linear_response = tf.broadcast_to( offset, tf.shape(input=response), name='predicted_linear_response') else: # We were given model_coefficients but not the predicted linear # response. 
predicted_linear_response = calculate_linear_predictor( model_matrix, model_coefficients, offset) else: predicted_linear_response = tf.convert_to_tensor( value=predicted_linear_response, dtype=dtype, name='predicted_linear_response') return [ model_matrix, response, model_coefficients, predicted_linear_response, offset, ]
Helper to `fit` which sanitizes input args. Args: model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row represents a sample's features. response: (Batch of) vector-shaped `Tensor` where each element represents a sample's observed response (to the corresponding row of features). Must have same `dtype` as `model_matrix`. model_coefficients: Optional (batch of) vector-shaped `Tensor` representing the model coefficients, one for each column in `model_matrix`. Must have same `dtype` as `model_matrix`. Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`. predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching `response`; represents `offset` shifted initial linear predictions based on current `model_coefficients`. Default value: `offset` if `model_coefficients is None`, and `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset` otherwise. offset: Optional `Tensor` with `shape`, `dtype` matching `response`; represents constant shift applied to `predicted_linear_response`. Default value: `None` (i.e., `tf.zeros_like(response)`). name: Python `str` used as name prefix to ops created by this function. Default value: `"prepare_args"`. Returns: model_matrix: A `Tensor` with `shape`, `dtype` and values of the `model_matrix` argument. response: A `Tensor` with `shape`, `dtype` and values of the `response` argument. model_coefficients_start: A `Tensor` with `shape`, `dtype` and values of the `model_coefficients_start` argument if specified. A (batch of) vector-shaped `Tensors` with `dtype` matching `model_matrix` containing the default starting point otherwise. predicted_linear_response: A `Tensor` with `shape`, `dtype` and values of the `predicted_linear_response` argument if specified. A `Tensor` with `shape`, `dtype` matching `response` containing the default value otherwise. offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument if specified or `None` otherwise.
Below is the the instruction that describes the task: ### Input: Helper to `fit` which sanitizes input args. Args: model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row represents a sample's features. response: (Batch of) vector-shaped `Tensor` where each element represents a sample's observed response (to the corresponding row of features). Must have same `dtype` as `model_matrix`. model_coefficients: Optional (batch of) vector-shaped `Tensor` representing the model coefficients, one for each column in `model_matrix`. Must have same `dtype` as `model_matrix`. Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`. predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching `response`; represents `offset` shifted initial linear predictions based on current `model_coefficients`. Default value: `offset` if `model_coefficients is None`, and `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset` otherwise. offset: Optional `Tensor` with `shape`, `dtype` matching `response`; represents constant shift applied to `predicted_linear_response`. Default value: `None` (i.e., `tf.zeros_like(response)`). name: Python `str` used as name prefix to ops created by this function. Default value: `"prepare_args"`. Returns: model_matrix: A `Tensor` with `shape`, `dtype` and values of the `model_matrix` argument. response: A `Tensor` with `shape`, `dtype` and values of the `response` argument. model_coefficients_start: A `Tensor` with `shape`, `dtype` and values of the `model_coefficients_start` argument if specified. A (batch of) vector-shaped `Tensors` with `dtype` matching `model_matrix` containing the default starting point otherwise. predicted_linear_response: A `Tensor` with `shape`, `dtype` and values of the `predicted_linear_response` argument if specified. A `Tensor` with `shape`, `dtype` matching `response` containing the default value otherwise. 
offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument if specified or `None` otherwise. ### Response: def prepare_args(model_matrix, response, model_coefficients, predicted_linear_response, offset, name=None): """Helper to `fit` which sanitizes input args. Args: model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row represents a sample's features. response: (Batch of) vector-shaped `Tensor` where each element represents a sample's observed response (to the corresponding row of features). Must have same `dtype` as `model_matrix`. model_coefficients: Optional (batch of) vector-shaped `Tensor` representing the model coefficients, one for each column in `model_matrix`. Must have same `dtype` as `model_matrix`. Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`. predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching `response`; represents `offset` shifted initial linear predictions based on current `model_coefficients`. Default value: `offset` if `model_coefficients is None`, and `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset` otherwise. offset: Optional `Tensor` with `shape`, `dtype` matching `response`; represents constant shift applied to `predicted_linear_response`. Default value: `None` (i.e., `tf.zeros_like(response)`). name: Python `str` used as name prefix to ops created by this function. Default value: `"prepare_args"`. Returns: model_matrix: A `Tensor` with `shape`, `dtype` and values of the `model_matrix` argument. response: A `Tensor` with `shape`, `dtype` and values of the `response` argument. model_coefficients_start: A `Tensor` with `shape`, `dtype` and values of the `model_coefficients_start` argument if specified. A (batch of) vector-shaped `Tensors` with `dtype` matching `model_matrix` containing the default starting point otherwise. 
predicted_linear_response: A `Tensor` with `shape`, `dtype` and values of the `predicted_linear_response` argument if specified. A `Tensor` with `shape`, `dtype` matching `response` containing the default value otherwise. offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument if specified or `None` otherwise. """ graph_deps = [model_matrix, response, model_coefficients, predicted_linear_response, offset] with tf.compat.v1.name_scope(name, 'prepare_args', graph_deps): dtype = dtype_util.common_dtype(graph_deps, np.float32) model_matrix = tf.convert_to_tensor( value=model_matrix, dtype=dtype, name='model_matrix') if offset is not None: offset = tf.convert_to_tensor(value=offset, dtype=dtype, name='offset') response = tf.convert_to_tensor( value=response, dtype=dtype, name='response') use_default_model_coefficients = model_coefficients is None if use_default_model_coefficients: # User did not supply model coefficients; assume they're all zero. batch_shape = tf.shape(input=model_matrix)[:-2] num_columns = tf.shape(input=model_matrix)[-1] model_coefficients = tf.zeros( shape=tf.concat([batch_shape, [num_columns]], axis=0), dtype=dtype, name='model_coefficients') else: # User did supply model coefficients; convert to Tensor in case it's # numpy or literal. model_coefficients = tf.convert_to_tensor( value=model_coefficients, dtype=dtype, name='model_coefficients') if predicted_linear_response is None: if use_default_model_coefficients: # Since we're using zeros for model_coefficients, we know the predicted # linear response will also be all zeros. if offset is None: predicted_linear_response = tf.zeros_like( response, dtype, name='predicted_linear_response') else: predicted_linear_response = tf.broadcast_to( offset, tf.shape(input=response), name='predicted_linear_response') else: # We were given model_coefficients but not the predicted linear # response. 
predicted_linear_response = calculate_linear_predictor( model_matrix, model_coefficients, offset) else: predicted_linear_response = tf.convert_to_tensor( value=predicted_linear_response, dtype=dtype, name='predicted_linear_response') return [ model_matrix, response, model_coefficients, predicted_linear_response, offset, ]
def parallel_prep_region(samples, run_parallel): """Perform full pre-variant calling BAM prep work on regions. """ file_key = "work_bam" split_fn = _split_by_regions("bamprep", "-prep.bam", file_key) # identify samples that do not need preparation -- no recalibration or realignment extras = [] torun = [] for data in [x[0] for x in samples]: if data.get("work_bam"): data["align_bam"] = data["work_bam"] if (not dd.get_realign(data) and not dd.get_variantcaller(data)): extras.append([data]) elif not data.get(file_key): extras.append([data]) else: # Do not want to re-run duplicate marking after realignment data["config"]["algorithm"]["orig_markduplicates"] = dd.get_mark_duplicates(data) data = dd.set_mark_duplicates(data, False) torun.append([data]) return extras + parallel_split_combine(torun, split_fn, run_parallel, "piped_bamprep", _add_combine_info, file_key, ["config"])
Perform full pre-variant calling BAM prep work on regions.
Below is the the instruction that describes the task: ### Input: Perform full pre-variant calling BAM prep work on regions. ### Response: def parallel_prep_region(samples, run_parallel): """Perform full pre-variant calling BAM prep work on regions. """ file_key = "work_bam" split_fn = _split_by_regions("bamprep", "-prep.bam", file_key) # identify samples that do not need preparation -- no recalibration or realignment extras = [] torun = [] for data in [x[0] for x in samples]: if data.get("work_bam"): data["align_bam"] = data["work_bam"] if (not dd.get_realign(data) and not dd.get_variantcaller(data)): extras.append([data]) elif not data.get(file_key): extras.append([data]) else: # Do not want to re-run duplicate marking after realignment data["config"]["algorithm"]["orig_markduplicates"] = dd.get_mark_duplicates(data) data = dd.set_mark_duplicates(data, False) torun.append([data]) return extras + parallel_split_combine(torun, split_fn, run_parallel, "piped_bamprep", _add_combine_info, file_key, ["config"])
def manage_subscription(): """Shows how to interact with a parameter subscription.""" subscription = processor.create_parameter_subscription([ '/YSS/SIMULATOR/BatteryVoltage1' ]) sleep(5) print('Adding extra items to the existing subscription...') subscription.add([ '/YSS/SIMULATOR/Alpha', '/YSS/SIMULATOR/BatteryVoltage2', 'MDB:OPS Name/SIMULATOR_PrimBusVoltage1', ]) sleep(5) print('Shrinking subscription...') subscription.remove('/YSS/SIMULATOR/Alpha') print('Cancelling the subscription...') subscription.cancel() print('Last values from cache:') print(subscription.get_value('/YSS/SIMULATOR/BatteryVoltage1')) print(subscription.get_value('/YSS/SIMULATOR/BatteryVoltage2')) print(subscription.get_value('/YSS/SIMULATOR/Alpha')) print(subscription.get_value('MDB:OPS Name/SIMULATOR_PrimBusVoltage1'))
Shows how to interact with a parameter subscription.
Below is the the instruction that describes the task: ### Input: Shows how to interact with a parameter subscription. ### Response: def manage_subscription(): """Shows how to interact with a parameter subscription.""" subscription = processor.create_parameter_subscription([ '/YSS/SIMULATOR/BatteryVoltage1' ]) sleep(5) print('Adding extra items to the existing subscription...') subscription.add([ '/YSS/SIMULATOR/Alpha', '/YSS/SIMULATOR/BatteryVoltage2', 'MDB:OPS Name/SIMULATOR_PrimBusVoltage1', ]) sleep(5) print('Shrinking subscription...') subscription.remove('/YSS/SIMULATOR/Alpha') print('Cancelling the subscription...') subscription.cancel() print('Last values from cache:') print(subscription.get_value('/YSS/SIMULATOR/BatteryVoltage1')) print(subscription.get_value('/YSS/SIMULATOR/BatteryVoltage2')) print(subscription.get_value('/YSS/SIMULATOR/Alpha')) print(subscription.get_value('MDB:OPS Name/SIMULATOR_PrimBusVoltage1'))
def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0): """KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1). Args: mu: mu parameter of the distribution. log_var: log(var) parameter of the distribution. mu_p: optional mu from a learned prior distribution log_var_p: optional log(var) from a learned prior distribution Returns: the KL loss. """ batch_size = shape_list(mu)[0] prior_distribution = tfp.distributions.Normal( mu_p, tf.exp(tf.multiply(0.5, log_var_p))) posterior_distribution = tfp.distributions.Normal( mu, tf.exp(tf.multiply(0.5, log_var))) kld = tfp.distributions.kl_divergence(posterior_distribution, prior_distribution) return tf.reduce_sum(kld) / to_float(batch_size)
KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1). Args: mu: mu parameter of the distribution. log_var: log(var) parameter of the distribution. mu_p: optional mu from a learned prior distribution log_var_p: optional log(var) from a learned prior distribution Returns: the KL loss.
Below is the the instruction that describes the task: ### Input: KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1). Args: mu: mu parameter of the distribution. log_var: log(var) parameter of the distribution. mu_p: optional mu from a learned prior distribution log_var_p: optional log(var) from a learned prior distribution Returns: the KL loss. ### Response: def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0): """KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1). Args: mu: mu parameter of the distribution. log_var: log(var) parameter of the distribution. mu_p: optional mu from a learned prior distribution log_var_p: optional log(var) from a learned prior distribution Returns: the KL loss. """ batch_size = shape_list(mu)[0] prior_distribution = tfp.distributions.Normal( mu_p, tf.exp(tf.multiply(0.5, log_var_p))) posterior_distribution = tfp.distributions.Normal( mu, tf.exp(tf.multiply(0.5, log_var))) kld = tfp.distributions.kl_divergence(posterior_distribution, prior_distribution) return tf.reduce_sum(kld) / to_float(batch_size)
def get_connected_services(self, project_id, kind=None): """GetConnectedServices. [Preview API] :param str project_id: :param str kind: :rtype: [WebApiConnectedService] """ route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') query_parameters = {} if kind is not None: query_parameters['kind'] = self._serialize.query('kind', kind, 'str') response = self._send(http_method='GET', location_id='b4f70219-e18b-42c5-abe3-98b07d35525e', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[WebApiConnectedService]', self._unwrap_collection(response))
GetConnectedServices. [Preview API] :param str project_id: :param str kind: :rtype: [WebApiConnectedService]
Below is the the instruction that describes the task: ### Input: GetConnectedServices. [Preview API] :param str project_id: :param str kind: :rtype: [WebApiConnectedService] ### Response: def get_connected_services(self, project_id, kind=None): """GetConnectedServices. [Preview API] :param str project_id: :param str kind: :rtype: [WebApiConnectedService] """ route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') query_parameters = {} if kind is not None: query_parameters['kind'] = self._serialize.query('kind', kind, 'str') response = self._send(http_method='GET', location_id='b4f70219-e18b-42c5-abe3-98b07d35525e', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[WebApiConnectedService]', self._unwrap_collection(response))
def create_cfg(self, cfg_file, defaults=None, mode='json'): ''' set mode to json or yaml? probably remove this option..Todo Creates the config file for your app with default values The file will only be created if it doesn't exits also sets up the first_run attribute. also sets correct windows permissions you can add custom stuff to the config by doing app.cfg['fkdsfa'] = 'fdsaf' # todo auto save on change remember to call cfg.save() ''' assert mode in ('json', 'yaml') self.cfg_mode = mode self.cfg_file = cfg_file try: self.cfg = CfgDict(app=self, cfg=self.load_cfg()) logging.info('cfg file found : %s' % self.cfg_file) except FileNotFoundError: self.cfg = CfgDict(app=self, cfg={'first_run': True}) with suppress(TypeError): self.cfg.update(defaults) self.cfg.save() set_windows_permissions(self.cfg_file) logging.info( 'Created cfg file for first time!: %s' % self.cfg_file) if self._check_first_run(): self.first_run = True else: self.first_run = False
set mode to json or yaml? probably remove this option..Todo Creates the config file for your app with default values The file will only be created if it doesn't exits also sets up the first_run attribute. also sets correct windows permissions you can add custom stuff to the config by doing app.cfg['fkdsfa'] = 'fdsaf' # todo auto save on change remember to call cfg.save()
Below is the the instruction that describes the task: ### Input: set mode to json or yaml? probably remove this option..Todo Creates the config file for your app with default values The file will only be created if it doesn't exits also sets up the first_run attribute. also sets correct windows permissions you can add custom stuff to the config by doing app.cfg['fkdsfa'] = 'fdsaf' # todo auto save on change remember to call cfg.save() ### Response: def create_cfg(self, cfg_file, defaults=None, mode='json'): ''' set mode to json or yaml? probably remove this option..Todo Creates the config file for your app with default values The file will only be created if it doesn't exits also sets up the first_run attribute. also sets correct windows permissions you can add custom stuff to the config by doing app.cfg['fkdsfa'] = 'fdsaf' # todo auto save on change remember to call cfg.save() ''' assert mode in ('json', 'yaml') self.cfg_mode = mode self.cfg_file = cfg_file try: self.cfg = CfgDict(app=self, cfg=self.load_cfg()) logging.info('cfg file found : %s' % self.cfg_file) except FileNotFoundError: self.cfg = CfgDict(app=self, cfg={'first_run': True}) with suppress(TypeError): self.cfg.update(defaults) self.cfg.save() set_windows_permissions(self.cfg_file) logging.info( 'Created cfg file for first time!: %s' % self.cfg_file) if self._check_first_run(): self.first_run = True else: self.first_run = False
def cookies(self) -> Mapping[str, str]: """Return request cookies. A read-only dictionary-like object. """ raw = self.headers.get(hdrs.COOKIE, '') parsed = SimpleCookie(raw) return MappingProxyType( {key: val.value for key, val in parsed.items()})
Return request cookies. A read-only dictionary-like object.
Below is the the instruction that describes the task: ### Input: Return request cookies. A read-only dictionary-like object. ### Response: def cookies(self) -> Mapping[str, str]: """Return request cookies. A read-only dictionary-like object. """ raw = self.headers.get(hdrs.COOKIE, '') parsed = SimpleCookie(raw) return MappingProxyType( {key: val.value for key, val in parsed.items()})
async def receive_bilateral_response(self): """Receive the response to a request made to the Watchman service.""" self._check_receive_loop() resp = await self.bilateral_response_queue.get() self._check_error(resp) return resp
Receive the response to a request made to the Watchman service.
Below is the the instruction that describes the task: ### Input: Receive the response to a request made to the Watchman service. ### Response: async def receive_bilateral_response(self): """Receive the response to a request made to the Watchman service.""" self._check_receive_loop() resp = await self.bilateral_response_queue.get() self._check_error(resp) return resp
def get_inline_func(inline_str, modules=None, **stream_kwargs): """returns a function decorated by `cbox.stream` decorator. :param str inline_str: the inline function to execute, can use `s` - local variable as the input line/char/raw (according to `input_type` param). :param str modules: comma separated list of modules to import before running the inline function. :param dict stream_kwargs: optional arguments to `cbox.stream` decorator :rtype: callable """ if not _is_compilable(inline_str): raise ValueError( 'cannot compile the inline expression - "%s"' % inline_str ) inline_globals = _import_inline_modules(modules) func = _inline2func(inline_str, inline_globals, **stream_kwargs) return func
returns a function decorated by `cbox.stream` decorator. :param str inline_str: the inline function to execute, can use `s` - local variable as the input line/char/raw (according to `input_type` param). :param str modules: comma separated list of modules to import before running the inline function. :param dict stream_kwargs: optional arguments to `cbox.stream` decorator :rtype: callable
Below is the the instruction that describes the task: ### Input: returns a function decorated by `cbox.stream` decorator. :param str inline_str: the inline function to execute, can use `s` - local variable as the input line/char/raw (according to `input_type` param). :param str modules: comma separated list of modules to import before running the inline function. :param dict stream_kwargs: optional arguments to `cbox.stream` decorator :rtype: callable ### Response: def get_inline_func(inline_str, modules=None, **stream_kwargs): """returns a function decorated by `cbox.stream` decorator. :param str inline_str: the inline function to execute, can use `s` - local variable as the input line/char/raw (according to `input_type` param). :param str modules: comma separated list of modules to import before running the inline function. :param dict stream_kwargs: optional arguments to `cbox.stream` decorator :rtype: callable """ if not _is_compilable(inline_str): raise ValueError( 'cannot compile the inline expression - "%s"' % inline_str ) inline_globals = _import_inline_modules(modules) func = _inline2func(inline_str, inline_globals, **stream_kwargs) return func
def add_callback(self, callback, *callback_args, **callback_kwargs):
    """Register *callback* with no associated errback.

    Convenience wrapper around :meth:`add_callbacks` that forwards the
    positional and keyword arguments collected for the callback.
    """
    return self.add_callbacks(
        callback,
        callback_args=callback_args,
        callback_kwargs=callback_kwargs,
    )
Add a callback without an associated errback.
Below is the instruction that describes the task: ### Input: Add a callback without an associated errback. ### Response: def add_callback(self, callback, *callback_args, **callback_kwargs): """Add a callback without an associated errback.""" return self.add_callbacks(callback, callback_args=callback_args, callback_kwargs=callback_kwargs)
def load_hdf(cls, filename, path='', name=None):
    """
    A class method to load a saved StarModel from an HDF5 file.

    File must have been created by a call to :func:`StarModel.save_hdf`.

    :param filename: H5 file to load.
    :param path: (optional) Path within HDF file.
    :param name: (optional) Model name; when *None*, falls back to the
        ``name`` attribute stored in the file (or ``''`` if absent).
    :return: :class:`StarModel` object.
    """
    store = pd.HDFStore(filename)
    try:
        samples = store['{}/samples'.format(path)]
        attrs = store.get_storer('{}/samples'.format(path)).attrs

        properties = attrs.properties
        maxAV = attrs.maxAV
        max_distance = attrs.max_distance
        min_logg = attrs.min_logg  # read to validate presence; not otherwise used below
        ic_type = attrs.ic_type
        use_emcee = attrs.use_emcee
        basename = attrs._mnest_basename

        if name is None:
            # Older files may not carry a stored name; default to ''.
            try:
                name = attrs.name
            except AttributeError:
                name = ''
    finally:
        # Always release the store. The original code only closed it on
        # failure of the first two reads, leaking the handle if any
        # attrs.* access above raised.
        store.close()

    # ic_type is passed directly; no need to instantiate it here.
    mod = cls(ic_type, maxAV=maxAV, max_distance=max_distance,
              use_emcee=use_emcee, name=name,
              **properties)
    mod._samples = samples
    mod._mnest_basename = basename
    return mod
A class method to load a saved StarModel from an HDF5 file. File must have been created by a call to :func:`StarModel.save_hdf`. :param filename: H5 file to load. :param path: (optional) Path within HDF file. :return: :class:`StarModel` object.
Below is the instruction that describes the task: ### Input: A class method to load a saved StarModel from an HDF5 file. File must have been created by a call to :func:`StarModel.save_hdf`. :param filename: H5 file to load. :param path: (optional) Path within HDF file. :return: :class:`StarModel` object. ### Response: def load_hdf(cls, filename, path='', name=None): """ A class method to load a saved StarModel from an HDF5 file. File must have been created by a call to :func:`StarModel.save_hdf`. :param filename: H5 file to load. :param path: (optional) Path within HDF file. :return: :class:`StarModel` object. """ store = pd.HDFStore(filename) try: samples = store['{}/samples'.format(path)] attrs = store.get_storer('{}/samples'.format(path)).attrs except: store.close() raise properties = attrs.properties maxAV = attrs.maxAV max_distance = attrs.max_distance min_logg = attrs.min_logg ic_type = attrs.ic_type use_emcee = attrs.use_emcee basename = attrs._mnest_basename if name is None: try: name = attrs.name except: name = '' store.close() #ic = ic_type() don't need to initialize anymore mod = cls(ic_type, maxAV=maxAV, max_distance=max_distance, use_emcee=use_emcee, name=name, **properties) mod._samples = samples mod._mnest_basename = basename return mod
def find_playlists_by_ids(self, playlist_ids):
    """doc: http://open.youku.com/docs/doc?id=67
    """
    # Batch lookup of playlists by their comma-separated ids.
    resp = requests.get(
        'https://openapi.youku.com/v2/playlists/show_batch.json',
        params={
            'client_id': self.client_id,
            'playlist_ids': playlist_ids,
        },
    )
    check_error(resp)
    return resp.json()
doc: http://open.youku.com/docs/doc?id=67
Below is the instruction that describes the task: ### Input: doc: http://open.youku.com/docs/doc?id=67 ### Response: def find_playlists_by_ids(self, playlist_ids): """doc: http://open.youku.com/docs/doc?id=67 """ url = 'https://openapi.youku.com/v2/playlists/show_batch.json' params = { 'client_id': self.client_id, 'playlist_ids': playlist_ids } r = requests.get(url, params=params) check_error(r) return r.json()