code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def open_subreddit_page(self, name):
    """Open an instance of the subreddit page for the given subreddit name."""
    from .subreddit_page import SubredditPage

    with self.term.loader('Loading subreddit'):
        subreddit_page = SubredditPage(
            self.reddit, self.term, self.config, self.oauth, name)
    # The loader swallows exceptions; only hand back the page on success.
    if not self.term.loader.exception:
        return subreddit_page
Open an instance of the subreddit page for the given subreddit name.
def _data_update(subjects, queue, run_flag): while run_flag.running: while not queue.empty(): data = queue.get() for subject in [s for s in subjects if not s.is_disposed]: subject.on_next(data) time.sleep(0.1)
Get data from background process and notify all subscribed observers with the new data.
def list_launch_configurations(region=None, key=None, keyid=None, profile=None):
    """List all Launch Configurations.

    CLI example::

        salt myminion boto_asg.list_launch_configurations
    """
    configs = get_all_launch_configurations(region, key, keyid, profile)
    return [config.name for config in configs]
List all Launch Configurations. CLI example:: salt myminion boto_asg.list_launch_configurations
def volume_delete(pool, volume, **kwargs):
    """Delete a libvirt managed volume.

    :param pool: libvirt storage pool name
    :param volume: name of the volume to delete
    :param kwargs: connection/username/password overriding defaults
    :return: True on success (libvirt delete() returns 0 on success)
    """
    conn = __get_conn(**kwargs)
    try:
        target = _get_storage_vol(conn, pool, volume)
        return not target.delete()
    finally:
        conn.close()
Delete a libvirt managed volume. :param pool: libvirt storage pool name :param volume: name of the volume to delete :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: Neon CLI Example: .. code-block:: bash salt "*" virt.volume_delete <pool> <volume>
def save_csv(p, sheet):
    'Save as single CSV file, handling column names as first line.'
    with p.open_text(mode='w') as fp:
        writer = csv.writer(fp, **csvoptions())
        header = [col.name for col in sheet.visibleCols]
        # Skip the header row when every column name is empty.
        if ''.join(header):
            writer.writerow(header)
        for row in Progress(sheet.rows, 'saving'):
            writer.writerow([col.getDisplayValue(row)
                             for col in sheet.visibleCols])
Save as single CSV file, handling column names as first line.
def random(self, *args, **kwargs):
    """Add a random index.

    Shortcut of :class:`recordlinkage.index.Random`::

        from recordlinkage.index import Random

        indexer = recordlinkage.Index()
        indexer.add(Random())
    """
    self.add(Random())
    return self
Add a random index. Shortcut of :class:`recordlinkage.index.Random`:: from recordlinkage.index import Random indexer = recordlinkage.Index() indexer.add(Random())
def create(self, name, network):
    """Create a new Account and add it to this Accounts collection.

    Args:
        name (str): Account name
        network (str): Cryptocurrency type; must be in SUPPORTED_NETWORKS.

    Returns:
        The new round.Account
    """
    if network not in SUPPORTED_NETWORKS:
        raise ValueError('Network not valid!')
    raw = self.resource.create(dict(name=name, network=network))
    account = self.wrap(raw)
    self.add(account)
    return account
Create a new Account object and add it to this Accounts collection. Args: name (str): Account name network (str): Type of cryptocurrency. Can be one of 'bitcoin', 'bitcoin_testnet', 'litecoin', 'dogecoin'. Returns: The new round.Account
def get_peak_mem():
    """Return peak memory use of this process since it started, in MB."""
    import resource
    # ru_maxrss is reported in kilobytes on Linux but bytes on macOS.
    denom = 1024.0
    if sys.platform == 'darwin':
        denom *= 1024.0
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / denom
Returns peak memory use since the process started until the moment it is called.
def img2code(self, key, img):
    """Generate wx source code that reconstructs *img* from embedded data.

    NOTE(review): under Python 3 ``base64.b64encode`` returns bytes, so the
    formatted code would contain a ``b'...'`` literal — looks Python-2-era;
    confirm before porting.
    """
    plain_template = (
        "wx.ImageFromData({width}, {height}, "
        "bz2.decompress(base64.b64decode('{data}'))).ConvertToBitmap()"
    )
    alpha_template = (
        "wx.ImageFromDataWithAlpha({width}, {height}, "
        "bz2.decompress(base64.b64decode('{data}')), "
        "bz2.decompress(base64.b64decode('{alpha}'))).ConvertToBitmap()"
    )
    data = base64.b64encode(bz2.compress(img.GetData(), 9))
    if img.HasAlpha():
        alpha = base64.b64encode(bz2.compress(img.GetAlphaData(), 9))
        return alpha_template.format(width=img.GetWidth(),
                                     height=img.GetHeight(),
                                     data=data, alpha=alpha)
    return plain_template.format(width=img.GetWidth(),
                                 height=img.GetHeight(), data=data)
Pastes wx.Image into single cell
def register(self, *args):
    """Register a configurable component in the configuration schema store."""
    super(ConfigurableMeta, self).register(*args)
    # Imported lazily to avoid a circular import at module load time
    # (presumably -- confirm against hfos.database).
    from hfos.database import configschemastore
    configschemastore[self.name] = self.configschema
Register a configurable component in the configuration schema store
def update_snapshot(self, snapshot, display_name=None, display_description=None):
    """Update display_name and/or display_description on *snapshot*.

    You may specify one or more values to update.
    """
    return snapshot.update(display_name=display_name,
                           display_description=display_description)
Update the specified values on the specified snapshot. You may specify one or more values to update.
def fit(self, col):
    """Prepare the transformer to convert data.

    Args:
        col(pandas.DataFrame): Data to transform.

    Returns:
        None
    """
    dates = self.safe_datetime_cast(col)
    first_group_key = dates.groupby(dates).count().index[0]
    # Stored as nanoseconds since the epoch.
    self.default_val = first_group_key.timestamp() * 1e9
Prepare the transformer to convert data. Args: col(pandas.DataFrame): Data to transform. Returns: None
def certify_parameter(certifier, name, value, kwargs=None):
    """Internal certifier for kwargs passed to Certifiable public methods.

    :param callable certifier: The certifier to use
    :param str name: The name of the kwarg
    :param object value: The value of the kwarg
    :raises CertifierParamError: A parameter failed internal certification.
    """
    try:
        certifier(value, **(kwargs or {}))
    except CertifierError as err:
        # Re-raise with the parameter name/value, chaining the cause.
        six.raise_from(CertifierParamError(name, value), err)
Internal certifier for kwargs passed to Certifiable public methods. :param callable certifier: The certifier to use :param str name: The name of the kwargs :param object value: The value of the kwarg. :param bool required: Is the param required. Default=False. :raises CertifierParamError: A parameter failed internal certification.
def viterbi_alignment(es, fs, t, a):
    """Return a dict mapping each position j in es to the best position i in fs.

    Scores each candidate pair by translation probability ``t[(e, f)]``
    times alignment probability ``a[(i, j, len(es), len(fs))]``.
    """
    alignment = collections.defaultdict(float)
    src_len = len(es)
    trg_len = len(fs)
    for j, e_word in enumerate(es, 1):
        best_index, best_score = 0, -1
        for i, f_word in enumerate(fs, 1):
            score = t[(e_word, f_word)] * a[(i, j, src_len, trg_len)]
            if best_score < score:
                best_index, best_score = i, score
        alignment[j] = best_index
    return alignment
Return a dictionary mapping each word position j in es to the best-aligned word position i in fs.
def _to_list(obj):
    """Copy the attributes named in ``__attrs`` from *obj* into a dict.

    NOTE(review): despite the name and the original docstring, this
    returns a dict, not a list.
    """
    return {attr: getattr(obj, attr)
            for attr in __attrs if hasattr(obj, attr)}
Convert snetinfo object to list
def uri(self):
    """Get the file download link.

    .. note:: Assumes the file is served by the view
       ``invenio_records_ui.<pid_type>_files``.
    """
    endpoint = '.{0}_files'.format(self.pid.pid_type)
    return url_for(endpoint,
                   pid_value=self.pid.pid_value,
                   filename=self.file.key)
Get file download link. .. note:: The URI generation assumes that you can download the file using the view ``invenio_records_ui.<pid_type>_files``.
def patch_os_module():
    """Monkey-patch ``os`` with symlink/readlink where the platform lacks them.

    jaraco.windows provides the replacement functions; only installs them
    when the attributes are missing.
    """
    if not hasattr(os, 'symlink'):
        os.symlink = symlink
        os.path.islink = islink
    if not hasattr(os, 'readlink'):
        os.readlink = readlink
jaraco.windows provides the os.symlink and os.readlink functions. Monkey-patch the os module to include them if not present.
def _check_states_enum(cls):
    """Check that the configured states enum exists and is an Enum subclass.

    Raises ValueError when the attribute is missing or is not an Enum.
    """
    states_enum_name = cls.context.get_config('states_enum_name')
    try:
        cls.context['states_enum'] = getattr(
            cls.context.new_class, states_enum_name)
    except AttributeError:
        raise ValueError('No states enum given!')
    # issubclass raises TypeError for non-class values; treat that as invalid.
    try:
        is_enum = issubclass(cls.context.states_enum, Enum)
    except TypeError:
        is_enum = False
    if not is_enum:
        raise ValueError(
            'Please provide enum instance to define available states.')
Check if states enum exists and is proper one.
def copy(self):
    """Return a shallow copy of a pqdict."""
    cls = self.__class__
    return cls(self, key=self._keyfn, precedes=self._precedes)
Return a shallow copy of a pqdict.
def serialize_attrs(self, *args):
    """Convert an instance to a dict with only the specified attributes.

    Args:
        *args: attribute names to serialize; names absent from the class
            or forbidden for serialization are skipped silently.
    """
    cls = type(self)
    result = {}
    for attr in args:
        if not hasattr(cls, attr):
            continue
        if attr in cls.attrs_forbidden_for_serialization():
            continue
        value = getattr(self, attr)
        # List-like values are materialized so the result is JSON-friendly.
        result[attr] = list(value) if is_list_like(value) else value
    return result
Converts and instance to a dictionary with only the specified attributes as keys Args: *args (list): The attributes to serialize Examples: >>> customer = Customer.create(name="James Bond", email="007@mi.com", phone="007", city="London") >>> customer.serialize_attrs('name', 'email') {'name': u'James Bond', 'email': u'007@mi.com'}
def are_equal(value1, value2):
    """Check whether two values are equal.

    Works for values of any type; two missing (None) values are equal.

    :param value1: the first value to compare
    :param value2: the second value to compare
    :return: True if the values are equal and False otherwise
    """
    # Bug fix: the original tested `value1 == None or value2 == None` twice,
    # returning True when only ONE side was None and making the second
    # check unreachable. Both-None -> equal; exactly one None -> not equal.
    if value1 is None and value2 is None:
        return True
    if value1 is None or value2 is None:
        return False
    return value1 == value2
Checks if two values are equal. The operation can be performed over values of any type. :param value1: the first value to compare :param value2: the second value to compare :return: true if values are equal and false otherwise
def replace(zpool, old_device, new_device=None, force=False):
    """Replace ``old_device`` with ``new_device`` in ``zpool``.

    zpool : string      Name of storage pool
    old_device : string Old device to replace
    new_device : string Optional new device
    force : boolean     Force use of new_device even if it appears in use

    CLI Example::

        salt '*' zpool.replace myzpool /path/to/vdev1 /path/to/vdev2
    """
    flags = ['-f'] if force else []
    target = [zpool, old_device]
    if new_device:
        target.append(new_device)
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zpool_command'](
            command='replace',
            flags=flags,
            target=target,
        ),
        python_shell=False,
    )
    ret = __utils__['zfs.parse_command_result'](res, 'replaced')
    if ret['replaced']:
        ret['vdevs'] = _clean_vdev_config(
            __salt__['zpool.status'](zpool=zpool)[zpool]['config'][zpool],
        )
    return ret
Replaces ``old_device`` with ``new_device`` .. note:: This is equivalent to attaching ``new_device``, waiting for it to resilver, and then detaching ``old_device``. The size of ``new_device`` must be greater than or equal to the minimum size of all the devices in a mirror or raidz configuration. zpool : string Name of storage pool old_device : string Old device to replace new_device : string Optional new device force : boolean Forces use of new_device, even if its appears to be in use. CLI Example: .. code-block:: bash salt '*' zpool.replace myzpool /path/to/vdev1 /path/to/vdev2
def is_ancestor_of_book(self, id_, book_id):
    """Tests if an ``Id`` is an ancestor of a book.

    Delegates to the catalog session when one is configured, otherwise
    to the hierarchy session. Returns False if ``id`` is not found.
    """
    if self._catalog_session is None:
        return self._hierarchy_session.is_ancestor(id_=id_,
                                                   ancestor_id=book_id)
    return self._catalog_session.is_ancestor_of_catalog(id_=id_,
                                                        catalog_id=book_id)
Tests if an ``Id`` is an ancestor of a book. arg: id (osid.id.Id): an ``Id`` arg: book_id (osid.id.Id): the ``Id`` of a book return: (boolean) - ``true`` if this ``id`` is an ancestor of ``book_id,`` ``false`` otherwise raise: NotFound - ``book_id`` is not found raise: NullArgument - ``id`` or ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
def train(self, *args, **kwargs):
    """Perform training and attach predictor/recommender actions to results.

    Delegates the actual work to ``self._do_transform`` and then, for every
    ODPSModelExpr produced, wires up the configured meta actions.
    """
    objs = self._do_transform(*args, **kwargs)
    # Normalize to a list so a single returned model is handled uniformly.
    obj_list = [objs, ] if not isinstance(objs, Iterable) else objs
    for obj in obj_list:
        if not isinstance(obj, ODPSModelExpr):
            continue
        for meta in ['predictor', 'recommender']:
            if meta not in self._metas:
                continue
            # NOTE(review): `self.__class__.__module__` is a str, which has
            # no `__name__` attribute -- this would raise AttributeError
            # whenever `_env` is absent. Probably `self.__class__.__module__`
            # alone was intended; confirm before changing.
            mod = __import__(self.__class__.__module__.__name__, fromlist=[''])\
                if not hasattr(self, '_env') else self._env
            action_cls_name = underline_to_capitalized(self._metas[meta])
            if not hasattr(mod, action_cls_name):
                action_cls_name = '_' + action_cls_name
            # NOTE(review): `mod + '.' + action_cls_name` adds a module object
            # to strings and would raise TypeError; `getattr(mod,
            # action_cls_name)` looks intended -- confirm with callers.
            setattr(obj, '_' + meta, mod + '.' + action_cls_name)
    return objs
Perform training on a DataFrame. The label field is specified by the ``label_field`` method. :param train_data: DataFrame to be trained. Label field must be specified. :type train_data: DataFrame :return: Trained model :rtype: MLModel
def _generate_to_tempfile(self, generator):
    """Apply *generator* to a temp file and return the file's path.

    Generating into a temp file avoids losing manual customizations
    when an error occurs.
    """
    with temporary_file(cleanup=False, binary_mode=False) as output:
        generator.write(output)
        return output.name
Applies the specified generator to a temp file and returns the path to that file. We generate into a temp file so that we don't lose any manual customizations on error.
def _dat_read_params(fmt, sig_len, byte_offset, skew, tsamps_per_frame,
                     sampfrom, sampto):
    """Calculate the parameters used to read and process a dat file.

    Returns (start_byte, n_read_samples, block_floor_samples,
    extra_flat_samples, nan_replace); see the module docs for details.
    """
    max_skew = max(skew)
    start_flat_sample = sampfrom * tsamps_per_frame
    # Skewed channels may want samples past the end of the file.
    if sampto + max_skew > sig_len:
        end_flat_sample = sig_len * tsamps_per_frame
        extra_flat_samples = (sampto + max_skew - sig_len) * tsamps_per_frame
    else:
        end_flat_sample = (sampto + max_skew) * tsamps_per_frame
        extra_flat_samples = 0
    # Packed formats require reading whole byte blocks; snap the start
    # back to a block boundary.
    if fmt == '212':
        block_floor_samples = start_flat_sample % 2
    elif fmt in ('310', '311'):
        block_floor_samples = start_flat_sample % 3
    else:
        block_floor_samples = 0
    start_flat_sample = start_flat_sample - block_floor_samples
    start_byte = byte_offset + int(start_flat_sample * BYTES_PER_SAMPLE[fmt])
    n_read_samples = end_flat_sample - start_flat_sample
    # Samples wanted past the end of each signal get replaced with nan.
    nan_replace = [max(0, sampto + s - sig_len) for s in skew]
    return (start_byte, n_read_samples, block_floor_samples,
            extra_flat_samples, nan_replace)
Calculate the parameters used to read and process a dat file, given its layout, and the desired sample range. Parameters ---------- fmt : str The format of the dat file sig_len : int The signal length (per channel) of the dat file byte_offset : int The byte offset of the dat file skew : list The skew for the signals of the dat file tsamps_per_frame : int The total samples/frame for all channels of the dat file sampfrom : int The starting sample number to be read from the signals sampto : int The final sample number to be read from the signals Returns ------- start_byte : int The starting byte to read the dat file from. Always points to the start of a byte block for special formats. n_read_samples : int The number of flat samples to read from the dat file. block_floor_samples : int The extra samples read prior to the first desired sample, for special formats, in order to ensure entire byte blocks are read. extra_flat_samples : int The extra samples desired beyond what is contained in the file. nan_replace : list The number of samples to replace with nan at the end of each signal, due to skew wanting samples beyond the file. Examples -------- sig_len=100, t = 4 (total samples/frame), skew = [0, 2, 4, 5] sampfrom=0, sampto=100 --> read_len = 100, n_sampread = 100*t, extralen = 5, nan_replace = [0, 2, 4, 5] sampfrom=50, sampto=100 --> read_len = 50, n_sampread = 50*t, extralen = 5, nan_replace = [0, 2, 4, 5] sampfrom=0, sampto=50 --> read_len = 50, n_sampread = 55*t, extralen = 0, nan_replace = [0, 0, 0, 0] sampfrom=95, sampto=99 --> read_len = 4, n_sampread = 5*t, extralen = 4, nan_replace = [0, 1, 3, 4]
def unwrap_raw(content):
    """Strip a callback wrapper and return the raw JSON payload."""
    opener = get_start_symbol(content)
    closer = ']' if opener == '[' else '}'
    first = content.find(opener, 0)
    last = content.rfind(closer)
    return content[first:last + 1]
unwraps the callback and returns the raw content
def is_bootstrapped(metadata):
    """Return True if cihai is correctly bootstrapped.

    The table must exist and its column names must match exactly the
    expected UNIHAN fields plus default columns.
    """
    if TABLE_NAME not in metadata.tables.keys():
        return False
    expected = set(UNIHAN_FIELDS + DEFAULT_COLUMNS)
    table = metadata.tables[TABLE_NAME]
    return expected == {c.name for c in table.columns}
Return True if cihai is correctly bootstrapped.
def add_signature(name=None, inputs=None, outputs=None):
    """Adds a signature to the module definition.

    NOTE: This must be called within a `module_fn` that is defining a Module.

    Args:
        name: Signature name; omitted means 'default'.
        inputs: dict of input name -> Tensor/SparseTensor, or a single
            tensor treated as {'default': tensor}.
        outputs: dict of output name -> Tensor/SparseTensor, or a single
            tensor treated as {'default': tensor}.

    Raises:
        ValueError: if the arguments are invalid.
    """
    name = name or "default"
    inputs = {} if inputs is None else inputs
    outputs = {} if outputs is None else outputs
    if not isinstance(inputs, dict):
        inputs = {"default": inputs}
    if not isinstance(outputs, dict):
        outputs = {"default": outputs}
    # Multivalued-op problems are logged but do not abort.
    multivalued_msg = find_signature_inputs_from_multivalued_ops(inputs)
    if multivalued_msg:
        logging.error(multivalued_msg)
    colocation_msg = find_signature_input_colocation_error(name, inputs)
    if colocation_msg:
        raise ValueError(colocation_msg)
    saved_model_lib.add_signature(name, inputs, outputs)
Adds a signature to the module definition. NOTE: This must be called within a `module_fn` that is defining a Module. Args: name: Signature name as a string. If omitted, it is interpreted as 'default' and is the signature used when `Module.__call__` `signature` is not specified. inputs: A dict from input name to Tensor or SparseTensor to feed when applying the signature. If a single tensor is passed, it is interpreted as a dict with a single 'default' entry. outputs: A dict from output name to Tensor or SparseTensor to return from applying the signature. If a single tensor is passed, it is interpreted as a dict with a single 'default' entry. Raises: ValueError: if the arguments are invalid.
def find_occurrences(self, resource=None, pymodule=None):
    """Generate `Occurrence` instances accepted by the configured filters."""
    tools = _OccurrenceToolsCreator(self.project, resource=resource,
                                    pymodule=pymodule, docs=self.docs)
    for offset in self._textual_finder.find_offsets(tools.source_code):
        occurrence = Occurrence(tools, offset)
        for occurrence_filter in self.filters:
            verdict = occurrence_filter(occurrence)
            # None means "no opinion"; the first definite verdict wins.
            if verdict is None:
                continue
            if verdict:
                yield occurrence
            break
Generate `Occurrence` instances
def printer(self):
    """Return the printer defined in the config, loading it lazily.

    Loads the default config first if none has been loaded yet.

    Raises:
        ConfigSectionMissingError: when no 'printer' section exists.
    """
    if not self._has_loaded:
        self.load()
    if not self._printer_name:
        raise exceptions.ConfigSectionMissingError('printer')
    if not self._printer:
        printer_cls = getattr(printer, self._printer_name)
        self._printer = printer_cls(**self._printer_config)
    return self._printer
Returns a printer that was defined in the config, or throws an exception. This method loads the default config if one hasn't beeen already loaded.
def update_model(self, words):
    """Updates the completer model.

    :param words: Words to update the completer with.
    :type words: tuple or list
    :return: Method success.
    :rtype: bool
    """
    base_tokens = DefaultCompleter._DefaultCompleter__tokens[self.__language]
    extended_words = base_tokens[:]
    extended_words.extend(word for word in set(words)
                          if word not in base_tokens)
    self.setModel(QStringListModel(extended_words))
    return True
Updates the completer model. :param words: Words to update the completer with. :type words: tuple or list :return: Method success. :rtype: bool
def load(self, specfiles=None):
    """Imports the specified ``fic`` files from the hard disk.

    :param specfiles: the name of an ms-run file or a list of names.
        If None all specfiles are selected.
    :type specfiles: None, str, [str, str]
    """
    if specfiles is None:
        specfiles = list(viewkeys(self.info))
    else:
        specfiles = aux.toList(specfiles)
    for specfile in specfiles:
        if specfile not in self.info:
            warnings.warn('Error while calling "FiContainer.load()": "%s" is'
                          ' not present in "FiContainer.info"!' % (specfile, ))
            continue
        fiPath = aux.joinpath(self.info[specfile]['path'], specfile + '.fic')
        with zipfile.ZipFile(fiPath, 'r') as containerZip:
            jsonString = io.TextIOWrapper(containerZip.open('data'),
                                          encoding='utf-8').read()
        self.container[specfile] = json.loads(jsonString,
                                              object_hook=Fi.jsonHook)
Imports the specified ``fic`` files from the hard disk. :param specfiles: the name of an ms-run file or a list of names. If None all specfiles are selected. :type specfiles: None, str, [str, str]
def magic_session(db_session=None, url=None):
    """Yield the session you already have, or make one that commits and
    closes no matter what happens."""
    if db_session is not None:
        yield db_session
        return
    session = get_session(url, expire_on_commit=False)
    try:
        try:
            yield session
        finally:
            # Commit before closing, even on error in the consumer.
            session.commit()
    finally:
        session.close()
Either does nothing with the session you already have or makes one that commits and closes no matter what happens
def setup_list_pars(self):
    """Main entry point for setting up list multiplier parameters."""
    temporal_df = self.setup_temporal_list_pars()
    spatial_df = self.setup_spatial_list_pars()
    if temporal_df is None and spatial_df is None:
        return
    # Test-run the helper from the model workspace to catch errors early.
    os.chdir(self.m.model_ws)
    try:
        apply_list_pars()
    except Exception as e:
        os.chdir("..")
        self.logger.lraise(
            "error test running apply_list_pars():{0}".format(str(e)))
    os.chdir('..')
    line = "pyemu.helpers.apply_list_pars()\n"
    self.logger.statement("forward_run line:{0}".format(line))
    self.frun_pre_lines.append(line)
main entry point for setting up list multiplier parameters
def additional_assets(context: Context):
    """Collect additional asset paths into the asset build path via rsync."""
    rsync_flags = '-avz' if context.verbosity == 2 else '-az'
    for asset_path in context.app.additional_asset_paths:
        context.shell('rsync %s %s %s/' % (rsync_flags, asset_path,
                                           context.app.asset_build_path))
Collects assets from GOV.UK frontend toolkit
def remove_root_book(self, book_id):
    """Removes a root book.

    arg: book_id (osid.id.Id): the ``Id`` of a book
    raise: NotFound - ``book_id`` is not a root
    raise: NullArgument - ``book_id`` is ``null``
    """
    if self._catalog_session is None:
        return self._hierarchy_session.remove_root(id_=book_id)
    return self._catalog_session.remove_root_catalog(catalog_id=book_id)
Removes a root book. arg: book_id (osid.id.Id): the ``Id`` of a book raise: NotFound - ``book_id`` is not a root raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def populate(self, blueprint, documents):
    """Populate the database with documents and return the inserted frames."""
    documents = self.finish(blueprint, documents)
    frame_cls = blueprint.get_frame_cls()
    frames = []
    for document in documents:
        # Pull meta fields out of the document; they become frame attributes.
        meta = {}
        for field_name in blueprint._meta_fields:
            meta[field_name] = document.pop(field_name)
        frame = frame_cls(document)
        for key, value in meta.items():
            setattr(frame, key, value)
        frames.append(frame)
    blueprint.on_fake(frames)
    frames = frame_cls.insert_many(frames)
    blueprint.on_faked(frames)
    return frames
Populate the database with documents
def _polygon_from_coords(coords, fix_geom=False, swap=True, dims=2):
    """Return a Shapely Polygon from a flat coordinate list.

    - coords: list of alternating latitude / longitude coordinates
    - fix_geom: automatically fix invalid geometry with buffer(0)
    - swap: swap each pair from (lat, lon) to (lon, lat) order
    - dims: number of values per point

    Raises RuntimeError when the geometry is invalid and fix_geom is False.
    """
    assert len(coords) % dims == 0
    # Bug fix: use integer division. Under Python 3, len(coords)/dims is a
    # float and numpy.reshape rejects non-integer dimensions.
    number_of_points = len(coords) // dims
    coords_as_array = np.array(coords)
    reshaped = coords_as_array.reshape(number_of_points, dims)
    points = [
        (float(i[1]), float(i[0])) if swap else (float(i[0]), float(i[1]))
        for i in reshaped.tolist()
    ]
    polygon = Polygon(points).buffer(0)
    # Validate explicitly rather than via `assert`, which -O strips.
    if polygon.is_valid:
        return polygon
    if fix_geom:
        return polygon.buffer(0)
    raise RuntimeError("Geometry is not valid.")
Return Shapely Polygon from coordinates. - coords: list of alterating latitude / longitude coordinates - fix_geom: automatically fix geometry
def _check_euk_contamination(self, hmm_hit_tables):
    """Return the set of read names that look eukaryotic.

    A read is flagged when it hits only the 18S HMM (last table), or when
    its 18S bit score beats every other HMM's bit score for that read.
    """
    euk_hit_table = HMMreader(hmm_hit_tables.pop(-1))
    other_hit_tables = [HMMreader(path) for path in hmm_hit_tables]
    reads_unique_to_eukaryotes = []
    reads_with_better_euk_hit = []
    for read_name in euk_hit_table.names():
        other_bits = []
        for table in other_hit_tables:
            if read_name in table.names():
                other_bits.append(table.bit(read_name))
            else:
                reads_unique_to_eukaryotes.append(read_name)
        if not other_bits:
            continue
        euk_bit = euk_hit_table.bit(read_name)
        if any([x for x in other_bits if x > euk_bit]):
            continue
        reads_with_better_euk_hit.append(read_name)
    if len(reads_with_better_euk_hit) == 0:
        logging.info("No contaminating eukaryotic reads detected")
    else:
        logging.info("Found %s read(s) that may be eukaryotic"
                     % len(reads_with_better_euk_hit +
                           reads_unique_to_eukaryotes))
    euk_reads = set(reads_with_better_euk_hit + reads_unique_to_eukaryotes)
    return euk_reads
check_euk_contamination - Check the output HMM tables for reads that hit the 18S (eukaryotic) HMM with a higher bit score than the other HMMs, or that hit only the 18S HMM. Parameters ---------- hmm_hit_tables : array Array of paths to the output files produced by hmmsearch or nhmmer. run_stats : dict A dictionary to update with the number of unique 18S reads and reads detected by both 18S and non-18S HMMs Returns ------- euk_reads : set Non-redundant set of all read names deemed to be eukaryotic
def get_querystring(uri):
    """Get querystring information from *uri*.

    :param uri: uri
    :return: dict mapping each query key to a list of values (or {})
    """
    split_result = urlparse.urlsplit(uri)
    return urlparse.parse_qs(split_result.query)
Get Querystring information from uri. :param uri: uri :return: querystring info or {}
def end_time(self):
    """End timestamp of the dataset.

    Falls back to the start time when no scan duration is known for
    this sector.
    """
    if self.sector in SCAN_DURATION:
        return self.start_time + SCAN_DURATION[self.sector]
    return self.start_time
End timestamp of the dataset
def open_as_pillow(filename):
    """Load the whole file into memory so it can be deleted immediately."""
    with __sys_open(filename, 'rb') as f:
        buffered = BytesIO(f.read())
    return Image.open(buffered)
Reads the whole file into memory so the file can be deleted immediately after loading.
def on_finish(self):
    """Called regardless of success or failure.

    Stamps the elapsed request time on the response and invokes the
    callback, if any, with it.
    """
    response = self.response
    response.request_time = time.time() - self.start_time
    if self.callback:
        self.callback(response)
Called regardless of success or failure
def valid_ovsdb_addr(addr):
    """Return True if *addr* is a valid OVSDB server address.

    Valid formats are ``unix:file``, ``tcp:ip:port`` and ``ssl:ip:port``.
    IPv6 addresses must be wrapped in brackets (e.g. ssl:[::1]:6640).

    :param addr: str value of OVSDB server address.
    :return: True if valid, otherwise False.
    """
    unix_match = re.match(r'unix:(\S+)', addr)
    if unix_match:
        return os.path.isfile(unix_match.group(1))
    inet_match = re.match(r'(tcp|ssl):(\S+):(\d+)', addr)
    if not inet_match:
        return False
    host = inet_match.group(2)
    port = inet_match.group(3)
    if '[' in host:
        host = host.strip('[').strip(']')
        return ip.valid_ipv6(host) and port.isdigit()
    return ip.valid_ipv4(host) and port.isdigit()
Returns True if the given addr is valid OVSDB server address, otherwise False. The valid formats are: - ``unix:file`` - ``tcp:ip:port`` - ``ssl:ip:port`` If ip is IPv6 address, wrap ip with brackets (e.g., ssl:[::1]:6640). :param addr: str value of OVSDB server address. :return: True if valid, otherwise False.
def _api_call(function):
    """Decorator to call a pywebview API function.

    Waits for _webview_ready and raises descriptive Exceptions when the
    window was never created or the referenced webview uid is unknown.
    """
    @wraps(function)
    def wrapper(*args, **kwargs):
        try:
            if not _webview_ready.wait(15):
                raise Exception('Main window failed to start')
            return function(*args, **kwargs)
        except NameError:
            raise Exception('Create a web view window first, before invoking this function')
        except KeyError:
            # uid is either a keyword argument or the last positional one.
            if 'uid' in kwargs:
                uid = kwargs['uid']
            else:
                uid = args[-1]
            raise Exception('Cannot call function: No webview exists with uid: {}'.format(uid))
    return wrapper
Decorator to call a pywebview API, checking for _webview_ready and raising appropriate Exceptions on failure.
def write_record(self, event_str):
    """Write a serialized event to file in TFRecord framing:
    length header + header CRC, payload, payload CRC."""
    length_header = struct.pack('Q', len(event_str))
    header_crc = struct.pack('I', masked_crc32c(length_header))
    payload_crc = struct.pack('I', masked_crc32c(event_str))
    self._writer.write(length_header + header_crc + event_str + payload_crc)
Writes a serialized event to file.
def delete(self):
    """Delete this Dagobah instance from the backend."""
    logger.debug('Deleting Dagobah instance with ID {0}'.format(self.dagobah_id))
    # Clear local job state before removing the backend record.
    self.jobs = []
    self.created_jobs = 0
    self.backend.delete_dagobah(self.dagobah_id)
Delete this Dagobah instance from the Backend.
def popitem(self):
    """Remove and return the item with highest priority.

    Raises ``KeyError`` if pqdict is empty.
    """
    heap = self._heap
    position = self._position
    if not heap:
        raise KeyError('pqdict is empty')
    tail = heap.pop(-1)
    if heap:
        # Move the tail to the root and restore the heap invariant.
        top = heap[0]
        heap[0] = tail
        position[tail.key] = 0
        self._sink(0)
    else:
        top = tail
    del position[top.key]
    return top.key, top.value
Remove and return the item with highest priority. Raises ``KeyError`` if pqdict is empty.
def getAccountNames(store, protocol=None):
    """Retrieve account name information from the given database.

    @param store: An Axiom Store representing a user account.
    @return: A generator of (username, domain) two-tuples.
    """
    return ((method.localpart, method.domain)
            for method in getLoginMethods(store, protocol))
Retrieve account name information about the given database. @param store: An Axiom Store representing a user account. It must have been opened through the store which contains its account information. @return: A generator of two-tuples of (username, domain) which refer to the given store.
def read_input(self, input_cls, filename, **kwargs):
    """Read in input and do some minimal preformatting.

    input_cls - the class to use to read the input
    filename  - input filename
    """
    reader = input_cls()
    reader.read_input(filename)
    return reader.get_data()
Read in input and do some minimal preformatting input_cls - the class to use to read the input filename - input filename
def uuidify(val):
    """Transform an integer into a UUID-formatted string.

    Values that already look like a UUID are returned unchanged.

    returns: UUID formatted version of the input.
    """
    if uuidutils.is_uuid_like(val):
        return val
    try:
        int_val = int(val, 16)
    except ValueError:
        with excutils.save_and_reraise_exception():
            # Fixed message typo: hex digits run 0-9a-f, not 0-9a-e.
            LOG.error("Invalid UUID format %s. Please provide an "
                      "integer in decimal (0-9) or hex (0-9a-f) "
                      "format", val)
    # Left-pad the decimal form of the value into the last UUID group.
    res = str(int_val)
    num = 12 - len(res)
    return "00000000-0000-0000-0000-" + "0" * num + res
Takes an integer and transforms it to a UUID format. returns: UUID formatted version of input.
def update_assessment_offered(self, assessment_offered_form):
    """Updates an existing assessment offered.

    arg: assessment_offered_form
        (osid.assessment.AssessmentOfferedForm): the form containing
        the elements to be updated
    raise: IllegalState - the form was already used in an update
        transaction
    raise: InvalidArgument - the form contains an invalid value
    raise: NullArgument - ``assessment_offered_form`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure occurred
    raise: Unsupported - the form did not originate from
        ``get_assessment_offered_form_for_update()``
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('assessment', collection='AssessmentOffered', runtime=self._runtime)
    if not isinstance(assessment_offered_form, ABCAssessmentOfferedForm):
        raise errors.InvalidArgument('argument type is not an AssessmentOfferedForm')
    if not assessment_offered_form.is_for_update():
        raise errors.InvalidArgument('the AssessmentOfferedForm is for update only, not create')
    try:
        # _forms tracks each form's lifecycle; a form may be used only once.
        if self._forms[assessment_offered_form.get_id().get_identifier()] == UPDATED:
            raise errors.IllegalState('assessment_offered_form already used in an update transaction')
    except KeyError:
        # Unknown identifier: the form was never issued by this session.
        raise errors.Unsupported('assessment_offered_form did not originate from this session')
    if not assessment_offered_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    collection.save(assessment_offered_form._my_map)
    self._forms[assessment_offered_form.get_id().get_identifier()] = UPDATED
    # Build the returned object from the map that was just saved.
    return objects.AssessmentOffered(
        osid_object_map=assessment_offered_form._my_map,
        runtime=self._runtime,
        proxy=self._proxy)
Updates an existing assessment offered. arg: assessment_offered_form (osid.assessment.AssessmentOfferedForm): the form containing the elements to be updated raise: IllegalState - ``assessment_offered_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``assessment_offered_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``assessment_form`` did not originate from ``get_assessment_form_for_update()`` *compliance: mandatory -- This method must be implemented.*
async def seek(self, pos, whence=sync_io.SEEK_SET):
    """Move to a new file position and return it.

    ``pos`` is a byte count; ``whence`` follows the io module convention
    (SEEK_SET from start, SEEK_CUR relative, SEEK_END from end).
    """
    new_position = self._stream.seek(pos, whence)
    return new_position
Move to new file position. Argument offset is a byte count. Optional argument whence defaults to SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values are SEEK_CUR or 1 (move relative to current position, positive or negative), and SEEK_END or 2 (move relative to end of file, usually negative, although many platforms allow seeking beyond the end of a file). Note that not all file objects are seekable.
def update_rotation(self, dt, buttons):
    """Update rotation from button state and refresh the impulse direction."""
    assert isinstance(buttons, dict)
    turn = buttons['right'] - buttons['left']
    if turn != 0:
        # Turning drains the battery at the angular rate.
        self.stats['battery'] -= self.battery_use['angular']
        self.rotation += turn * dt * self.angular_velocity
    heading = math.radians(self.rotation)
    self.impulse_dir = eu.Vector2(math.sin(heading), math.cos(heading))
Updates rotation and impulse direction
def add_api_key(key, value):
    """Add or update an API key in the bot's data.

    Args:
        key: The name of the key to add
        value: The value for the key
    """
    # Bug fix: the empty-input checks previously only logged and fell
    # through, so an empty key/value was still written; abort instead.
    if key is None or key == "":
        logger.error("Key cannot be empty")
        return
    if value is None or value == "":
        logger.error("Value cannot be empty")
        return
    from .. import datatools
    data = datatools.get_data()
    if "keys" not in data["discord"]:
        data["discord"]["keys"] = {}
    is_key_new = False
    if key not in data["discord"]["keys"]:
        is_key_new = True
    elif data["discord"]["keys"][key] == value:
        # Nothing to do when the stored value already matches.
        logger.info("API key '{}' already has value '{}'".format(key, value))
        return
    data["discord"]["keys"][key] = value
    datatools.write_data(data)
    key_text = "added" if is_key_new else "updated"
    logger.info("API key '{}' {} with value '{}'".format(key, key_text, value))
Adds a key to the bot's data Args: key: The name of the key to add value: The value for the key
def tmpconfig(request):
    """Fixture that returns a temporary CONF element."""
    subfolder = tempfile.mkdtemp()
    conf = UserConfig('spyder-test',
                      defaults=DEFAULTS,
                      version=CONF_VERSION,
                      subfolder=subfolder,
                      raw_mode=True)
    # Remove the temp directory when the test finishes.
    request.addfinalizer(lambda: shutil.rmtree(subfolder))
    return conf
Fixtures that returns a temporary CONF element.
def cause_repertoire(self, mechanism, purview):
    """Return the cause repertoire of a mechanism over a purview.

    Args:
        mechanism (tuple[int]): Mechanism nodes.
        purview (tuple[int]): Purview nodes.

    Returns:
        np.ndarray: Distribution over purview node states (not whole-network
        states).
    """
    if not purview:
        # Empty purview: trivial distribution.
        return np.array([1.0])
    if not mechanism:
        # Unconstrained mechanism: maximum-entropy over the purview.
        return max_entropy_distribution(purview, self.tpm_size)
    purview = frozenset(purview)
    joint = np.ones(repertoire_shape(purview, self.tpm_size))
    single_repertoires = [self._single_node_cause_repertoire(node, purview)
                          for node in mechanism]
    joint = joint * functools.reduce(np.multiply, single_repertoires)
    return distribution.normalize(joint)
Return the cause repertoire of a mechanism over a purview. Args: mechanism (tuple[int]): The mechanism for which to calculate the cause repertoire. purview (tuple[int]): The purview over which to calculate the cause repertoire. Returns: np.ndarray: The cause repertoire of the mechanism over the purview. .. note:: The returned repertoire is a distribution over purview node states, not the states of the whole network.
def parse_tenant_config_path(config_path):
    """Insert the current tenant's schema name into *config_path*.

    If the string contains '%s' the schema name replaces it; otherwise
    the schema name is appended as a path component.
    """
    schema = connection.schema_name
    try:
        return config_path % schema
    except (TypeError, ValueError):
        return os.path.join(config_path, schema)
Convenience function for parsing django-tenants' path configuration strings. If the string contains '%s', then the current tenant's schema name will be inserted at that location. Otherwise the schema name will be appended to the end of the string. :param config_path: A configuration path string that optionally contains '%s' to indicate where the tenant schema name should be inserted. :return: The formatted string containing the schema name
def is_alive(self, container: Container) -> bool:
    """Determine whether a given container is still alive.

    Returns:
        True if the underlying Docker container is known and its status
        is 'running', otherwise False.
    """
    uid = container.uid
    if uid not in self.__dockerc:
        return False
    return self.__dockerc[uid].status == 'running'
Determines whether a given container is still alive. Returns: `True` if the underlying Docker container for the given BugZoo container is still alive, otherwise `False`.
def kron(a, b):
    """Kronecker product of two TT-matrices or two TT-vectors.

    Delegates to ``a.__kron__``; a None first operand returns *b* unchanged.
    """
    if hasattr(a, '__kron__'):
        return a.__kron__(b)
    if a is not None:
        raise ValueError(
            'Kron is waiting for two TT-vectors or two TT-matrices')
    return b
Kronecker product of two TT-matrices or two TT-vectors
def send_rpc_response(self, rpc_tag, result, response):
    """Send a response to an in-flight RPC.

    Args:
        rpc_tag (str): Tag given in a previous send_rpc_command call.
        result (str): Operation result, e.g. success, timeout, rpc_not_found.
        response (bytes): Raw response payload.
    """
    if rpc_tag not in self.in_flight_rpcs:
        raise ArgumentError("In flight RPC could not be found, it may have timed out",
                            rpc_tag=rpc_tag)
    del self.in_flight_rpcs[rpc_tag]
    response_message = {'response': response, 'result': result}
    try:
        self.rpc_results.set(rpc_tag, response_message)
    except KeyError:
        # The waiter already gave up; log instead of failing.
        self._logger.warning("RPC response came but no one was waiting: response=%s",
                             response)
Send a response to an RPC. Args: rpc_tag (str): The exact string given in a previous call to send_rpc_command result (str): The result of the operation. The possible values of response are: service_not_found, rpc_not_found, timeout, success, invalid_response, invalid_arguments, execution_exception response (bytes): The raw bytes that we should send back as a response.
def translocation(from_loc, to_loc):
    """Make a translocation dictionary.

    :param from_loc: entity dict or bare name (wrapped in the default namespace)
    :param to_loc: entity dict or bare name (wrapped in the default namespace)
    :rtype: dict
    """
    def _coerce(loc):
        # Bare strings become entities in the default BEL namespace.
        if isinstance(loc, str):
            return Entity(namespace=BEL_DEFAULT_NAMESPACE, name=loc)
        return loc

    rv = _activity_helper(TRANSLOCATION)
    rv[EFFECT] = {
        FROM_LOC: _coerce(from_loc),
        TO_LOC: _coerce(to_loc),
    }
    return rv
Make a translocation dictionary. :param dict from_loc: An entity dictionary from :func:`pybel.dsl.entity` :param dict to_loc: An entity dictionary from :func:`pybel.dsl.entity` :rtype: dict
def generate_hash(self, length=30):
    """Generate a random alphanumeric string of the given length.

    Uses ``random.SystemRandom`` (OS entropy) rather than the default PRNG.

    :param length: number of characters to generate (default 30)
    :return: random string of ASCII letters and digits
    """
    import random
    import string
    chars = string.ascii_letters + string.digits
    # Hoist the bound method out of the loop; also avoid the original's
    # local named ``hash``, which shadowed the builtin.
    choose = random.SystemRandom().choice
    return ''.join(choose(chars) for _ in range(length))
Generate random string of given length
def parse(*models, **kwargs):
    """Parse configuration and/or state from the device.

    models
        A list of models to be used when parsing (either varargs or a
        single list argument).

    config: ``False``
        Parse config.

    state: ``False``
        Parse state.

    profiles: ``None``
        Use certain profiles to parse. If not specified, will use the
        device default profile(s).

    CLI Example:

    .. code-block:: bash

        salt '*' napalm_yang.parse models.openconfig_interfaces
    """
    # Accept a single list of models in place of varargs.  The emptiness
    # guard fixes an IndexError when called with keyword args only.
    if models and isinstance(models[0], list):
        models = models[0]
    config = kwargs.pop('config', False)
    state = kwargs.pop('state', False)
    profiles = kwargs.pop('profiles', [])
    if not profiles and hasattr(napalm_device, 'profile'):
        profiles = napalm_device.profile
    if not profiles:
        # Last resort: derive the profile from the OS grain.
        profiles = [__grains__.get('os')]
    root = _get_root_object(models)
    parser_kwargs = {
        'device': napalm_device.get('DRIVER'),
        'profile': profiles
    }
    if config:
        root.parse_config(**parser_kwargs)
    if state:
        root.parse_state(**parser_kwargs)
    return root.to_dict(filter=True)
Parse configuration from the device. models A list of models to be used when parsing. config: ``False`` Parse config. state: ``False`` Parse state. profiles: ``None`` Use certain profiles to parse. If not specified, will use the device default profile(s). CLI Example: .. code-block:: bash salt '*' napalm_yang.parse models.openconfig_interfaces Output Example: .. code-block:: python { "interfaces": { "interface": { ".local.": { "name": ".local.", "state": { "admin-status": "UP", "counters": { "in-discards": 0, "in-errors": 0, "out-errors": 0 }, "enabled": True, "ifindex": 0, "last-change": 0, "oper-status": "UP", "type": "softwareLoopback" }, "subinterfaces": { "subinterface": { ".local..0": { "index": ".local..0", "state": { "ifindex": 0, "name": ".local..0" } } } } }, "ae0": { "name": "ae0", "state": { "admin-status": "UP", "counters": { "in-discards": 0, "in-errors": 0, "out-errors": 0 }, "enabled": True, "ifindex": 531, "last-change": 255203, "mtu": 1518, "oper-status": "DOWN" }, "subinterfaces": { "subinterface": { "ae0.0": { "index": "ae0.0", "state": { "description": "ASDASDASD", "ifindex": 532, "name": "ae0.0" } } "ae0.32767": { "index": "ae0.32767", "state": { "ifindex": 535, "name": "ae0.32767" } } } } }, "dsc": { "name": "dsc", "state": { "admin-status": "UP", "counters": { "in-discards": 0, "in-errors": 0, "out-errors": 0 }, "enabled": True, "ifindex": 5, "last-change": 0, "oper-status": "UP" } }, "ge-0/0/0": { "name": "ge-0/0/0", "state": { "admin-status": "UP", "counters": { "in-broadcast-pkts": 0, "in-discards": 0, "in-errors": 0, "in-multicast-pkts": 0, "in-unicast-pkts": 16877, "out-broadcast-pkts": 0, "out-errors": 0, "out-multicast-pkts": 0, "out-unicast-pkts": 15742 }, "description": "management interface", "enabled": True, "ifindex": 507, "last-change": 258467, "mtu": 1400, "oper-status": "UP" }, "subinterfaces": { "subinterface": { "ge-0/0/0.0": { "index": "ge-0/0/0.0", "state": { "description": "ge-0/0/0.0", "ifindex": 521, "name": 
"ge-0/0/0.0" } } } } } "irb": { "name": "irb", "state": { "admin-status": "UP", "counters": { "in-discards": 0, "in-errors": 0, "out-errors": 0 }, "enabled": True, "ifindex": 502, "last-change": 0, "mtu": 1514, "oper-status": "UP", "type": "ethernetCsmacd" } }, "lo0": { "name": "lo0", "state": { "admin-status": "UP", "counters": { "in-discards": 0, "in-errors": 0, "out-errors": 0 }, "description": "lo0", "enabled": True, "ifindex": 6, "last-change": 0, "oper-status": "UP", "type": "softwareLoopback" }, "subinterfaces": { "subinterface": { "lo0.0": { "index": "lo0.0", "state": { "description": "lo0.0", "ifindex": 16, "name": "lo0.0" } }, "lo0.16384": { "index": "lo0.16384", "state": { "ifindex": 21, "name": "lo0.16384" } }, "lo0.16385": { "index": "lo0.16385", "state": { "ifindex": 22, "name": "lo0.16385" } }, "lo0.32768": { "index": "lo0.32768", "state": { "ifindex": 248, "name": "lo0.32768" } } } } } } } }
def __get_node(self, word):
    """Return the trie node reached by following `word` from the root.

    Returns the final node, or None if the trie does not contain the word.
    """
    node = self.root
    for ch in word:
        try:
            node = node.children[ch]
        except KeyError:
            # A missing edge means the word is not in the trie.
            return None
    return node
Private function retrieving a final node of trie for given word Returns node or None, if the trie doesn't contain the word.
def data_to_string(self, data_element):
    """Convert the given data element into a string representation.

    :param data_element: object implementing
        :class:`everest.representers.interfaces.IExplicitDataElement`
    :returns: string representation (using the MIME content type
        configured for this representer)
    """
    buffer = NativeIO()
    self.data_to_stream(data_element, buffer)
    return buffer.getvalue()
Converts the given data element into a string representation. :param data_element: object implementing :class:`everest.representers.interfaces.IExplicitDataElement` :returns: string representation (using the MIME content type configured for this representer)
def get_es_ids(self):
    """Read all Elasticsearch document ids for the index, ordered by uri."""
    uri_search = self.search.source(['uri']).sort(['uri'])
    return [hit.meta.id for hit in uri_search.scan()]
Read all the Elasticsearch document ids for an index, ordered by uri.
def add_to_capabilities(self, capabilities):
    """Adds proxy information as capability in specified capabilities.

    :Args:
     - capabilities: The capabilities to which proxy will be added.
    """
    proxy_caps = {'proxyType': self.proxyType['string']}
    # Each optional proxy attribute is copied only when it is truthy,
    # matching the capability key to the attribute name.
    optional_attrs = (
        'autodetect', 'ftpProxy', 'httpProxy', 'proxyAutoconfigUrl',
        'sslProxy', 'noProxy', 'socksProxy', 'socksUsername',
        'socksPassword',
    )
    for attr in optional_attrs:
        value = getattr(self, attr)
        if value:
            proxy_caps[attr] = value
    capabilities['proxy'] = proxy_caps
Adds proxy information as capability in specified capabilities. :Args: - capabilities: The capabilities to which proxy will be added.
def to_json(self):
    """Serialize this Vocab object to a json string.

    An attached embedding is not serialized; serialize it separately
    via ``vocab.embedding.serialize``.
    """
    if self._embedding:
        warnings.warn('Serialization of attached embedding '
                      'to json is not supported. '
                      'You may serialize the embedding to a binary format '
                      'separately using vocab.embedding.serialize')
    return json.dumps({
        'idx_to_token': self._idx_to_token,
        'token_to_idx': dict(self._token_to_idx),
        'reserved_tokens': self._reserved_tokens,
        'unknown_token': self._unknown_token,
        'padding_token': self._padding_token,
        'bos_token': self._bos_token,
        'eos_token': self._eos_token,
    })
Serialize Vocab object to json string. This method does not serialize the underlying embedding.
def set_signal_type(self, sig_type):
    """Set the signal type(s) for which the SNR is calculated.

    Args:
        sig_type (str or list of str): Signal type desired by the user.
            Choices are `ins`, `mrg`, `rd`, `all` for circular waveforms
            created with PhenomD; must be `all` for eccentric waveforms.
    """
    # Normalize a bare string into a one-element list.
    if isinstance(sig_type, str):
        sig_type = [sig_type]
    self.snr_input.signal_type = sig_type
Set the signal type of interest. Sets the signal type for which the SNR is calculated. This means inspiral, merger, and/or ringdown. Args: sig_type (str or list of str): Signal type desired by user. Choices are `ins`, `mrg`, `rd`, `all` for circular waveforms created with PhenomD. If eccentric waveforms are used, must be `all`.
def _parse(self, globals_dict):
    """Parse the Globals section of a SAM template.

    :param globals_dict: Dictionary representation of the Globals section
    :return: Processed globals dictionary mapping resource type to
        GlobalProperties, used to quickly identify properties to merge
    :raises InvalidGlobalsSectionException: if the input contains sections
        or properties that are not supported
    """
    globals = {}
    if not isinstance(globals_dict, dict):
        # BUGFIX: the original appended ``.format(self._KEYWORD)`` even
        # though the message has no placeholder; the string is unchanged.
        raise InvalidGlobalsSectionException(self._KEYWORD,
                                            "It must be a non-empty dictionary")

    for section_name, properties in globals_dict.items():
        resource_type = self._make_resource_type(section_name)

        if resource_type not in self.supported_properties:
            raise InvalidGlobalsSectionException(self._KEYWORD,
                                                "'{section}' is not supported. "
                                                "Must be one of the following values - {supported}"
                                                .format(section=section_name,
                                                        supported=self.supported_resource_section_names))

        if not isinstance(properties, dict):
            # BUGFIX: the original message used "${section}" which was
            # never interpolated; now the offending section is named.
            raise InvalidGlobalsSectionException(self._KEYWORD,
                                                "Value of '{section}' must be a dictionary"
                                                .format(section=section_name))

        supported = self.supported_properties[resource_type]
        for key in properties:
            if key not in supported:
                raise InvalidGlobalsSectionException(self._KEYWORD,
                                                    "'{key}' is not a supported property of '{section}'. "
                                                    "Must be one of the following values - {supported}"
                                                    .format(key=key, section=section_name, supported=supported))

        globals[resource_type] = GlobalProperties(properties)

    return globals
Takes a SAM template as input and parses the Globals section :param globals_dict: Dictionary representation of the Globals section :return: Processed globals dictionary which can be used to quickly identify properties to merge :raises: InvalidResourceException if the input contains properties that we don't support
def open_submission(self, url=None):
    """Select the current submission to view posts.

    When no url is given, uses the currently selected item; self posts
    are additionally recorded in the visit history.
    """
    if url is None:
        item = self.get_selected_item()
        url = item['permalink']
        if item.get('url_type') == 'selfpost':
            self.config.history.add(item['url_full'])
    self.selected_page = self.open_submission_page(url)
Select the current submission to view posts.
def del_calculation(job_id, confirmed=False):
    """Delete a calculation and all associated outputs."""
    if logs.dbcmd('get_job', job_id) is None:
        print('There is no job %d' % job_id)
        return

    question = ('Are you sure you want to (abort and) delete this calculation and '
                'all associated outputs?\nThis action cannot be undone. (y/n): ')
    if not (confirmed or confirm(question)):
        return

    try:
        abort(job_id)
        resp = logs.dbcmd('del_calc', job_id, getpass.getuser())
    except RuntimeError as err:
        safeprint(err)
        return
    if 'success' in resp:
        print('Removed %d' % job_id)
    else:
        print(resp['error'])
Delete a calculation and all associated outputs.
def export_kappa_im(model, fname=None):
    """Return a networkx graph representing the model's Kappa influence map.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into a Kappa IM.
    fname : Optional[str]
        A file name, typically with .png or .pdf extension, in which the
        IM is rendered using pygraphviz.

    Returns
    -------
    networkx.MultiDiGraph
        A graph object representing the influence map.
    """
    from .kappa_util import im_json_to_graph
    kappa = _prepare_kappa(model)
    imap = kappa.analyses_influence_map()
    im = im_json_to_graph(imap)
    # Model parameters are not meaningful influence-map nodes; drop any
    # that appear.  BUGFIX: membership check replaces a bare ``except:``
    # that silently swallowed every error, not just missing nodes.
    for param in model.parameters:
        if im.has_node(param.name):
            im.remove_node(param.name)
    if fname:
        agraph = networkx.nx_agraph.to_agraph(im)
        agraph.draw(fname, prog='dot')
    return im
Return a networkx graph representing the model's Kappa influence map. Parameters ---------- model : pysb.core.Model A PySB model to be exported into a Kappa IM. fname : Optional[str] A file name, typically with .png or .pdf extension in which the IM is rendered using pygraphviz. Returns ------- networkx.MultiDiGraph A graph object representing the influence map.
def authenticate(self, api_key):
    """Log the user into Heroku with the given api_key.

    The key is stored, installed as HTTP basic auth on the session
    (empty username, key as password), and then verified.
    """
    self._api_key = api_key
    self._session.auth = ('', api_key)
    return self._verify_api_key()
Logs user into Heroku with given api_key.
def _find_devices_mac(self):
    """Register the devices available on macOS."""
    self.keyboards.append(Keyboard(self))
    # Registered in the same order as before: Mighty Mouse, then Mouse.
    self.mice.extend((MightyMouse(self), Mouse(self)))
Find devices on Mac.
def _next_record(self, next_line):
    """Use the loader to parse the next record from the reader stream.

    Supports both warc and arc records.
    """
    record = self.loader.parse_record_stream(self.reader,
                                             next_line,
                                             self.known_format,
                                             self.no_record_parse,
                                             self.ensure_http_headers)
    self.member_info = None
    if not self.mixed_arc_warc:
        # Once the format is detected, keep using it for later records
        # (unless mixed arc/warc archives are allowed).
        self.known_format = record.format
    return record
Use loader to parse the record from the reader stream Supporting warc and arc records
def set_monitor(module):
    """Define the ``monitor`` method on `module`.

    The installed method records a tensor (and its tracking flags) under
    a name in ``module.monitored_vars``.
    """
    def monitor(name, tensor, track_data=True, track_grad=True):
        entry = {
            'tensor': tensor,
            'track_data': track_data,
            'track_grad': track_grad,
        }
        module.monitored_vars[name] = entry

    module.monitor = monitor
Defines the monitor method on the module.
def formula_sections(self):
    """Return all sections related to a formula.

    When a dependency tree is available its ordering (per the "depends"
    section) is used; otherwise the manifest's sections are returned
    with the "config" section removed.
    """
    if self.dtree is None:
        return [name for name in self.manifest.sections() if name != "config"]
    return self.dtree.order
Return all sections related to a formula, re-ordered according to the "depends" section.
def combine_first(self, other):
    """Update null elements with the value in the same location in `other`.

    Combines two DataFrames by filling null values in `self` with
    non-null values from `other`; the result's row and column indexes
    are the union of the two.

    Parameters
    ----------
    other : DataFrame
        Provided DataFrame to use to fill null values.

    Returns
    -------
    DataFrame
    """
    import pandas.core.computation.expressions as expressions

    def extract_values(arr):
        # Unwrap Index/Series objects down to their raw values so that
        # expressions.where operates on plain arrays.
        if isinstance(arr, (ABCIndexClass, ABCSeries)):
            arr = arr._values

        if needs_i8_conversion(arr):
            # Datetime-like data is selected via its int64 representation.
            if is_extension_array_dtype(arr.dtype):
                arr = arr.asi8
            else:
                arr = arr.view('i8')
        return arr

    def combiner(x, y):
        mask = isna(x)
        if isinstance(mask, (ABCIndexClass, ABCSeries)):
            mask = mask._values

        x_values = extract_values(x)
        y_values = extract_values(y)

        # If the column exists only in `other`, take it wholesale.
        if y.name not in self.columns:
            return y_values

        return expressions.where(mask, y_values, x_values)

    return self.combine(other, combiner, overwrite=False)
Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0
def clear_bucket_props(self, bucket):
    """Reset the properties on the given bucket to their defaults.

    :param bucket: the bucket whose properties should be cleared
    :return: True if the server cleared the properties (HTTP 204),
        False if the server does not support clearing them (HTTP 405)
    :raises RiakError: on any other HTTP status
    """
    bucket_type = self._get_bucket_type(bucket.bucket_type)
    # BUGFIX: the original immediately recomputed `url` without
    # bucket_type, discarding the typed-bucket path segment.
    url = self.bucket_properties_path(bucket.name,
                                      bucket_type=bucket_type)
    headers = {'Content-Type': 'application/json'}

    status, _, _ = self._request('DELETE', url, headers, None)

    if status == 204:
        return True
    elif status == 405:
        return False
    else:
        raise RiakError('Error %s clearing bucket properties.'
                        % status)
Reset the properties on the given bucket object to their defaults.
def getDigitalMinimum(self, chn=None):
    """Return the minimum digital value of signal `chn`.

    Parameters
    ----------
    chn : int, optional
        Channel number.  When omitted, a float64 array with the digital
        minimum of every signal in the file is returned.  An out-of-range
        channel yields 0.

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getDigitalMinimum(0)
    -32768
    >>> f._close()
    >>> del f
    """
    if chn is None:
        return np.array([self.digital_min(i)
                         for i in np.arange(self.signals_in_file)],
                        dtype=np.float64)
    if 0 <= chn < self.signals_in_file:
        return self.digital_min(chn)
    return 0
Returns the minimum digital value of signal edfsignal. Parameters ---------- chn : int channel number Examples -------- >>> import pyedflib >>> f = pyedflib.data.test_generator() >>> f.getDigitalMinimum(0) -32768 >>> f._close() >>> del f
def reduce(self, dimensions=None, function=None, spreadfn=None, **kwargs):
    """Apply a reduce function to all ViewableElement objects.

    See :py:meth:`Dimensioned.opts` and :py:meth:`Apply.__call__` for
    more information.
    """
    # BUGFIX: avoid a shared mutable default argument; None stands in
    # for "no dimensions specified" and maps to an empty list.
    if dimensions is None:
        dimensions = []
    kwargs['_method_args'] = (dimensions, function, spreadfn)
    return self.__call__('reduce', **kwargs)
Applies a reduce function to all ViewableElement objects. See :py:meth:`Dimensioned.opts` and :py:meth:`Apply.__call__` for more information.
def get_tree_depth(self):
    """Return the depth of the tree rooted at this node (a leaf is 0)."""
    if not self.children:
        return 0
    return 1 + max(child.get_tree_depth() for child in self.children)
Finds depth of this tree.
def _enter_single_subdir(root_dir):
    """Chdir into `root_dir`, or into its sole subdirectory.

    If `root_dir` contains exactly one entry and that entry is a
    directory, enter it instead of `root_dir`.  The original working
    directory is always restored on exit.
    """
    saved_cwd = os.getcwd()
    try:
        dest_dir = root_dir
        entries = os.listdir(root_dir)
        if len(entries) == 1:
            candidate = os.path.join(root_dir, entries[0])
            if os.path.isdir(candidate):
                dest_dir = candidate
        os.chdir(dest_dir)
        yield dest_dir
    finally:
        os.chdir(saved_cwd)
If the given directory contains exactly one subdirectory, enter that subdirectory; otherwise enter the directory itself.
def get_instance(self, payload):
    """Build an instance of FieldInstance.

    :param dict payload: Payload response from the API
    :rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldInstance
    """
    solution = self._solution
    return FieldInstance(
        self._version,
        payload,
        assistant_sid=solution['assistant_sid'],
        task_sid=solution['task_sid'],
    )
Build an instance of FieldInstance :param dict payload: Payload response from the API :returns: twilio.rest.autopilot.v1.assistant.task.field.FieldInstance :rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldInstance
def fetch(self):
    """Fetch a PublishedTrackInstance.

    :returns: Fetched PublishedTrackInstance
    :rtype: twilio.rest.video.v1.room.room_participant.room_participant_published_track.PublishedTrackInstance
    """
    payload = self._version.fetch(
        'GET',
        self._uri,
        params=values.of({}),
    )
    solution = self._solution
    return PublishedTrackInstance(
        self._version,
        payload,
        room_sid=solution['room_sid'],
        participant_sid=solution['participant_sid'],
        sid=solution['sid'],
    )
Fetch a PublishedTrackInstance :returns: Fetched PublishedTrackInstance :rtype: twilio.rest.video.v1.room.room_participant.room_participant_published_track.PublishedTrackInstance
def wait_for_vacancy(self, processor_type):
    """Block until a processor of the given type has capacity, or cancel.

    Args:
        processor_type (ProcessorType): The family, and version of
            the transaction processor.

    Returns:
        Processor

    Raises:
        WaitCancelledException: if cancellation is signalled while
            waiting.
    """
    with self._condition:
        def _ready():
            return (self._processor_available(processor_type)
                    or self._cancelled_event.is_set())

        self._condition.wait_for(_ready)
        if self._cancelled_event.is_set():
            raise WaitCancelledException()
        return self[processor_type].next_processor()
Waits for a particular processor type to have the capacity to handle additional transactions or until is_cancelled is True. Args: processor_type (ProcessorType): The family, and version of the transaction processor. Returns: Processor
def read_metadata(self, symbol):
    """Read the user-defined metadata stored for the given symbol.

    Parameters
    ----------
    symbol: str
        symbol for the given item in the DB

    Returns
    -------
    The stored user metadata, or None if no metadata was stored.

    Raises
    ------
    NoDataFoundException
        If the symbol does not exist.
    """
    if not self._get_symbol_info(symbol):
        raise NoDataFoundException("Symbol does not exist.")
    doc = self._symbols.find_one({SYMBOL: symbol})
    if USERMETA in doc:
        return doc[USERMETA]
    return None
Reads user defined metadata out for the given symbol Parameters ---------- symbol: str symbol for the given item in the DB Returns ------- The user-defined metadata stored for the symbol, or None if no metadata exists.
def get_kde_home_dir():
    """Return the KDE home directory or None if not found."""
    kde_home = os.environ.get("KDEHOME")
    if kde_home:
        kde_home = os.path.abspath(kde_home)
    else:
        home = os.environ.get("HOME")
        if not home:
            return
        kde3_home = os.path.join(home, ".kde")
        kde4_home = os.path.join(home, ".kde4")
        if not fileutil.find_executable("kde4-config"):
            # No KDE4 tooling available: assume a KDE3 setup.
            kde_home = kde3_home
        else:
            kde3_file = kde_home_to_config(kde3_home)
            kde4_file = kde_home_to_config(kde4_home)
            if os.path.exists(kde4_file) and os.path.exists(kde3_file):
                # Both configs present: prefer the more recently modified.
                if fileutil.get_mtime(kde4_file) >= fileutil.get_mtime(kde3_file):
                    kde_home = kde4_home
                else:
                    kde_home = kde3_home
            else:
                kde_home = kde4_home
    return kde_home if os.path.exists(kde_home) else None
Return KDE home directory or None if not found.
def table_to_source_list(table, src_type=OutputSource):
    """Convert a table of data into a list of sources.

    A single table must have consistent source types given by src_type:
    one of :class:`AegeanTools.models.OutputSource`,
    :class:`AegeanTools.models.SimpleSource`, or
    :class:`AegeanTools.models.IslandSource`.

    Parameters
    ----------
    table : Table
        Table of sources
    src_type : class
        The source class to instantiate for each row.

    Returns
    -------
    sources : list
        A list of objects of the given type.
    """
    if table is None:
        return []

    sources = []
    for row in table:
        src = src_type()
        for param in src_type.names:
            if param not in table.colnames:
                continue
            val = row[param]
            # Promote float32 cell values to float64.
            if isinstance(val, np.float32):
                val = np.float64(val)
            setattr(src, param, val)
        sources.append(src)
    return sources
Convert a table of data into a list of sources. A single table must have consistent source types given by src_type. src_type should be one of :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. Parameters ---------- table : Table Table of sources src_type : class Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. Returns ------- sources : list A list of objects of the given type.
def guest_capture(self, userid, image_name, capture_type='rootonly',
                  compress_level=6):
    """Capture the guest to generate an image.

    :param userid: (str) the user id of the vm
    :param image_name: (str) the unique image name after capture
    :param capture_type: (str) the type of capture; 'rootonly' captures
        just the root device, 'alldisks' captures all devices of the
        userid
    :param compress_level: the compression level of the image, default 6
    """
    action = ("capture guest '%(vm)s' to generate image '%(img)s'" %
              {'vm': userid, 'img': image_name})
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._vmops.guest_capture(userid, image_name,
                                  capture_type=capture_type,
                                  compress_level=compress_level)
Capture the guest to generate a image :param userid: (str) the user id of the vm :param image_name: (str) the unique image name after capture :param capture_type: (str) the type of capture, the value can be: rootonly: indicate just root device will be captured alldisks: indicate all the devices of the userid will be captured :param compress_level: the compression level of the image, default is 6
def add_update_callback(self, callback, device):
    """Register `callback` to be invoked when the matching `device` changes."""
    entry = [callback, device]
    self._update_callbacks.append(entry)
    _LOGGER.debug('Added update callback to %s on %s', callback, device)
Register as callback for when a matching device changes.
def save(callLog, logFilename):
    """Save the call log history into this file.

    @param logFilename (path) Filename in which to save a pickled version
           of the call logs.
    """
    with open(logFilename, "wb") as fileObj:
        cPickle.dump(callLog, fileObj)
Save the call log history into this file. @param logFilename (path) Filename in which to save a pickled version of the call logs.
def mapzen_elevation_rgb(arr):
    """Encode elevation values to RGB compatible with Mapzen tangram.

    Attributes
    ----------
    arr : numpy ndarray
        Elevation array to encode.

    Returns
    -------
    out : numpy ndarray
        RGB array (3, h, w) of dtype uint8.
    """
    # Shift into the unsigned 16-bit range before splitting into channels.
    shifted = np.clip(arr + 32768.0, 0.0, 65535.0)
    channels = [shifted / 256, shifted % 256, (shifted * 256) % 256]
    return np.stack(channels).astype(np.uint8)
Encode elevation value to RGB values compatible with Mapzen tangram. Attributes ---------- arr : numpy ndarray Image array to encode. Returns ------- out : numpy ndarray RGB array (3, h, w)
def track_end(self):
    """End tracking of attribute changes.

    Returns the changes that occurred to the attributes; only the final
    state of each attribute is reported.
    """
    self.__tracking = False
    changes, self.__changes = self.__changes, {}
    return changes
Ends tracking of attributes changes. Returns the changes that occurred to the attributes. Only the final state of each attribute is obtained
def list_properties(self, list_all=False):
    """List property names associated with this node.

    By default, only lists properties that were explicitly set.  If
    ``list_all`` is True, lists all valid properties of this component
    type instead.

    Parameters
    ----------
    list_all: bool
        If true, lists all valid properties of this component type.
    """
    if not list_all:
        return list(self.inst.properties.keys())
    inst_type = type(self.inst)
    rules = self.env.property_rules
    # RDL-defined properties first, then user-defined ones, keeping the
    # original ordering of each rule table.
    props = [name for name, rule in rules.rdl_properties.items()
             if inst_type in rule.bindable_to]
    props += [name for name, rule in rules.user_properties.items()
              if inst_type in rule.bindable_to]
    return props
Lists properties associated with this node. By default, only lists properties that were explicitly set. If ``list_all`` is set to ``True`` then lists all valid properties of this component type Parameters ---------- list_all: bool If true, lists all valid properties of this component type.
def create_socket():
    """Create a raw CAN socket.

    The socket is returned unbound to any interface.
    """
    raw_can_socket = socket.socket(PF_CAN, socket.SOCK_RAW, CAN_RAW)
    log.info('Created a socket')
    return raw_can_socket
Creates a raw CAN socket. The socket will be returned unbound to any interface.
def make(parser):
    """Ceph MDS daemon management: wire up the 'create' subcommand."""
    subcommands = parser.add_subparsers(dest='subcommand')
    subcommands.required = True

    create_cmd = subcommands.add_parser(
        'create',
        help='Deploy Ceph MDS on remote host(s)',
    )
    create_cmd.add_argument(
        'mds',
        metavar='HOST[:NAME]',
        nargs='+',
        type=colon_separated,
        help='host (and optionally the daemon name) to deploy on',
    )
    parser.set_defaults(func=mds)
Ceph MDS daemon management