code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def share_project(project_id, usernames, read_only, share, **kwargs):
    """Share an entire project (and all its networks) with a list of users.

    :param project_id: id of the project to share
    :param usernames: iterable of usernames to grant ownership to
    :param read_only: 'Y' grants read-only access ('N' grants write)
    :param share: 'Y' allows re-sharing; forced to 'N' when read_only == 'Y'
    :raises HydraError: if the caller is not an owner, or tries to grant
        sharing rights without being the project creator
    """
    user_id = kwargs.get('user_id')
    proj_i = _get_project(project_id)
    proj_i.check_share_permission(int(user_id))
    user_id = int(user_id)
    # for/else: raise unless the caller appears in the owner list.
    for owner in proj_i.owners:
        if user_id == owner.user_id:
            break
    else:
        raise HydraError("Permission Denied. Cannot share project.")
    if read_only == 'Y':
        # Read-only access implies no write and no re-share rights.
        write = 'N'
        share = 'N'
    else:
        write = 'Y'
    # Only the project creator may grant re-share rights.
    if proj_i.created_by != user_id and share == 'Y':
        raise HydraError("Cannot share the 'sharing' ability as user %s is not"
                         " the owner of project %s"% (user_id, project_id))
    for username in usernames:
        user_i = _get_user(username)
        proj_i.set_owner(user_i.id, write=write, share=share)
        # Sharing a project also shares every network inside it.
        for net_i in proj_i.networks:
            net_i.set_owner(user_i.id, write=write, share=share)
    db.DBSession.flush()
Share an entire project with a list of users, identified by their usernames. The read_only flag ('Y' or 'N') must be set to 'N' to allow write access or sharing. The share flag ('Y' or 'N') must be set to 'Y' to allow the project to be shared with other users
def drawing_update(self):
    """Update the in-progress line drawing on the slip map.

    Appends the last clicked position to the line and redraws the
    'drawing' polygon once at least two points exist. No-op unless a
    draw callback is active.
    """
    from MAVProxy.modules.mavproxy_map import mp_slipmap
    if self.draw_callback is None:
        return
    self.draw_line.append(self.click_position)
    if len(self.draw_line) > 1:
        self.mpstate.map.add_object(
            mp_slipmap.SlipPolygon('drawing', self.draw_line,
                                   layer='Drawing', linewidth=2,
                                   colour=(128,128,255)))
update line drawing
def add_concept_filter(self, concept, concept_name=None):
    """Append an Elasticsearch filter for *concept* to self.filters.

    A list-valued query parameter becomes an OR filter ('or' for ES 1.x,
    bool/should otherwise); a scalar becomes a single term. *concept_name*
    is the field searched in and defaults to the concept itself. No-op if
    the concept is absent from the query parameters.
    """
    if concept not in self.query_params.keys():
        return
    concept_name = concept_name or concept
    value = self.query_params[concept]
    if isinstance(value, list):
        terms = [self._build_concept_term(concept_name, item) for item in value]
        if self.es_version == '1':
            es_filter = {'or': terms}
        else:
            es_filter = {"bool": {"should": terms}}
    else:
        es_filter = self._build_concept_term(concept_name, value)
    self.filters.append(es_filter)
Add a concept filter :param concept: concept whose query-parameter value(s) will be used to build search terms :param concept_name: name of the field to search in; defaults to the concept itself
def exists(self, client=None):
    """Determine whether this blob exists in Cloud Storage.

    If ``user_project`` is set on the bucket, the API request is billed
    to that project (via ``self._query_params``).

    :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType``
    :param client: Optional. Falls back to the client stored on the
        blob's bucket when not given.
    :rtype: bool
    :returns: True if a GET on the blob's path succeeds, False on NotFound.
    """
    client = self._require_client(client)
    query_params = self._query_params
    # Only request the name field; existence is all we need.
    query_params["fields"] = "name"
    try:
        client._connection.api_request(
            method="GET",
            path=self.path,
            query_params=query_params,
            _target_object=None,
        )
        return True
    except NotFound:
        return False
Determines whether or not this blob exists. If :attr:`user_project` is set on the bucket, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket. :rtype: bool :returns: True if the blob exists in Cloud Storage.
def pylog(self, *args, **kwargs):
    """Display all available logging information via printerr.

    NOTE(review): traceback.format_exc() is only meaningful while an
    exception is being handled; outside an except block it yields
    'NoneType: None' — confirm callers invoke this from handlers.
    """
    printerr(self.name, args, kwargs, traceback.format_exc())
Display all available logging information.
def export_to_dir(network, export_dir):
    """Export a PyPSA network as CSV files into the ding0 debug directory.

    Args:
        network: pypsa.Network to export.
        export_dir: str, sub-directory under <ding0 package>/output/debug/grid/
            where the CSV files are written.
    """
    package_path = ding0.__path__[0]
    network.export_to_csv_folder(os.path.join(package_path, 'output',
                                              'debug', 'grid', export_dir))
Exports PyPSA network as CSV files to directory Args: network: pypsa.Network export_dir: str Sub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to.
def add_after(self, pipeline):
    """Append another pipeline's stages after this pipeline's own.

    Arguments:
        pipeline: a Pipeline, or a callable which is wrapped into one.

    Returns this pipeline (fluent interface); ``self.pipes`` is rebound
    to a fresh list so existing references are unaffected.
    """
    other = pipeline if isinstance(pipeline, Pipeline) else Pipeline(pipeline)
    self.pipes = list(self.pipes) + list(other.pipes)
    return self
Add a Pipeline to be applied after this processing pipeline. Arguments: pipeline: The Pipeline or callable to apply after this Pipeline.
def call_async(func):
    """Decorate *func* so each call is scheduled onto self.loop's thread.

    The wrapped call returns immediately (no result is propagated); the
    real invocation happens later via call_soon_threadsafe, and any
    exception it raises is logged rather than re-raised.
    """
    @wraps(func)
    def wrapper(self, *args, **kw):
        def call():
            try:
                func(self, *args, **kw)
            except Exception:
                # Exceptions on the loop thread would otherwise vanish.
                logger.exception(
                    "failed to call async [%r] with [%r] [%r]",
                    func, args, kw
                )
        self.loop.call_soon_threadsafe(call)
    return wrapper
Decorates a function to be called async on the loop thread
def decode_cursor(self, request):
    """Given a request with a cursor, return a `Cursor` instance.

    Differs from the standard CursorPagination: the position is kept as
    the raw token list from parse_qs (may encode a tuple), not a single
    value. Returns None when no cursor parameter is present; raises
    NotFound for a malformed cursor.
    """
    encoded = request.query_params.get(self.cursor_query_param)
    if encoded is None:
        return None
    try:
        querystring = b64decode(encoded.encode('ascii')).decode('ascii')
        tokens = urlparse.parse_qs(querystring, keep_blank_values=True)
        offset = tokens.get('o', ['0'])[0]
        offset = _positive_int(offset, cutoff=self.offset_cutoff)
        reverse = tokens.get('r', ['0'])[0]
        reverse = bool(int(reverse))
        position = tokens.get('p', None)
    except (TypeError, ValueError):
        raise NotFound(self.invalid_cursor_message)
    return Cursor(offset=offset, reverse=reverse, position=position)
Given a request with a cursor, return a `Cursor` instance. Differs from the standard CursorPagination to handle a tuple in the position field.
def is_valid(self):
    """Return True only for bi-allelic, single-base variants.

    Requires exactly one ref allele, exactly one alt allele, and that
    the alt allele is a single base.
    """
    if len(self.ref) != 1:
        return False
    if len(self.alt) != 1:
        return False
    return len(self.alt[0]) == 1
Only retain SNPs or single indels, and are bi-allelic
def display_animation(anim, **kwargs):
    """Display the animation as an IPython HTML object.

    Extra keyword arguments are forwarded to anim_to_html.
    """
    from IPython.display import HTML
    return HTML(anim_to_html(anim, **kwargs))
Display the animation with an IPython HTML object
def count(y_true, y_score=None, countna=False):
    """Count the number of examples.

    When countna is False only labeled examples are counted (those whose
    float conversion is not NaN); otherwise every element is counted.
    y_score is accepted for signature compatibility and ignored.
    """
    if not countna:
        return (~np.isnan(to_float(y_true))).sum()
    else:
        return len(y_true)
Counts the number of examples. If countna is False then only count labeled examples, i.e. those with y_true not NaN
def _process_path_prefix(path_prefix):
    """Validate and split a Google Cloud Storage path prefix.

    Args:
        path_prefix: a GCS path prefix of format '/bucket/prefix',
            '/bucket/' or '/bucket'.

    Raises:
        ValueError: if the path is invalid.

    Returns:
        A ('/bucket', prefix) tuple; prefix is None when absent or empty.
    """
    _validate_path(path_prefix)
    if not _GCS_PATH_PREFIX_REGEX.match(path_prefix):
        raise ValueError('Path prefix should have format /bucket, /bucket/, '
                         'or /bucket/prefix but got %s.' % path_prefix)
    # First '/' after the leading one separates bucket from prefix.
    slash_pos = path_prefix.find('/', 1)
    if slash_pos == -1:
        return path_prefix, None
    return path_prefix[:slash_pos], path_prefix[slash_pos + 1:] or None
Validate and process a Google Cloud Storage path prefix. Args: path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix' or '/bucket/' or '/bucket'. Raises: ValueError: if path is invalid. Returns: a tuple of /bucket and prefix. prefix can be None.
def main(argv: Optional[Sequence[str]] = None) -> None:
    """Start the pass-git-helper script.

    Args:
        argv: Command line arguments to parse; when ``None`` they are
            taken from the process arguments automatically.
    """
    args = parse_arguments(argv=argv)
    if args.logging:
        logging.basicConfig(level=logging.DEBUG)
    handle_skip()
    action = args.action
    request = parse_request()
    LOGGER.debug('Received action %s with request:\n%s', action, request)
    try:
        mapping = parse_mapping(args.mapping)
    except Exception as error:
        # Report both to the log and stderr, then exit non-zero.
        LOGGER.critical('Unable to parse mapping file', exc_info=True)
        print(
            'Unable to parse mapping file: {error}'.format(
                error=error),
            file=sys.stderr)
        sys.exit(1)
    # Only the git-credential 'get' action is supported.
    if action == 'get':
        get_password(request, mapping)
    else:
        LOGGER.info('Action %s is currently not supported', action)
        sys.exit(1)
Start the pass-git-helper script. Args: argv: If not ``None``, use the provided command line arguments for parsing. Otherwise, extract them automatically.
def parse_record(header, record):
    """Parse a USN record according to its major version (header[1]).

    Raises RuntimeError (with the original error chained) when the
    version is unknown (KeyError) or the bytes are malformed
    (struct.error).
    """
    major_version = header[1]
    try:
        return RECORD_PARSER[major_version](header, record)
    except (KeyError, struct.error) as error:
        raise RuntimeError("Corrupted USN Record") from error
Parses a record according to its version.
def get_perm_model():
    """Return the Perm model active in this project (settings.PERM_MODEL).

    Raises ImproperlyConfigured when the setting is malformed or names a
    model that is not installed.
    """
    try:
        return django_apps.get_model(settings.PERM_MODEL, require_ready=False)
    except ValueError:
        raise ImproperlyConfigured("PERM_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            "PERM_MODEL refers to model '{}' that has not been installed".format(settings.PERM_MODEL)
        )
Returns the Perm model that is active in this project.
def group_experiments_greedy(tomo_expt: TomographyExperiment):
    """Greedily group ExperimentSettings sharing a tensor product basis.

    :param tomo_expt: TomographyExperiment whose settings are grouped.
    :return: a new TomographyExperiment with the grouped settings and
        the same program.
    """
    diag_sets = _max_tpb_overlap(tomo_expt)
    grouped_expt_settings_list = list(diag_sets.values())
    grouped_tomo_expt = TomographyExperiment(grouped_expt_settings_list,
                                             program=tomo_expt.program)
    return grouped_tomo_expt
Greedy method to group ExperimentSettings in a given TomographyExperiment :param tomo_expt: TomographyExperiment to group ExperimentSettings within :return: TomographyExperiment, with grouped ExperimentSettings according to whether it consists of PauliTerms diagonal in the same tensor product basis
def _get_hosts_from_ports(self, ports): hosts = map(lambda x: 'localhost:%d' % int(x.strip()), ports.split(',')) return list(set(hosts))
Build a deduplicated list of 'localhost:port' endpoints from a comma-separated string of ports
def create_sequence_rule(self, sequence_rule_form):
    """Create a new ``SequenceRule`` from a create-form.

    arg:    sequence_rule_form: the form for this SequenceRule
    return: the newly created SequenceRule
    raise:  IllegalState - form already used in a create transaction
    raise:  InvalidArgument - wrong form type, update-only form, or one
            or more invalid form elements
    raise:  Unsupported - form did not originate from
            ``get_sequence_rule_form_for_create()``
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('assessment_authoring',
                                     collection='SequenceRule',
                                     runtime=self._runtime)
    if not isinstance(sequence_rule_form, ABCSequenceRuleForm):
        raise errors.InvalidArgument('argument type is not an SequenceRuleForm')
    if sequence_rule_form.is_for_update():
        raise errors.InvalidArgument('the SequenceRuleForm is for update only, not create')
    try:
        # KeyError here means this session never issued the form.
        if self._forms[sequence_rule_form.get_id().get_identifier()] == CREATED:
            raise errors.IllegalState('sequence_rule_form already used in a create transaction')
    except KeyError:
        raise errors.Unsupported('sequence_rule_form did not originate from this session')
    if not sequence_rule_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    insert_result = collection.insert_one(sequence_rule_form._my_map)
    # Mark the form consumed so it cannot be reused for another create.
    self._forms[sequence_rule_form.get_id().get_identifier()] = CREATED
    result = objects.SequenceRule(
        osid_object_map=collection.find_one({'_id': insert_result.inserted_id}),
        runtime=self._runtime,
        proxy=self._proxy)
    return result
Creates a new ``SequenceRule``. arg: sequence_rule_form (osid.assessment.authoring.SequenceRuleForm): the form for this ``SequenceRule`` return: (osid.assessment.authoring.SequenceRule) - the new ``SequenceRule`` raise: IllegalState - ``sequence_rule_form`` already used in a create transaction raise: InvalidArgument - one or more of the form elements is invalid raise: NullArgument - ``sequence_rule_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``sequence_rule_form`` did not originate from ``get_sequence_rule_form_for_create()`` *compliance: mandatory -- This method must be implemented.*
def fetch_viewers(self, game):
    """Query viewer and channel counts for *game* and set them on it.

    :returns: the given game, updated in place
    :rtype: :class:`models.Game`
    :raises: None
    """
    r = self.kraken_request('GET', 'streams/summary',
                            params={'game': game.name}).json()
    game.viewers = r['viewers']
    game.channels = r['channels']
    return game
Query the viewers and channels of the given game and set them on the object :returns: the given game :rtype: :class:`models.Game` :raises: None
def find_substring_edge(self, substring, suffix_tree_id):
    """Return (edge, length) matching *substring* in the given suffix tree.

    NOTE(review): the inner call resolves to a module-level
    find_substring_edge helper (same name, different signature), not
    this method — confirm the import in the enclosing module.
    """
    suffix_tree = self.suffix_tree_repo[suffix_tree_id]
    started = datetime.datetime.now()
    edge, ln = find_substring_edge(substring=substring, suffix_tree=suffix_tree, edge_repo=self.edge_repo)
    # Report how long the search took.
    print(" - searched for edge in {} for substring: '{}'".format(datetime.datetime.now() - started, substring))
    return edge, ln
Returns an edge that matches the given substring.
def tau_from_final_mass_spin(final_mass, final_spin, l=2, m=2, nmodes=1):
    """Return the QNM damping time for the given mass, spin, and mode.

    Thin wrapper over get_lm_f0tau; index [1] selects the damping-time
    component of its (frequency, tau) result. Mass is in solar masses,
    spin dimensionless; returns seconds (float or array, shaped per
    get_lm_f0tau's broadcasting).
    """
    return get_lm_f0tau(final_mass, final_spin, l, m, nmodes)[1]
Returns QNM damping time for the given mass and spin and mode. Parameters ---------- final_mass : float or array Mass of the black hole (in solar masses). final_spin : float or array Dimensionless spin of the final black hole. l : int or array, optional l-index of the harmonic. Default is 2. m : int or array, optional m-index of the harmonic. Default is 2. nmodes : int, optional The number of overtones to generate. Default is 1. Returns ------- float or array The damping time of the QNM(s), in seconds. If only a single mode is requested (and mass, spin, l, and m are not arrays), this will be a float. If multiple modes requested, will be an array with shape ``[input shape x] nmodes``, where ``input shape`` is the broadcasted shape of the inputs.
def add_repo(self, repo, team):
    """Add ``repo`` to the team named ``team``.

    .. note:: O(n) — iterates all teams and delegates to the first team
        whose name matches. Retrieve the team directly for constant time.

    :param str repo: (required), form: 'user/repo'
    :param str team: (required), team name
    :returns: the team's add_repo result, or False when no team matches
    """
    matching = (t for t in self.iter_teams() if t.name == team)
    target = next(matching, None)
    if target is None:
        return False
    return target.add_repo(repo)
Add ``repo`` to ``team``. .. note:: This method is of complexity O(n). This iterates over all teams in your organization and only adds the repo when the team name matches the team parameter above. If you want constant time, you should retrieve the team and call ``add_repo`` on that team directly. :param str repo: (required), form: 'user/repo' :param str team: (required), team name
def dependency_status(data):
    """Return abstracted status of *data*'s input-output dependencies.

    - ``STATUS_ERROR``: a parent has error status or was deleted
    - ``STATUS_DONE``: no parents, or all parents are done
    - ``None``: anything else (still processing)
    """
    parents_statuses = set(
        DataDependency.objects.filter(
            child=data, kind=DataDependency.KIND_IO
        ).distinct('parent__status').values_list('parent__status', flat=True)
    )
    if not parents_statuses:
        return Data.STATUS_DONE
    # A None status means the parent data object was deleted.
    if None in parents_statuses:
        return Data.STATUS_ERROR
    if Data.STATUS_ERROR in parents_statuses:
        return Data.STATUS_ERROR
    if len(parents_statuses) == 1 and Data.STATUS_DONE in parents_statuses:
        return Data.STATUS_DONE
    return None
Return abstracted status of dependencies. - ``STATUS_ERROR`` .. one dependency has error status or was deleted - ``STATUS_DONE`` .. all dependencies have done status - ``None`` .. other
def upload_to_s3(self, key, filename):
    """Upload *filename* to S3 under *key* with appropriate headers.

    Sets ContentType/ContentEncoding from the guessed MIME type and
    CacheControl from self.cache_control when configured. In dry-run
    mode nothing is uploaded, but the counters are still updated
    (NOTE(review): confirm that counting dry-run files is intended).
    """
    extra_args = {'ACL': self.acl}
    guess = mimetypes.guess_type(filename)
    content_type = guess[0]
    encoding = guess[1]
    if content_type:
        extra_args['ContentType'] = content_type
    # Mark as gzip if we compress this type ourselves or the file is one.
    if (self.gzip and content_type in self.gzip_content_types) or encoding == 'gzip':
        extra_args['ContentEncoding'] = 'gzip'
    if content_type in self.cache_control:
        extra_args['CacheControl'] = ''.join((
            'max-age=',
            str(self.cache_control[content_type])
        ))
    if not self.dry_run:
        logger.debug("Uploading %s" % filename)
        if self.verbosity > 0:
            self.stdout.write("Uploading %s" % filename)
        s3_obj = self.s3_resource.Object(self.aws_bucket_name, key)
        s3_obj.upload_file(filename, ExtraArgs=extra_args)
    self.uploaded_files += 1
    self.uploaded_file_list.append(filename)
Set the content type and gzip headers if applicable and upload the item to S3
def untar(fname, verbose=True):
    """Un-gzip and un-tar a .tar.gz file into a subdir of BIGDATA_PATH.

    Returns the extraction directory path (skipping extraction when it
    already exists), or None when the input is not a .tar.gz / no
    directory results. NOTE(review): *verbose* is currently unused.
    """
    if fname.lower().endswith(".tar.gz"):
        # Strip the 7-char '.tar.gz' suffix to name the expected output dir.
        dirpath = os.path.join(BIGDATA_PATH, os.path.basename(fname)[:-7])
        if os.path.isdir(dirpath):
            return dirpath
        with tarfile.open(fname) as tf:
            members = tf.getmembers()
            for member in tqdm(members, total=len(members)):
                tf.extract(member, path=BIGDATA_PATH)
        # Fall back to the archive's own top-level entry name.
        dirpath = os.path.join(BIGDATA_PATH, members[0].name)
        if os.path.isdir(dirpath):
            return dirpath
    else:
        logger.warning("Not a tar.gz file: {}".format(fname))
Unzip and untar a tar.gz file into a subdir of the BIGDATA_PATH directory
def add_vtt_file(self, vtt_file, language_type=None):
    """Add a VTT file tagged with the given language.

    arg:    vtt_file (DataInputStream): the new vtt file
    arg:    language_type: optional locale; defaults to
            DEFAULT_LANGUAGE_TYPE
    raise:  InvalidArgument - ``vtt_file`` is not a DataInputStream
    """
    if not isinstance(vtt_file, DataInputStream):
        raise InvalidArgument('vtt_file')
    locale = DEFAULT_LANGUAGE_TYPE.identifier
    if language_type is not None:
        locale = language_type.identifier
    self.my_osid_object_form.add_file(vtt_file,
                                      locale,
                                      asset_name="VTT File Container",
                                      asset_description="Used by an asset content to manage multi-language VTT files")
Adds a vtt file tagged as the given language. arg: vtt_file (displayText): the new vtt_file raise: InvalidArgument - ``vtt_file`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``media_description`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def conditional_jit(function=None, **kwargs):
    """Apply numba's jit decorator when numba is installed.

    Usable bare (``@conditional_jit``) or with arguments
    (``@conditional_jit(nopython=True)``). When numba cannot be
    imported, the function is returned unchanged.
    """
    def decorate(func):
        try:
            numba = importlib.import_module("numba")
            return numba.jit(**kwargs)(func)
        except ImportError:
            return func
    return decorate(function) if function else decorate
Use numba's jit decorator if numba is installed. Notes ----- If called without arguments then return wrapped function. @conditional_jit def my_func(): return else called with arguments @conditional_jit(nopython=True) def my_func(): return
def set_error_output_file(filename):
    """Redirect VTK error output to *filename*.

    Returns the (fileOutputWindow, outputWindow) pair so callers can
    keep references to the VTK objects alive.
    """
    filename = os.path.abspath(os.path.expanduser(filename))
    fileOutputWindow = vtk.vtkFileOutputWindow()
    fileOutputWindow.SetFileName(filename)
    # Install the file window as the global VTK output instance.
    outputWindow = vtk.vtkOutputWindow()
    outputWindow.SetInstance(fileOutputWindow)
    return fileOutputWindow, outputWindow
Sets a file to write out the VTK errors
def for_display(self):
    """Format step details for logging output.

    Shape: '<step_num>: <path>[ [SKIP]][: <description>]'.
    """
    suffix = " [SKIP]" if self.skip else ""
    pieces = ["{}: {}{}".format(self.step_num, self.path, suffix)]
    description = self.task_config.get("description")
    if description:
        pieces.append(": {}".format(description))
    return "".join(pieces)
Step details formatted for logging output.
def assets(self, asset_type=None):
    """Retrieve this adversary's assets, optionally filtered by type.

    Args:
        asset_type: None (all assets), 'PHONE', 'HANDLER', or 'URL'

    Returns:
        The requested assets, or None after reporting an unknown type
        via handle_error(925).
    """
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    if not asset_type:
        return self.tc_requests.adversary_assets(
            self.api_type, self.api_sub_type, self.unique_id
        )
    if asset_type == 'PHONE':
        return self.tc_requests.adversary_phone_assets(
            self.api_type, self.api_sub_type, self.unique_id
        )
    if asset_type == 'HANDLER':
        return self.tc_requests.adversary_handle_assets(
            self.api_type, self.api_sub_type, self.unique_id
        )
    if asset_type == 'URL':
        return self.tc_requests.adversary_url_assets(
            self.api_type, self.api_sub_type, self.unique_id
        )
    self._tcex.handle_error(
        925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]
    )
    return None
Retrieves all of the assets of a given asset_type Args: asset_type: (str) Either None, PHONE, HANDLER, or URL Returns:
def utime(self, tarinfo, targetpath):
    """Set modification (and access) time of *targetpath* from tarinfo.mtime.

    No-op on platforms without os.utime. Raises ExtractError when the
    time cannot be changed, chaining the underlying OS error so the
    cause is not lost (the original bound ``e`` but discarded it).
    """
    if not hasattr(os, 'utime'):
        return
    try:
        os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
    except EnvironmentError as e:
        raise ExtractError("could not change modification time") from e
Set modification time of targetpath according to tarinfo.
def get_from_category_qs(cls, category):
    """Return a QuerySet of this model's objects tied to *category*.

    :param Category category:
    :rtype: QuerySet
    """
    ids = cls.get_ties_for_categories_qs(category).values_list('object_id').distinct()
    # values_list yields 1-tuples; unwrap for the id__in filter.
    filter_kwargs = {'id__in': [i[0] for i in ids]}
    return cls.objects.filter(**filter_kwargs)
Returns a QuerySet of objects of this type associated with the given category. :param Category category: :rtype: list :return:
def drawImage(self, image):
    """Render *image* to the controller one scanline at a time.

    NOTE(review): the copy width is extended by image.width() % 4 —
    presumably to satisfy 4-byte scanline alignment; confirm against
    the image class's actual stride semantics.
    """
    padding = image.width() % 4
    for i in range(0, image.height()):
        # Copy a single row (including alignment padding) and send it.
        tmp = image.copy(0, i, image.width() + padding, 1)
        ptr = tmp.bits()
        ptr.setsize(tmp.byteCount())
        self._controller.sendUpdate(self._dx, i + self._dy,
                                    image.width() + self._dx - 1, i + self._dy,
                                    tmp.width(), tmp.height(),
                                    self._colorDepth, False,
                                    ptr.asstring())
Render of widget
def check_dependencies_remote(args):
    """Invoke the 'depends' check using another Python interpreter.

    Args:
        args: namespace with ``python`` (interpreter path) and
            ``requirement`` (requirement to check).

    Returns 0 on success; raises CalledProcessError otherwise.
    """
    cmd = [args.python, '-m', 'depends', args.requirement]
    # Make this script's directory importable by the child interpreter.
    env = dict(PYTHONPATH=os.path.dirname(__file__))
    return subprocess.check_call(cmd, env=env)
Invoke this command on a remote Python.
def coverage(reportdir=None, extra=None):
    """Run this project's tests under coverage and write an HTML report.

    Args:
        reportdir: optional output directory for the HTML report.
        extra: extra arguments forwarded to test().
    """
    import coverage as coverage_api
    cov = coverage_api.coverage()
    opts = {'directory': reportdir} if reportdir else {}
    cov.start()
    test(extra)
    cov.stop()
    cov.html_report(**opts)
Test this project with coverage reports
def encode_data_items(self, *args):
    """Encode integers and strings into a single concatenated string.

    - strings are kept as-is
    - integers are encoded via self.encode_int with INTEGER_PREFIX
    - everything else is str()-converted
    - items are joined with SEPARATOR

    Example: ``encode_data_items('abc', 123, 'xyz')`` -> ``'abc|~B7|xyz'``
    """
    encoded = []
    for item in args:
        if isinstance(item, str):
            encoded.append(item)
        elif isinstance(item, int):
            encoded.append(self.INTEGER_PREFIX + self.encode_int(item))
        else:
            encoded.append(str(item))
    return self.SEPARATOR.join(encoded)
Encodes a list of integers and strings into a concatenated string. - encode string items as-is. - encode integer items as base-64 with a ``'~'`` prefix. - concatenate encoded items with a ``'|'`` separator. Example: ``encode_data_items('abc', 123, 'xyz')`` returns ``'abc|~B7|xyz'``
def _is_defaultable(i, entry, table, check_for_aliases=True):
    """Determine if an entry may be replaced by a default route.

    Parameters
    ----------
    i : int
        Position of the entry in the table.
    entry : RoutingTableEntry
        The entry itself.
    table : [RoutingTableEntry, ...]
        The table containing the entry.
    check_for_aliases : bool
        If True, later entries are checked for key/mask overlap before
        suggesting the route may be default routed.
    """
    # Candidate only with a single known source and a single sink.
    if (len(entry.sources) == 1 and
            len(entry.route) == 1 and
            None not in entry.sources):
        source = next(iter(entry.sources))
        sink = next(iter(entry.route))
        # Both endpoints must be links, directly opposite each other.
        if source.is_link and sink.is_link:
            if source.opposite is sink:
                key, mask = entry.key, entry.mask
                # No later entry may alias this key/mask.
                if not check_for_aliases or \
                        not any(intersect(key, mask, d.key, d.mask) for
                                d in table[i+1:]):
                    return True
    return False
Determine if an entry may be removed from a routing table and be replaced by a default route. Parameters ---------- i : int Position of the entry in the table entry : RoutingTableEntry The entry itself table : [RoutingTableEntry, ...] The table containing the entry. check_for_aliases : bool If True, the table is checked for aliased entries before suggesting a route may be default routed.
def export(self):
    """Serialize this connection's settings to a 4-space-indented JSON string."""
    payload = {name: getattr(self, name, None)
               for name in ('id', 'host', 'port', 'user')}
    payload.update(
        mountOptions=self.mount_opts,
        mountPoint=self.mount_point,
        beforeMount=self.cmd_before_mount,
        authType=self.auth_method,
        sshKey=self.ssh_key,
    )
    return json.dumps(payload, indent=4)
Serializes to JSON.
def string_array_to_list(a):
    """Turn a Java string array into a Python string list.

    :param a: the string array to convert
    :type a: JB_Object
    :return: the string list
    :rtype: list
    """
    result = []
    length = javabridge.get_env().get_array_length(a)
    wrapped = javabridge.get_env().get_object_array_elements(a)
    for i in range(length):
        result.append(javabridge.get_env().get_string(wrapped[i]))
    return result
Turns the Java string array into Python unicode string list. :param a: the string array to convert :type a: JB_Object :return: the string list :rtype: list
def list_all_native_quantities(self, with_info=False):
    """Return all available native quantities in this catalog.

    When *with_info* is True, return a dict mapping each quantity to
    its info; otherwise return a plain list of quantity names.
    See also: list_all_quantities
    """
    quantities = self._native_quantities
    if with_info:
        return {name: self.get_quantity_info(name) for name in quantities}
    return list(quantities)
Return a list of all available native quantities in this catalog. If *with_info* is `True`, return a dict with quantity info. See also: list_all_quantities
def commit_buildroot(self):
    """Create an image from the buildroot container.

    Commits the build container as a timestamped 'buildroot-<image>'
    image and records its name and id on self.

    :return: id of the committed image
    """
    logger.info("committing buildroot")
    self.ensure_is_built()
    commit_message = "docker build of '%s' (%s)" % (self.image, self.uri)
    self.buildroot_image_name = ImageName(
        repo="buildroot-%s" % self.image,
        tag=datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
    self.buildroot_image_id = self.dt.commit_container(self.build_container_id, commit_message)
    return self.buildroot_image_id
create image from buildroot :return:
def set_posts_param_modified_after(self, params, post_type, status):
    """Set modified_after so syncing continues where it left off.

    Skipped when purging first, doing a full sync, or when
    self.modified_after is already set; otherwise it is derived from the
    most recently modified matching Post.

    :param params: the GET params dict, which may be updated to include
        the "modified_after" key
    :param post_type: post, page, attachment, or any custom post type
    :param status: publish, private, draft, etc.; "any" matches all
    :return: None
    """
    if not self.purge_first and not self.full and not self.modified_after:
        if status == "any":
            latest = Post.objects.filter(post_type=post_type).order_by("-modified").first()
        else:
            latest = Post.objects.filter(post_type=post_type, status=status).order_by("-modified").first()
        if latest:
            self.modified_after = latest.modified
    if self.modified_after:
        params["modified_after"] = self.modified_after.isoformat()
        logger.info("getting posts after: %s", params["modified_after"])
Set modified_after date to "continue where we left off" if appropriate :param params: the GET params dict, which may be updated to include the "modified_after" key :param post_type: post, page, attachment, or any custom post type set up in the WP API :param status: publish, private, draft, etc. :return: None
def get_output_score_metadata(self):
    """Get the metadata for the output score start range.

    return: (osid.Metadata) - metadata for the output score start range
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = dict(self._mdata['output_score'])
    # Include current values so the form can validate against them.
    metadata.update({'existing_decimal_values': self._my_map['outputScore']})
    return Metadata(**metadata)
Gets the metadata for the output score start range. return: (osid.Metadata) - metadata for the output score start range *compliance: mandatory -- This method must be implemented.*
def integers(self, start: int = 0, end: int = 10, length: int = 10) -> List[int]:
    """Generate a list of random integers.

    Both positive and negative bounds are accepted.

    :param start: Lower bound.
    :param end: Upper bound.
    :param length: Length of the list.
    :return: List of integers.

    :Example: [-20, -19, -18, -17]
    """
    return self.random.randints(
        length, start, end)
Generate a list of random integers. Integers can be negative or positive numbers. .. note: You can use both positive and negative numbers. :param start: Start. :param end: End. :param length: Length of list. :return: List of integers. :Example: [-20, -19, -18, -17]
def create(gandi, datacenter, bandwidth, ip_version, vlan, ip, attach, background):
    """Create a public or private ip.

    A vlan requires ip_version 4; an explicit ip requires a vlan. When
    attaching to a vm, the datacenter must match the vm's datacenter
    (and defaults to it, or 'LU' when not attaching).
    """
    if ip_version != 4 and vlan:
        gandi.echo('You must have an --ip-version to 4 when having a vlan.')
        return
    if ip and not vlan:
        gandi.echo('You must have a --vlan when giving an --ip.')
        return
    vm_ = gandi.iaas.info(attach) if attach else None
    if datacenter and vm_:
        dc_id = gandi.datacenter.usable_id(datacenter)
        if dc_id != vm_['datacenter_id']:
            gandi.echo('The datacenter you provided does not match the '
                       'datacenter of the vm you want to attach to.')
            return
    if not datacenter:
        datacenter = vm_['datacenter_id'] if vm_ else 'LU'
    # Warn and bail out when the datacenter is scheduled to close.
    try:
        gandi.datacenter.is_opened(datacenter, 'iaas')
    except DatacenterLimited as exc:
        gandi.echo('/!\ Datacenter %s will be closed on %s, '
                   'please consider using another datacenter.' %
                   (datacenter, exc.date))
        return
    gandi.ip.create(ip_version, datacenter, bandwidth, attach, vlan, ip, background)
Create a public or private ip
def load_usps():
    """USPs Digits Dataset.

    The data of this dataset is a 3d numpy array vector with shape (224, 224, 3)
    containing 9298 224x224 RGB photos of handwritten digits, and the target is
    a 1d numpy integer array containing the label of the digit represented in
    the image.
    """
    dataset_path = _load('usps')
    df = _load_csv(dataset_path, 'data')
    X = _load_images(os.path.join(dataset_path, 'images'), df.image)
    y = df.label.values
    # NOTE: the docstring above is passed to Dataset as its description
    # at runtime — edit it with care.
    return Dataset(load_usps.__doc__, X, y, accuracy_score, stratify=True)
USPs Digits Dataset. The data of this dataset is a 3d numpy array vector with shape (224, 224, 3) containing 9298 224x224 RGB photos of handwritten digits, and the target is a 1d numpy integer array containing the label of the digit represented in the image.
def create_filter_predicate(self):
    """Create a filter predicate from the selected query parameters.

    Filters registered via ``add_filter`` are selected by the 'filter'
    query parameters (defaulting to 'already_labeled' when registered);
    each is initialized with the engine's query id and parameters. The
    returned function accepts a ``(content_id, FC)`` pair and is True
    iff FC is not None and every selected predicate accepts the pair.

    NOTE(review): the tuple-parameter lambda at the end is Python 2
    syntax and will not parse on Python 3.
    """
    assert self.query_content_id is not None, \
        'must call SearchEngine.set_query_id first'
    filter_names = self.query_params.getlist('filter')
    if len(filter_names) == 0 and 'already_labeled' in self._filters:
        filter_names = ['already_labeled']
    init_filters = [(n, self._filters[n]) for n in filter_names]
    # Seed with an always-true predicate so all() works with no filters.
    preds = [lambda _: True]
    for name, p in init_filters:
        preds.append(p.set_query_id(self.query_content_id)
                     .set_query_params(self.query_params)
                     .create_predicate())
    return lambda (cid, fc): fc is not None and all(p((cid, fc)) for p in preds)
Creates a filter predicate. The list of available filters is given by calls to ``add_filter``, and the list of filters to use is given by parameters in ``params``. In this default implementation, multiple filters can be specified with the ``filter`` parameter. Each filter is initialized with the same set of query parameters given to the search engine. The returned function accepts a ``(content_id, FC)`` and returns ``True`` if and only if every selected predicate returns ``True`` on the same input.
def cx_to_networkx(cx):
    """Return a MultiDiGraph representation of a CX network.

    Node/edge '@id' values become graph node ids and edge keys; the node
    name ('n') and edge interaction ('i') are stored as attributes
    alongside the nodeAttributes/edgeAttributes entries.
    """
    graph = networkx.MultiDiGraph()
    for node_entry in get_aspect(cx, 'nodes'):
        id = node_entry['@id']
        attrs = get_attributes(get_aspect(cx, 'nodeAttributes'), id)
        attrs['n'] = node_entry['n']
        graph.add_node(id, **attrs)
    for edge_entry in get_aspect(cx, 'edges'):
        id = edge_entry['@id']
        attrs = get_attributes(get_aspect(cx, 'edgeAttributes'), id)
        attrs['i'] = edge_entry['i']
        graph.add_edge(edge_entry['s'], edge_entry['t'], key=id, **attrs)
    return graph
Return a MultiDiGraph representation of a CX network.
def prep_pdf(qc_dir, config):
    """Create a PDF from the FastQC HTML report in the QC directory.

    Requires wkhtmltopdf installed. Rewrites the report's CSS classes
    first to work around wkhtmltopdf conversion issues, then converts.
    Returns the PDF path, or None when wkhtmltopdf is missing or there
    is no HTML report.
    """
    html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html")
    html_fixed = "%s-fixed%s" % os.path.splitext(html_file)
    try:
        topdf = config_utils.get_program("wkhtmltopdf", config)
    except config_utils.CmdNotFound:
        topdf = None
    if topdf and utils.file_exists(html_file):
        out_file = "%s.pdf" % os.path.splitext(html_file)[0]
        if not utils.file_exists(out_file):
            # Neutralize CSS classes wkhtmltopdf cannot handle.
            cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s"
                   % (html_file, html_fixed))
            do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf")
            cmd = [topdf, html_fixed, out_file]
            do.run(cmd, "Convert QC HTML to PDF")
        return out_file
Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS.
def unpack(iterable, count, fill=None):
    """Safely unpack *iterable* into exactly *count* values.

    Takes up to *count* items from the iterable; when it is shorter,
    the remainder is padded with *fill* (combined via merge).

    Example:
        a, b, c, d = unpack('abc', 4)   # -> ('a', 'b', 'c', None)
    """
    indexed = list(enumerate(iterable))
    taken = count if count <= len(indexed) else len(indexed)
    values = [pair[1] for pair in indexed[:taken]]
    padding = [fill] * (count - taken)
    return tuple(merge(values, padding))
The iter data unpack function. Example 1: In[1]: source = 'abc' In[2]: a, b = safe_unpack(source, 2) In[3]: print(a, b) a b Example 2: In[1]: source = 'abc' In[2]: a, b, c, d = safe_unpack(source, 4) In[3]: print(a, b, c, d) a b None None
def get_productivity_stats(self, api_token, **kwargs):
    """Return a user's productivity stats.

    :param api_token: The user's login api_token.
    :type api_token: str
    :return: The HTTP response to the request.
    :rtype: :class:`requests.Response`
    """
    return self._get('get_productivity_stats', {'token': api_token}, **kwargs)
Return a user's productivity stats. :param api_token: The user's login api_token. :type api_token: str :return: The HTTP response to the request. :rtype: :class:`requests.Response`
def aggregate_registry_timers():
    """Return aggregate timing information for registered timers.

    Each element is a 3-tuple of (timer description, aggregate elapsed
    ms, number of calls), ordered by each group's earliest start time.
    """
    import itertools
    timers = sorted(shared_registry.values(), key=lambda t: t.desc)
    aggregate_timers = []
    # groupby requires the input sorted by the same key (desc).
    for desc, grouped in itertools.groupby(timers, key=lambda t: t.desc):
        group = list(grouped)
        num_calls = len(group)
        total_elapsed_ms = sum(t.elapsed_time_ms for t in group)
        first_start_time = min(t.start_time for t in group)
        aggregate_timers.append(
            (first_start_time, (desc, total_elapsed_ms, num_calls)))
    aggregate_timers.sort()
    # zip() is an iterator on Python 3; materialize before indexing.
    # (The original `zip(*aggregate_timers)[1]` raises TypeError there.)
    return list(zip(*aggregate_timers))[1]
Returns a list of aggregate timing information for registered timers. Each element is a 3-tuple of - timer description - aggregate elapsed time - number of calls The list is sorted by the first start time of each aggregate timer.
def copy_foreign_keys(self, event):
    """Copy matching foreign-key values from this object into *event*.

    Fields present on both models are copied when they are foreign keys,
    skipping 'created_by'. Additionally, when the event has a field
    named after this model's class (and the event code is not
    'AUDIT_DELETE'), the object itself is assigned to it.

    Args:
        event (Event): The Event instance to copy the FKs into.
    """
    event_keys = set(event._meta.fields.keys())
    obj_keys = self._meta.fields.keys()
    matching_keys = event_keys.intersection(obj_keys)
    for key in matching_keys:
        if key == 'created_by':
            continue
        if not isinstance(self._meta.fields[key], peewee.ForeignKeyField):
            continue
        setattr(event, key, getattr(self, key))
    # Link the event back to this object via its class-named field.
    possible_key = self.__class__.__name__.lower()
    if possible_key in event_keys and event.code != 'AUDIT_DELETE':
        setattr(event, possible_key, self)
Copies possible foreign key values from the object into the Event, skipping common keys like modified and created. Args: event (Event): The Event instance to copy the FKs into obj (fleaker.db.Model): The object to pull the values from
def purge(opts):
    """Purge environment database and uploaded files.

    Usage:
      datacats purge [-s NAME | --delete-environment] [-y] [ENVIRONMENT]

    Options:
      --delete-environment   Delete environment directory as well as its
                             data, as well as the data for **all** sites.
      -s --site=NAME         Specify a site to be purged [default: primary]
      -y --yes               Respond yes to all prompts (i.e. force)

    ENVIRONMENT may be an environment name or a path to an environment
    directory. Default: '.'
    """
    old = False
    try:
        environment = Environment.load(opts['ENVIRONMENT'], opts['--site'])
    except DatacatsError:
        environment = Environment.load(opts['ENVIRONMENT'], opts['--site'], data_only=True)
    # Old-format data directories must be re-loaded with allow_old.
    if get_format_version(environment.datadir) == 1:
        old = True
        environment = Environment.load(opts['ENVIRONMENT'], opts['--site'], allow_old=True)
    if not opts['--delete-environment'] and not old:
        environment.require_valid_site()
    sites = [opts['--site']] if not opts['--delete-environment'] else environment.sites
    if not opts['--yes']:
        y_or_n_prompt('datacats purge will delete all stored data')
    environment.stop_ckan()
    environment.stop_supporting_containers()
    environment.purge_data(sites)
    if opts['--delete-environment']:
        if environment.target:
            rmtree(environment.target)
        else:
            # BUG FIX: the error was constructed but never raised,
            # silently skipping the failure report.
            raise DatacatsError(("Unable to find the environment source"
                                 " directory so that it can be deleted.\n"
                                 "Chances are it's because it already does not exist"))
Purge environment database and uploaded files Usage: datacats purge [-s NAME | --delete-environment] [-y] [ENVIRONMENT] Options: --delete-environment Delete environment directory as well as its data, as well as the data for **all** sites. -s --site=NAME Specify a site to be purge [default: primary] -y --yes Respond yes to all prompts (i.e. force) ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.'
def pixels_to_tiles(self, coords, clamp=True):
    """Convert pixel coordinates into tile coordinates.

    When clamp is True the result is clamped to tiles on the tilemap.

    NOTE(review): the divisions use '/', which yields floats on
    Python 3 — presumably Python 2 integer division was intended;
    confirm the target interpreter.
    """
    tile_coords = Vector2()
    tile_coords.X = int(coords[0]) / self.spritesheet[0].width
    tile_coords.Y = int(coords[1]) / self.spritesheet[0].height
    if clamp:
        tile_coords.X, tile_coords.Y = self.clamp_within_range(tile_coords.X, tile_coords.Y)
    return tile_coords
Convert pixel coordinates into tile coordinates. clamp determines if we should clamp the tiles to ones only on the tilemap.
def has_uncacheable_headers(self, response):
    """Return True when response headers forbid caching.

    Broken out from should_cache for flexibility. Checks Cache-Control
    max-age=0 / no-cache / private, and an already-expired Expires.
    """
    cc_dict = get_header_dict(response, 'Cache-Control')
    if cc_dict:
        if 'max-age' in cc_dict and cc_dict['max-age'] == '0':
            return True
        if 'no-cache' in cc_dict:
            return True
        if 'private' in cc_dict:
            return True
    if response.has_header('Expires'):
        # Expires in the past means the response is already stale.
        if parse_http_date(response['Expires']) < time.time():
            return True
    return False
Should this response be cached based on it's headers broken out from should_cache for flexibility
def send_msg_to_webhook(self, message):
    """POST *message* to the webhook endpoint (separated for testability).

    Args:
        message (str): actual logging string to be passed to the REST
            endpoint

    Any failure is reported as a WebhookFailedEmitWarning rather than
    raised, so logging never crashes the caller.
    """
    payload = {
        'content': message
    }
    header = {
        'Content-Type': 'application/json'
    }
    try:
        request = requests.post(
            self.api_url,
            headers=header,
            json=payload
        )
        request.raise_for_status()
    except Exception as error_msg:
        warning_msg = (
            'EXCEPTION: UNABLE TO COMMIT LOG MESSAGE' +
            '\n\texception={0}'.format(repr(error_msg)) +
            '\n\tmessage={0}'.format(message)
        )
        warnings.warn(
            warning_msg,
            exceptions.WebhookFailedEmitWarning
        )
separated Requests logic for easier testing Args: message (str): actual logging string to be passed to REST endpoint Todo: * Requests.text/json return for better testing options
def remover(self, id_rack):
    """Remove a Rack by its identifier.

    :param id_rack: Identifier of the Rack. Integer value and greater than zero.

    :return: None

    :raise InvalidParameterError: The identifier of Rack is null or invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(id_rack):
        raise InvalidParameterError(
            u'The identifier of Rack is invalid or was not informed.')

    url = 'rack/' + str(id_rack) + '/'

    code, xml = self.submit(None, 'DELETE', url)

    return self.response(code, xml)
Remove Rack by the identifier. :param id_rack: Identifier of the Rack. Integer value and greater than zero. :return: None :raise InvalidParameterError: The identifier of Rack is null and invalid. :raise RackNaoExisteError: Rack not registered. :raise RackError: Rack is associated with a script. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def reflect_right(self, value):
    """Reflect ``value`` about ``self`` only when it lies to the left of ``self``;
    otherwise return it unchanged."""
    if value < self:
        return self.reflect(value)
    return value
Only reflects the value if it is < self.
def run(self):
    """Create a new user from the CLI arguments and store the encrypted password."""
    # Imported lazily so the command module can load without the models package.
    from zengine.models import User
    user = User(username=self.manager.args.username, superuser=self.manager.args.super)
    # set_password hashes/encrypts before saving; the raw password is never stored.
    user.set_password(self.manager.args.password)
    user.save()
    print("New user created with ID: %s" % user.key)
Creates user, encrypts password.
def create_fw(self, tenant_id, data):
    """Top level routine called when a FW is created.

    Delegates to ``_create_fw``; any exception is logged and reported as a
    ``False`` return value instead of propagating to the caller.
    """
    try:
        return self._create_fw(tenant_id, data)
    except Exception as exc:
        LOG.error("Failed to create FW for device native, tenant "
                  "%(tenant)s data %(data)s Exc %(exc)s",
                  {'tenant': tenant_id, 'data': data, 'exc': exc})
        return False
Top level routine called when a FW is created.
def ReplaceHomoglyphs(s):
    """Return ``s`` with unicode homoglyphs replaced by ascii equivalents.

    Characters without a known equivalent that are not already ascii are
    rendered through their unicode-escape form, falling back to '?'.
    """
    homoglyphs = {
        '\xa0': ' ',
        '\u00e3': '',
        '\u00a0': ' ',
        '\u00a9': '(C)',
        '\u00ae': '(R)',
        '\u2014': '-',
        '\u2018': "'",
        '\u2019': "'",
        '\u201c': '"',
        '\u201d': '"',
        '\u2026': '...',
        '\u2e3a': '-',
    }

    def _ascii_form(ch):
        # Known homoglyph: use its explicit ascii replacement.
        replacement = homoglyphs.get(ch)
        if replacement is not None:
            return replacement
        # Already ascii: keep as-is.
        try:
            ch.encode('ascii')
        except UnicodeError:
            pass
        else:
            return ch
        # Anything else: escape it, or give up with '?'.
        try:
            return ch.encode('unicode-escape').decode('ascii')
        except UnicodeError:
            return '?'

    return ''.join(_ascii_form(ch) for ch in s)
Returns s with unicode homoglyphs replaced by ascii equivalents.
def get_first_model_with_rest_name(cls, rest_name):
    """Return the first model registered for ``rest_name``, or None.

    Args:
        rest_name: the rest name
    """
    matching = cls.get_models_with_rest_name(rest_name)
    return matching[0] if matching else None
Get the first model corresponding to a rest_name Args: rest_name: the rest name
def outline(self, inner, outer):
    """
    Compute the region outline by differencing two dilations.

    Parameters
    ----------
    inner : int
        Size of inner outline boundary (in pixels)

    outer : int
        Size of outer outline boundary (in pixels)
    """
    return self.dilate(outer).exclude(self.dilate(inner))
Compute region outline by differencing two dilations. Parameters ---------- inner : int Size of inner outline boundary (in pixels) outer : int Size of outer outline boundary (in pixels)
def kwargs_to_variable_assignment(kwargs: dict, value_representation=repr,
                                  assignment_operator: str = ' = ',
                                  statement_separator: str = '\n',
                                  statement_per_line: bool = False) -> str:
    """Convert a dictionary into a string of assignment statements.

    Each assignment has the form
    ``key + assignment_operator + value_representation(value) + statement_separator``.
    With ``statement_per_line`` the statements are additionally joined with
    newlines.

    >>> kwargs_to_variable_assignment({'a': 2, 'b': "abc"})
    "a = 2\\nb = 'abc'\\n"
    >>> kwargs_to_variable_assignment({'a': 2}, statement_per_line=True)
    'a = 2\\n'
    """
    statements = [
        name + assignment_operator + value_representation(value) + statement_separator
        for name, value in kwargs.items()
    ]
    glue = '\n' if statement_per_line else ''
    return glue.join(statements)
Convert a dictionary into a string with assignments Each assignment is constructed based on: key assignment_operator value_representation(value) statement_separator, where key and value are the key and value of the dictionary. Moreover one can seprate the assignment statements by new lines. Parameters ---------- kwargs : dict assignment_operator: str, optional: Assignment operator (" = " in python) value_representation: str, optinal How to represent the value in the assignments (repr function in python) statement_separator : str, optional: Statement separator (new line in python) statement_per_line: bool, optional Insert each statement on a different line Returns ------- str All the assignemnts. >>> kwargs_to_variable_assignment({'a': 2, 'b': "abc"}) "a = 2\\nb = 'abc'\\n" >>> kwargs_to_variable_assignment({'a':2 ,'b': "abc"}, statement_per_line=True) "a = 2\\n\\nb = 'abc'\\n" >>> kwargs_to_variable_assignment({'a': 2}) 'a = 2\\n' >>> kwargs_to_variable_assignment({'a': 2}, statement_per_line=True) 'a = 2\\n'
def add_schema(self, schema):
    """Merge in a JSON schema; ``schema`` may be a dict, a SchemaBuilder
    or a SchemaNode.

    .. note::
        There is no schema validation. If you pass in a bad schema,
        you might get back a bad schema.
    """
    if isinstance(schema, SchemaBuilder):
        schema_uri = schema.schema_uri
        schema = schema.to_schema()
        # A builder without an explicit URI should not impose '$schema' on us.
        if schema_uri is None:
            del schema['$schema']
    elif isinstance(schema, SchemaNode):
        schema = schema.to_schema()

    if '$schema' in schema:
        # Adopt the incoming '$schema' only if we don't already have one,
        # then strip it from a copy so the root node never sees it.
        self.schema_uri = self.schema_uri or schema['$schema']
        schema = dict(schema)
        del schema['$schema']
    self._root_node.add_schema(schema)
Merge in a JSON schema. This can be a ``dict`` or another ``SchemaBuilder`` :param schema: a JSON Schema .. note:: There is no schema validation. If you pass in a bad schema, you might get back a bad schema.
def gaussian_pdf(x, g):
    """Gaussian probability density function at ``x`` for |GVar| ``g``
    (an object exposing ``mean`` and ``var``)."""
    deviation = x - g.mean
    normalisation = numpy.sqrt(g.var * 2 * numpy.pi)
    return numpy.exp(-deviation ** 2 / 2. / g.var) / normalisation
Gaussian probability density function at ``x`` for |GVar| ``g``.
def view_set(method_name):
    """Creates a setter that calls the view method named ``method_name`` with
    the context's key as first parameter and the value as second parameter.

    @param method_name: the name of a method belonging to the view.
    @type method_name: str
    """
    def view_set(value, context, **_params):
        # Resolve the bound method on the view at call time.
        method = getattr(context["view"], method_name)
        return _set(method, context["key"], value, (), {})
    return view_set
Creates a setter that will call the view method with the context's key as first parameter and the value as second parameter. @param method_name: the name of a method belonging to the view. @type method_name: str
def download_as_json(url):
    """Download the data at ``url`` and load it as JSON.

    :raise ResponseException: wraps any HTTPError raised by the request.
    """
    try:
        return Response('application/json', request(url=url)).read()
    except HTTPError as err:
        raise ResponseException('application/json', err)
Download the data at the URL and load it as JSON
def get_filename(self, variable):
    """Return the auxiliary file name ``variable`` is allocated to, or None
    when it is not allocated to any auxiliary file.

    Lookup is by the variable's concrete type first, then by equality
    against the registered variable.
    """
    allocations = self._type2filename2variable.get(type(variable), {})
    for filename, registered in allocations.items():
        if registered == variable:
            return filename
    return None
Return the auxiliary file name the given variable is allocated to or |None| if the given variable is not allocated to any auxiliary file name. >>> from hydpy import dummies >>> eqb = dummies.v2af.eqb[0] >>> dummies.v2af.get_filename(eqb) 'file1' >>> eqb += 500.0 >>> dummies.v2af.get_filename(eqb)
def get_pdffilepath(pdffilename):
    """Return the full path for the pdf file.

    args:
        pdffilename: string

    returns: path for the plots folder / pdffilename.pdf
    """
    # FILEPATHSTR / ROOT_DIR / PURPOSE are module-level configuration;
    # folder and extension fall back to "plots" / "pdf" when unset.
    return FILEPATHSTR.format(
        root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep,
        name=pdffilename,
        folder=PURPOSE.get("plots").get("folder", "plots"),
        ext=PURPOSE.get("plots").get("extension", "pdf")
    )
Returns the path for the pdf file args: pdffilename: string returns path for the plots folder / pdffilename.pdf
def ast_to_html(self, ast, link_resolver):
    """Render a cmark AST capsule (see `to_ast`) to HTML.

    Args:
        ast: PyCapsule, a capsule as returned by `to_ast`
        link_resolver: hotdoc.core.links.LinkResolver, a link
            resolver instance.
    """
    out, _ = cmark.ast_to_html(ast, link_resolver)
    return out
See the documentation of `to_ast` for more information. Args: ast: PyCapsule, a capsule as returned by `to_ast` link_resolver: hotdoc.core.links.LinkResolver, a link resolver instance.
def _check_input_symbols(self, X):
    """Check whether ``X`` is a valid sample from a Multinomial distribution.

    ``X`` must concatenate to more than one non-negative integer, and the
    distinct values must form the contiguous range 0..max.  For example
    ``[0, 0, 2, 1, 3, 1, 1]`` is valid while ``[0, 0, 3, 5, 10]`` is not.
    """
    samples = np.concatenate(X)
    if len(samples) == 1:
        # A single observation cannot be validated meaningfully.
        return False
    if not np.issubdtype(samples.dtype, np.integer):
        return False
    if (samples < 0).any():
        return False
    distinct = np.unique(samples)
    # np.unique is sorted: contiguity holds iff it starts at 0 and ends at len-1.
    return distinct[0] == 0 and distinct[-1] == len(distinct) - 1
Check if ``X`` is a sample from a Multinomial distribution. That is ``X`` should be an array of non-negative integers from range ``[min(X), max(X)]``, such that each integer from the range occurs in ``X`` at least once. For example ``[0, 0, 2, 1, 3, 1, 1]`` is a valid sample from a Multinomial distribution, while ``[0, 0, 3, 5, 10]`` is not.
def summary(self, h):
    """Summarize the per-model losses over h steps of the algorithm.

    Parameters
    ----------
    h : int
        How many steps to run the aggregating algorithm on

    Returns
    ----------
    - pd.DataFrame of losses for each model, with 'Ensemble' as the first row
    """
    _, losses, _ = self.run(h=h)
    df = pd.DataFrame(losses)
    df.index = ['Ensemble'] + self.model_names
    df.columns = [self.loss_name]
    return df
Summarize the results for each model for h steps of the algorithm Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - pd.DataFrame of losses for each model
async def init(
    self,
    *,
    advertise_addr: str = None,
    listen_addr: str = "0.0.0.0:2377",
    force_new_cluster: bool = False,
    swarm_spec: Mapping = None
) -> str:
    """Initialize a new swarm.

    Args:
        advertise_addr: address advertised to other nodes.
        listen_addr: listen address used for inter-manager communication.
        force_new_cluster: force creation of a new swarm.
        swarm_spec: user-modifiable swarm configuration.

    Returns:
        id of the swarm node
    """
    data = {
        "AdvertiseAddr": advertise_addr,
        "ListenAddr": listen_addr,
        "ForceNewCluster": force_new_cluster,
        "Spec": swarm_spec,
    }
    response = await self.docker._query_json("swarm/init", method="POST", data=data)
    return response
Initialize a new swarm. Args: ListenAddr: listen address used for inter-manager communication AdvertiseAddr: address advertised to other nodes. ForceNewCluster: Force creation of a new swarm. SwarmSpec: User modifiable swarm configuration. Returns: id of the swarm node
def _collect_unused(self, start: GridQubit,
                    used: Set[GridQubit]) -> Set[GridQubit]:
    """List all the qubits reachable from ``start`` without traversing any
    qubit in ``used``.

    Args:
        start: The first qubit for which connectivity should be calculated.
            Might be a member of the used set.
        used: Already used qubits, which cannot be traversed.

    Returns:
        Set of qubits reachable from the starting qubit.
    """
    reachable: Set[GridQubit] = set()
    frontier = [start]
    while frontier:
        qubit = frontier.pop()
        if qubit in reachable:
            continue
        reachable.add(qubit)
        for neighbour in self._c_adj[qubit]:
            # Skip used qubits and anything already collected.
            if neighbour not in used and neighbour not in reachable:
                frontier.append(neighbour)
    return reachable
Lists all the qubits that are reachable from given qubit. Args: start: The first qubit for which connectivity should be calculated. Might be a member of used set. used: Already used qubits, which cannot be used during the collection. Returns: Set of qubits that are reachable from starting qubit without traversing any of the used qubits.
def merge_config(
    config: Mapping[str, Any],
    override_config: Mapping[str, Any] = None,
    override_config_fn: str = None,
) -> Mapping[str, Any]:
    """Override ``config`` with additional configuration from
    ``override_config`` or the YAML file ``override_config_fn``.

    Used in scripts to merge CLI options with Config.

    Args:
        config: original configuration
        override_config: new configuration to override/extend current config
        override_config_fn: new configuration filename as YAML file
    """
    if override_config_fn:
        # A filename takes precedence over any override_config mapping passed in.
        with open(override_config_fn, "r") as f:
            override_config = yaml.load(f, Loader=yaml.SafeLoader)

    if not override_config:
        # NOTE(review): we still fall through and reduce with a falsy
        # override_config -- presumably rec_merge tolerates None/{}; confirm.
        log.info("Missing override_config")

    return functools.reduce(rec_merge, (config, override_config))
Override config with additional configuration in override_config or override_config_fn Used in script to merge CLI options with Config Args: config: original configuration override_config: new configuration to override/extend current config override_config_fn: new configuration filename as YAML file
def new_project(self, project_root):
    """Given a new project root, read in its project dictionary, supply the
    existing project's profile info, and create a new project configuration.

    :param project_root str: A filepath to a dbt project.
    :raises DbtProfileError: If the profile is invalid.
    :raises DbtProjectError: If project is missing or invalid.
    :returns RuntimeConfig: The new configuration.
    """
    # Re-validate a fresh copy of the current profile.
    profile = Profile(**self.to_profile_info())
    profile.validate()
    project = Project.from_project_root(project_root, {})

    cfg = self.from_parts(
        project=project,
        profile=profile,
        args=deepcopy(self.args),
    )
    # Force our quoting configuration back onto the new project.
    cfg.quoting = deepcopy(self.quoting)
    return cfg
Given a new project root, read in its project dictionary, supply the existing project's profile info, and create a new project file. :param project_root str: A filepath to a dbt project. :raises DbtProfileError: If the profile is invalid. :raises DbtProjectError: If project is missing or invalid. :returns RuntimeConfig: The new configuration.
def start_new_gui_thread():
    """Attempt to start a new GUI thread, if possible.

    It is only possible to start one if there was one running on module
    import (i.e. _processRootEvents was set up at that point).
    """
    PyGUIThread = getattr(ROOT, 'PyGUIThread', None)
    if PyGUIThread is not None:
        assert not PyGUIThread.isAlive(), "GUI thread already running!"
    assert _processRootEvents, (
        "GUI thread wasn't started when rootwait was imported, "
        "so it can't be restarted")
    ROOT.keeppolling = 1
    ROOT.PyGUIThread = threading.Thread(
        None, _processRootEvents, None, (ROOT,))
    ROOT.PyGUIThread.finishSchedule = _finishSchedule
    # Daemon thread so it does not block interpreter shutdown.
    ROOT.PyGUIThread.setDaemon(1)
    ROOT.PyGUIThread.start()
    log.debug("successfully started a new GUI thread")
Attempt to start a new GUI thread, if possible. It is only possible to start one if there was one running on module import.
async def get_version(self, timeout: int = 15) -> Optional[str]:
    """Execute FFmpeg and parse its version string (e.g. "3.4.2-tessus").

    Returns None when FFmpeg cannot be started, times out, or its output
    cannot be parsed.
    """
    command = ["-version"]

    # Start an "ffmpeg -version" subprocess.
    is_open = await self.open(cmd=command, input_source=None, output="")

    # Error after opening?
    if not is_open:
        _LOGGER.warning("Error starting FFmpeg.")
        return

    try:
        # communicate() blocks, so run it in an executor with a timeout.
        proc_func = functools.partial(self._proc.communicate, timeout=timeout)
        output, _ = await self._loop.run_in_executor(None, proc_func)

        result = re.search(r"ffmpeg version (\S*)", output.decode())
        if result is not None:
            return result.group(1)
    except (subprocess.TimeoutExpired, ValueError):
        _LOGGER.warning("Timeout reading stdout.")
        self.kill()

    return None
Execute FFmpeg process and parse the version information. Return full FFmpeg version string. Such as 3.4.2-tessus
def flds_firstsort(d):
    """Perform a lexsort of the field points by (x, y, z) and return the
    tuple ``(sort_indices, shape)`` where shape counts the distinct
    coordinates along each axis."""
    shape = [np.unique(d[axis]).size for axis in ('xs', 'ys', 'zs')]
    # np.lexsort treats the LAST key as primary, so this sorts by x, then y, then z.
    order = np.lexsort((d['z'], d['y'], d['x']))
    return order, shape
Perform a lexsort and return the sort indices and shape as a tuple.
def init_can(self, channel=Channel.CHANNEL_CH0, BTR=Baudrate.BAUD_1MBit, baudrate=BaudrateEx.BAUDEX_USE_BTR01,
             AMR=AMR_ALL, ACR=ACR_ALL, mode=Mode.MODE_NORMAL, OCR=OutputControl.OCR_DEFAULT,
             rx_buffer_entries=DEFAULT_BUFFER_ENTRIES, tx_buffer_entries=DEFAULT_BUFFER_ENTRIES):
    """Initializes a specific CAN channel of a device (no-op if the channel
    was already initialized).

    :param int channel: CAN channel to be initialized
        (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param int BTR: baud rate register BTR0 as high byte, BTR1 as low byte
        (see enum :class:`Baudrate`).
    :param int baudrate: baud rate register for all systec USB-CANmoduls
        (see enum :class:`BaudrateEx`).
    :param int AMR: acceptance filter mask.
    :param int ACR: acceptance filter code.
    :param int mode: transmission mode of the CAN channel (see enum :class:`Mode`).
    :param int OCR: Output Control Register (see enum :class:`OutputControl`).
    :param int rx_buffer_entries: maximum entries in the receive buffer.
    :param int tx_buffer_entries: maximum entries in the transmit buffer.
    """
    if not self._ch_is_initialized.get(channel, False):
        init_param = InitCanParam(mode, BTR, OCR, AMR, ACR, baudrate, rx_buffer_entries, tx_buffer_entries)
        UcanInitCanEx2(self._handle, channel, init_param)
        # Remember initialization so repeated calls are idempotent.
        self._ch_is_initialized[channel] = True
Initializes a specific CAN channel of a device. :param int channel: CAN channel to be initialized (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`). :param int BTR: Baud rate register BTR0 as high byte, baud rate register BTR1 as low byte (see enum :class:`Baudrate`). :param int baudrate: Baud rate register for all systec USB-CANmoduls (see enum :class:`BaudrateEx`). :param int AMR: Acceptance filter mask (see method :meth:`set_acceptance`). :param int ACR: Acceptance filter code (see method :meth:`set_acceptance`). :param int mode: Transmission mode of CAN channel (see enum :class:`Mode`). :param int OCR: Output Control Register (see enum :class:`OutputControl`). :param int rx_buffer_entries: The number of maximum entries in the receive buffer. :param int tx_buffer_entries: The number of maximum entries in the transmit buffer.
def show_warning(message):
    """Show ``message`` in a Tkinter error dialog if possible, then raise
    ``RuntimeError(message)``.

    The GUI part is best-effort: a missing Tkinter or an unavailable display
    (headless environments raise TclError from ``Tk()``) must never mask the
    error that is about to be raised.
    """
    try:
        # BUG FIX: the original imported the Python-2-only module names
        # (Tkinter / tkMessageBox), which always fail on Python 3.
        try:
            import tkinter as Tkinter
            import tkinter.messagebox as tkMessageBox
        except ImportError:
            import Tkinter, tkMessageBox  # Python 2 fallback
        root = Tkinter.Tk()
        root.withdraw()
        tkMessageBox.showerror("Spyder", message)
    except Exception:
        # Best effort only -- e.g. no display available.
        pass
    raise RuntimeError(message)
Show warning using Tkinter if available
def iter_breadth_first(self, root=None):
    """Traverses the belief state's structure breadth-first, yielding ``root``
    and then the subparts of each DictCell node encountered.

    NOTE(review): `root == None` relies on equality rather than identity
    (`is None`); kept as-is to preserve behavior for types overriding __eq__.
    """
    if root == None:
        root = self
    yield root
    # `last` tracks the most recently yielded node; the traversal stops once
    # a visited node equals it -- presumably the termination condition for
    # the recursion; confirm against DictCell iteration semantics.
    last = root
    for node in self.iter_breadth_first(root):
        if isinstance(node, DictCell):
            # recurse on subparts
            for subpart in node:
                yield subpart
                last = subpart
        if last == node:
            return
Traverses the belief state's structure breadth-first
def micromanager_metadata(self):
    """Return consolidated MicroManager metadata as dict, or None when this
    file is not a MicroManager file."""
    if not self.is_micromanager:
        return None
    # File-level metadata, then overlaid with the first page's tag value.
    result = read_micromanager_metadata(self._fh)
    result.update(self.pages[0].tags['MicroManagerMetadata'].value)
    return result
Return consolidated MicroManager metadata as dict.
def handle_json_wrapper_GET(self, handler, parsed_params):
    """Call ``handler`` with the parsed parameters and write its return value
    to the client as a 200 response encoded by ResultEncoder.

    Sends Content-Type and Content-Length headers before the body.
    """
    # NOTE: the original read ``self.server.schedule`` into an unused local;
    # that dead assignment has been removed.
    result = handler(parsed_params)
    content = ResultEncoder().encode(result)
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.send_header('Content-Length', str(len(content)))
    self.end_headers()
    self.wfile.write(content)
Call handler and output the return value in JSON.
def guess_wxr_version(self, tree):
    """We will try to guess the wxr version used to complete the wordpress
    xml namespace name, trying 1.2, 1.1 and then 1.0.

    :raise CommandError: when no known version matches.
    """
    for v in ('1.2', '1.1', '1.0'):
        try:
            # find() returns None when the versioned namespace is absent, so
            # accessing .text raises AttributeError and we try the next version.
            tree.find('channel/{%s}wxr_version' % (WP_NS % v)).text
            return v
        except AttributeError:
            pass
    raise CommandError('Cannot resolve the wordpress namespace')
We will try to guess the wxr version used to complete the wordpress xml namespace name.
def label_search(self, label:str) -> List[dict]:
    """Return the InterLex rows associated with ``label``, or None when
    there are no matches.

    Note: labels are presumed to be duplicated in InterLex, hence a list.

    Args:
        label: label of the entity you want to find
    """
    rows = self.label2rows(self.local_degrade(label))
    return rows if rows else None
Returns the rows in InterLex associated with that label.

Note: Presumed to have duplicated labels in InterLex

Args:
    label: label of the entity you want to find

Returns:
    None or List[dict]
def episode_info(self, cosmoid, season, episode, **kwargs):
    """Returns information about an episode in a television series.

    Maps to the `episode info` API method
    (V1.MetaData.VideoService.Video:SeasonEpisode).
    """
    resource = 'season/%d/episode/%d/info' % (season, episode)
    return self._cosmoid_request(resource, cosmoid, **kwargs)
Returns information about an episode in a television series Maps to the `episode info <http://prod-doc.rovicorp.com/mashery/index.php/V1.MetaData.VideoService.Video:SeasonEpisode>`_ API method.
def enrich(self, columns):
    """Append to the dataframe one new row per item found in the list-valued
    cells of the given columns.

    Assumes the lists stored in all specified columns of a given row have
    the same length -- TODO confirm with callers.  For a row
    ``{"C1": "V1", "C2": [1, 2]}`` and columns ``["C2"]`` this appends
    ``{"C1": "V1", "C2": 1}`` and ``{"C1": "V1", "C2": 2}``.

    :param columns: list of strings (column names holding lists)
    :rtype: pandas.DataFrame
    """
    # Bail out untouched if any requested column is missing.
    for column in columns:
        if column not in self.data.columns:
            return self.data

    first_column = list(self.data[columns[0]])
    count = 0
    append_df = pandas.DataFrame()
    for cell in first_column:
        if len(cell) >= 1:
            # One row per list item for the exploded columns...
            df = pandas.DataFrame()
            for column in columns:
                df[column] = self.data.loc[count, column]
            # ...duplicate the original row for each item and overwrite the
            # list columns with the individual values.
            extra_df = pandas.DataFrame([self.data.loc[count]] * len(df))
            for column in columns:
                extra_df[column] = list(df[column])
            append_df = append_df.append(extra_df, ignore_index=True)
            extra_df = pandas.DataFrame()
        count = count + 1

    self.data = self.data.append(append_df, ignore_index=True)
    return self.data
This method appends at the end of the dataframe as many rows as items are
found in the lists of elements in the provided columns. This assumes that
the length of the lists for the several specified columns is the same.

As an example, for the row A {"C1":"V1", "C2":field1, "C3":field2, "C4":field3}
we have three cells, each with a list of four elements:
* field1: [1,2,3,4]
* field2: ["a", "b", "c", "d"]
* field3: [1.1, 2.2, 3.3, 4.4]

This method converts each element of each cell into a new row, keeping the
column names:
{"C1":"V1", "C2":1, "C3":"a", "C4":1.1}
{"C1":"V1", "C2":2, "C3":"b", "C4":2.2}
{"C1":"V1", "C2":3, "C3":"c", "C4":3.3}
{"C1":"V1", "C2":4, "C3":"d", "C4":4.4}

:param columns: list of strings
:rtype pandas.DataFrame
def kruskal(graph, weight):
    """Minimum spanning tree by Kruskal.

    :param graph: undirected graph in listlist or listdict format
    :param weight: in matrix format or same listdict graph
    :returns: list of edges of the tree
    :complexity: ``O(|E|log|E|)``
    """
    n = len(graph)
    components = UnionFind(n)
    # Gather every (weight, u, v) edge and process them cheapest-first.
    candidate_edges = sorted(
        (weight[node][neighbor], node, neighbor)
        for node in range(n)
        for neighbor in graph[node]
    )
    tree_edges = []
    for _, node, neighbor in candidate_edges:
        # union() reports whether the two endpoints were in distinct components.
        if components.union(node, neighbor):
            tree_edges.append((node, neighbor))
    return tree_edges
Minimum spanning tree by Kruskal :param graph: undirected graph in listlist or listdict format :param weight: in matrix format or same listdict graph :returns: list of edges of the tree :complexity: ``O(|E|log|E|)``
def descriptions(cls):
    """Retrieve status descriptions from status.gandi.net.

    Flattens the schema's list of status/value mappings into one dict.
    """
    schema = cls.json_get('%s/status/schema' % cls.api_url, empty_key=True, send_key=False)
    descs = {}
    for val in schema['fields']['status']['value']:
        descs.update(val)
    return descs
Retrieve status descriptions from status.gandi.net.
def queue_files(dirpath, queue):
    """Add every file found under ``dirpath`` (recursively) to ``queue``
    as an absolute path."""
    top = os.path.abspath(dirpath)
    for dirname, _, filenames in os.walk(top):
        for name in filenames:
            queue.put(os.path.join(dirname, name))
Add files in a directory to a queue
def generate_sample_cfn_module(env_root, module_dir=None):
    """Generate skeleton CloudFormation sample module.

    Copies the CFN template files into ``module_dir`` (default
    ``<env_root>/sampleapp.cfn``) and renders the tf_state blueprint into
    ``templates/tf_state.yml``.
    """
    if module_dir is None:
        module_dir = os.path.join(env_root, 'sampleapp.cfn')
    generate_sample_module(module_dir)
    for i in ['stacks.yaml', 'dev-us-east-1.env']:
        shutil.copyfile(
            os.path.join(ROOT, 'templates', 'cfn', i),
            os.path.join(module_dir, i)
        )
    os.mkdir(os.path.join(module_dir, 'templates'))
    # Render the stacker blueprint (JSON) and convert it to YAML via cfn_flip.
    with open(os.path.join(module_dir, 'templates', 'tf_state.yml'), 'w') as stream:
        stream.write(
            cfn_flip.flip(
                check_output(
                    [sys.executable, os.path.join(ROOT,
                                                  'templates',
                                                  'stacker',
                                                  'tfstate_blueprints',
                                                  'tf_state.py')]
                )
            )
        )
    LOGGER.info("Sample CloudFormation module created at %s",
                module_dir)
Generate skeleton CloudFormation sample module.
def namespace(self, prefix=None):
    """Get this schema element's target namespace.

    In case of reference elements, the target namespace is defined by the
    referenced and not the referencing element node.

    @param prefix: The default prefix.
    @type prefix: str
    @return: The schema element's target namespace as (prefix, URI).
    """
    e = self.__deref()
    if e is not None:
        # Delegate to the referenced element's namespace.
        return e.namespace(prefix)
    return super(Element, self).namespace()
Get this schema element's target namespace. In case of reference elements, the target namespace is defined by the referenced and not the referencing element node. @param prefix: The default prefix. @type prefix: str @return: The schema element's target namespace @rtype: (I{prefix},I{URI})
def query_boost_version(boost_root):
    """Read the Boost version from ``boost_root``'s Jamroot file.

    Returns the version string declared as BOOST_VERSION, or 'default'
    when Jamroot is missing or carries no such declaration.
    """
    jamroot = os.path.join(boost_root, 'Jamroot')
    version = None
    if os.path.exists(jamroot):
        with codecs.open(jamroot, 'r', 'utf-8') as f:
            for line in f:
                # Expect: constant BOOST_VERSION : <version> ;
                parts = line.split()
                if len(parts) >= 5 and parts[1] == 'BOOST_VERSION':
                    version = parts[3]
                    break
    return version if version else 'default'
Read in the Boost version from a given boost_root.
def get_tool_definition(self, target):
    """Returns the tool-specific dict for ``target``, or None when the target
    is unknown or has no section for the current tool."""
    if target not in self.targets_mcu_list:
        logging.debug("Target not found in definitions")
        return None
    # Prefer the mcus record; fall back to the targets record when absent.
    mcu_record = self.targets.get_mcu_record(target) if self.mcus.get_mcu_record(target) is None else self.mcus.get_mcu_record(target)
    try:
        return mcu_record['tool_specific'][self.tool]
    except KeyError:
        return None
Returns the tool-specific dict, or None if it does not exist for the defined tool.
def get_rotated(self, angle):
    """Return a copy of this vector rotated by ``angle``, measured in
    radians counter-clockwise; the original vector is left unchanged."""
    result = self.copy()
    result.rotate(angle)
    return result
Return a vector rotated by angle from the given vector. Angle measured in radians counter-clockwise.
def out_interactions_iter(self, nbunch=None, t=None):
    """Return an iterator over the out interactions present in a given
    snapshot, as (node, neighbor, data) tuples.

    Parameters
    ----------
    nbunch : iterable container, optional (default=all nodes)
        A container of nodes; nodes not in the graph are quietly ignored.
    t : snapshot id (default=None)
        If None, iterate over the edges of the flattened graph.
    """
    if nbunch is None:
        nodes_nbrs_succ = self._succ.items()
    else:
        nodes_nbrs_succ = [(n, self._succ[n]) for n in self.nbunch_iter(nbunch)]

    for n, nbrs in nodes_nbrs_succ:
        for nbr in nbrs:
            if t is not None:
                # Only yield edges present at snapshot t.
                if self.__presence_test(n, nbr, t):
                    yield (n, nbr, {"t": [t]})
            else:
                if nbr in self._succ[n]:
                    yield (n, nbr, self._succ[n][nbr])
Return an iterator over the out interactions present in a given snapshot. Edges are returned as tuples in the order (node, neighbor). Parameters ---------- nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterated through once. t : snapshot id (default=None) If None the the method returns an iterator over the edges of the flattened graph. Returns ------- edge_iter : iterator An iterator of (u,v) tuples of interaction. Notes ----- Nodes in nbunch that are not in the graph will be (quietly) ignored. For directed graphs this returns the out-interaction. Examples -------- >>> G = dn.DynDiGraph() >>> G.add_interaction(0,1, 0) >>> G.add_interaction(1,2, 0) >>> G.add_interaction(2,3,1) >>> [e for e in G.out_interactions_iter(t=0)] [(0, 1), (1, 2)] >>> list(G.out_interactions_iter()) [(0, 1), (1, 2), (2, 3)]