code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def share_project(project_id, usernames, read_only, share,**kwargs): user_id = kwargs.get('user_id') proj_i = _get_project(project_id) proj_i.check_share_permission(int(user_id)) user_id = int(user_id) for owner in proj_i.owners: if user_id == owner.user_id: break else: ...
Share an entire project with a list of users, identifed by their usernames. The read_only flag ('Y' or 'N') must be set to 'Y' to allow write access or sharing. The share flat ('Y' or 'N') must be set to 'Y' to allow the project to be shared with other users
def drawing_update(self): from MAVProxy.modules.mavproxy_map import mp_slipmap if self.draw_callback is None: return self.draw_line.append(self.click_position) if len(self.draw_line) > 1: self.mpstate.map.add_object(mp_slipmap.SlipPolygon('drawing', self.draw_line...
update line drawing
def add_concept_filter(self, concept, concept_name=None): if concept in self.query_params.keys(): if not concept_name: concept_name = concept if isinstance(self.query_params[concept], list): if self.es_version == '1': es_filter = {'or':...
Add a concept filter :param concept: concept which will be used as lowercase string in a search term :param concept_name: name of the place where there will be searched for
def exists(self, client=None): client = self._require_client(client) query_params = self._query_params query_params["fields"] = "name" try: client._connection.api_request( method="GET", path=self.path, query_params=query_params,...
Determines whether or not this blob exists. If :attr:`user_project` is set on the bucket, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, ...
def pylog(self, *args, **kwargs): printerr(self.name, args, kwargs, traceback.format_exc())
Display all available logging information.
def export_to_dir(network, export_dir): package_path = ding0.__path__[0] network.export_to_csv_folder(os.path.join(package_path, 'output', 'debug', 'grid', ...
Exports PyPSA network as CSV files to directory Args: network: pypsa.Network export_dir: str Sub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to.
def add_after(self, pipeline): if not isinstance(pipeline, Pipeline): pipeline = Pipeline(pipeline) self.pipes = self.pipes[:] + pipeline.pipes[:] return self
Add a Pipeline to be applied after this processing pipeline. Arguments: pipeline: The Pipeline or callable to apply after this Pipeline.
def call_async(func): @wraps(func) def wrapper(self, *args, **kw): def call(): try: func(self, *args, **kw) except Exception: logger.exception( "failed to call async [%r] with [%r] [%r]", func, args, kw ) ...
Decorates a function to be called async on the loop thread
def decode_cursor(self, request): encoded = request.query_params.get(self.cursor_query_param) if encoded is None: return None try: querystring = b64decode(encoded.encode('ascii')).decode('ascii') tokens = urlparse.parse_qs(querystring, keep_blank_values=True) ...
Given a request with a cursor, return a `Cursor` instance. Differs from the standard CursorPagination to handle a tuple in the position field.
def is_valid(self): return len(self.ref) == 1 and \ len(self.alt) == 1 and \ len(self.alt[0]) == 1
Only retain SNPs or single indels, and are bi-allelic
def display_animation(anim, **kwargs): from IPython.display import HTML return HTML(anim_to_html(anim, **kwargs))
Display the animation with an IPython HTML object
def count(y_true, y_score=None, countna=False): if not countna: return (~np.isnan(to_float(y_true))).sum() else: return len(y_true)
Counts the number of examples. If countna is False then only count labeled examples, i.e. those with y_true not NaN
def _process_path_prefix(path_prefix): _validate_path(path_prefix) if not _GCS_PATH_PREFIX_REGEX.match(path_prefix): raise ValueError('Path prefix should have format /bucket, /bucket/, ' 'or /bucket/prefix but got %s.' % path_prefix) bucket_name_end = path_prefix.find('/', 1) bucket = p...
Validate and process a Google Cloud Stoarge path prefix. Args: path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix' or '/bucket/' or '/bucket'. Raises: ValueError: if path is invalid. Returns: a tuple of /bucket and prefix. prefix can be None.
def main(argv: Optional[Sequence[str]] = None) -> None: args = parse_arguments(argv=argv) if args.logging: logging.basicConfig(level=logging.DEBUG) handle_skip() action = args.action request = parse_request() LOGGER.debug('Received action %s with request:\n%s', action, r...
Start the pass-git-helper script. Args: argv: If not ``None``, use the provided command line arguments for parsing. Otherwise, extract them automatically.
def parse_record(header, record): major_version = header[1] try: return RECORD_PARSER[major_version](header, record) except (KeyError, struct.error) as error: raise RuntimeError("Corrupted USN Record") from error
Parses a record according to its version.
def get_perm_model(): try: return django_apps.get_model(settings.PERM_MODEL, require_ready=False) except ValueError: raise ImproperlyConfigured("PERM_MODEL must be of the form 'app_label.model_name'") except LookupError: raise ImproperlyConfigured( "PERM_MODEL refers to m...
Returns the Perm model that is active in this project.
def group_experiments_greedy(tomo_expt: TomographyExperiment): diag_sets = _max_tpb_overlap(tomo_expt) grouped_expt_settings_list = list(diag_sets.values()) grouped_tomo_expt = TomographyExperiment(grouped_expt_settings_list, program=tomo_expt.program) return grouped_tomo_expt
Greedy method to group ExperimentSettings in a given TomographyExperiment :param tomo_expt: TomographyExperiment to group ExperimentSettings within :return: TomographyExperiment, with grouped ExperimentSettings according to whether it consists of PauliTerms diagonal in the same tensor product basis
def _get_hosts_from_ports(self, ports): hosts = map(lambda x: 'localhost:%d' % int(x.strip()), ports.split(',')) return list(set(hosts))
validate hostnames from a list of ports
def create_sequence_rule(self, sequence_rule_form): collection = JSONClientValidated('assessment_authoring', collection='SequenceRule', runtime=self._runtime) if not isinstance(sequence_rule_form, ABCSequenceRuleForm): ...
Creates a new ``SequenceRule``. arg: sequence_rule_form (osid.assessment.authoring.SequenceRuleForm): the form for this ``SequenceRule`` return: (osid.assessment.authoring.SequenceRule) - the new ``SequenceRule`` raise: IllegalState - ``sequen...
def fetch_viewers(self, game): r = self.kraken_request('GET', 'streams/summary', params={'game': game.name}).json() game.viewers = r['viewers'] game.channels = r['channels'] return game
Query the viewers and channels of the given game and set them on the object :returns: the given game :rtype: :class:`models.Game` :raises: None
def find_substring_edge(self, substring, suffix_tree_id): suffix_tree = self.suffix_tree_repo[suffix_tree_id] started = datetime.datetime.now() edge, ln = find_substring_edge(substring=substring, suffix_tree=suffix_tree, edge_repo=self.edge_repo) print(" - searched for edge in {} for sub...
Returns an edge that matches the given substring.
def tau_from_final_mass_spin(final_mass, final_spin, l=2, m=2, nmodes=1): return get_lm_f0tau(final_mass, final_spin, l, m, nmodes)[1]
Returns QNM damping time for the given mass and spin and mode. Parameters ---------- final_mass : float or array Mass of the black hole (in solar masses). final_spin : float or array Dimensionless spin of the final black hole. l : int or array, optional l-index of the harmon...
def add_repo(self, repo, team): for t in self.iter_teams(): if team == t.name: return t.add_repo(repo) return False
Add ``repo`` to ``team``. .. note:: This method is of complexity O(n). This iterates over all teams in your organization and only adds the repo when the team name matches the team parameter above. If you want constant time, you should retrieve the team and call `...
def dependency_status(data): parents_statuses = set( DataDependency.objects.filter( child=data, kind=DataDependency.KIND_IO ).distinct('parent__status').values_list('parent__status', flat=True) ) if not parents_statuses: return Data.STATUS_DONE if None in parents_stat...
Return abstracted status of dependencies. - ``STATUS_ERROR`` .. one dependency has error status or was deleted - ``STATUS_DONE`` .. all dependencies have done status - ``None`` .. other
def upload_to_s3(self, key, filename): extra_args = {'ACL': self.acl} guess = mimetypes.guess_type(filename) content_type = guess[0] encoding = guess[1] if content_type: extra_args['ContentType'] = content_type if (self.gzip and content_type in self.gzip_conte...
Set the content type and gzip headers if applicable and upload the item to S3
def untar(fname, verbose=True): if fname.lower().endswith(".tar.gz"): dirpath = os.path.join(BIGDATA_PATH, os.path.basename(fname)[:-7]) if os.path.isdir(dirpath): return dirpath with tarfile.open(fname) as tf: members = tf.getmembers() for member in tqdm(...
Uunzip and untar a tar.gz file into a subdir of the BIGDATA_PATH directory
def add_vtt_file(self, vtt_file, language_type=None): if not isinstance(vtt_file, DataInputStream): raise InvalidArgument('vtt_file') locale = DEFAULT_LANGUAGE_TYPE.identifier if language_type is not None: locale = language_type.identifier self.my_osid_object_form...
Adds a vtt file tagged as the given language. arg: vtt_file (displayText): the new vtt_file raise: InvalidArgument - ``vtt_file`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``media_description`` is ``null`` *compliance: mandato...
def conditional_jit(function=None, **kwargs): def wrapper(function): try: numba = importlib.import_module("numba") return numba.jit(**kwargs)(function) except ImportError: return function if function: return wrapper(function) else: return w...
Use numba's jit decorator if numba is installed. Notes ----- If called without arguments then return wrapped function. @conditional_jit def my_func(): return else called with arguments @conditional_jit(nopython=True) def my_func(): ret...
def set_error_output_file(filename): filename = os.path.abspath(os.path.expanduser(filename)) fileOutputWindow = vtk.vtkFileOutputWindow() fileOutputWindow.SetFileName(filename) outputWindow = vtk.vtkOutputWindow() outputWindow.SetInstance(fileOutputWindow) return fileOutputWindow, outputWindow
Sets a file to write out the VTK errors
def for_display(self): skip = "" if self.skip: skip = " [SKIP]" result = "{step_num}: {path}{skip}".format( step_num=self.step_num, path=self.path, skip=skip ) description = self.task_config.get("description") if description: result += ...
Step details formatted for logging output.
def assets(self, asset_type=None): if not self.can_update(): self._tcex.handle_error(910, [self.type]) if not asset_type: return self.tc_requests.adversary_assets( self.api_type, self.api_sub_type, self.unique_id ) if asset_type == 'PHONE': ...
Retrieves all of the assets of a given asset_type Args: asset_type: (str) Either None, PHONE, HANDLER, or URL Returns:
def utime(self, tarinfo, targetpath): if not hasattr(os, 'utime'): return try: os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) except EnvironmentError as e: raise ExtractError("could not change modification time")
Set modification time of targetpath according to tarinfo.
def get_from_category_qs(cls, category): ids = cls.get_ties_for_categories_qs(category).values_list('object_id').distinct() filter_kwargs = {'id__in': [i[0] for i in ids]} return cls.objects.filter(**filter_kwargs)
Returns a QuerySet of objects of this type associated with the given category. :param Category category: :rtype: list :return:
def drawImage(self, image): padding = image.width() % 4 for i in range(0, image.height()): tmp = image.copy(0, i, image.width() + padding, 1) ptr = tmp.bits() ptr.setsize(tmp.byteCount()) self._controller.sendUpdate(self._dx, i + self._dy, image.width() + ...
Render of widget
def check_dependencies_remote(args): cmd = [args.python, '-m', 'depends', args.requirement] env = dict(PYTHONPATH=os.path.dirname(__file__)) return subprocess.check_call(cmd, env=env)
Invoke this command on a remote Python.
def coverage(reportdir=None, extra=None): import coverage as coverage_api cov = coverage_api.coverage() opts = {'directory': reportdir} if reportdir else {} cov.start() test(extra) cov.stop() cov.html_report(**opts)
Test this project with coverage reports
def encode_data_items(self, *args): str_list = [] for arg in args: if isinstance(arg, str): arg_str = arg elif isinstance(arg, int): arg_str = self.INTEGER_PREFIX + self.encode_int(arg) else: arg_str = str(arg) ...
Encodes a list of integers and strings into a concatenated string. - encode string items as-is. - encode integer items as base-64 with a ``'~'`` prefix. - concatenate encoded items with a ``'|'`` separator. Example: ``encode_data_items('abc', 123, 'xyz')`` returns ``'abc|~B...
def _is_defaultable(i, entry, table, check_for_aliases=True): if (len(entry.sources) == 1 and len(entry.route) == 1 and None not in entry.sources): source = next(iter(entry.sources)) sink = next(iter(entry.route)) if source.is_link and sink.is_link: if sou...
Determine if an entry may be removed from a routing table and be replaced by a default route. Parameters ---------- i : int Position of the entry in the table entry : RoutingTableEntry The entry itself table : [RoutingTableEntry, ...] The table containing the entry. ...
def export(self): fields = ['id', 'host', 'port', 'user'] out = {} for field in fields: out[field] = getattr(self, field, None) out['mountOptions'] = self.mount_opts out['mountPoint'] = self.mount_point out['beforeMount'] = self.cmd_before_mount out['a...
Serializes to JSON.
def string_array_to_list(a): result = [] length = javabridge.get_env().get_array_length(a) wrapped = javabridge.get_env().get_object_array_elements(a) for i in range(length): result.append(javabridge.get_env().get_string(wrapped[i])) return result
Turns the Java string array into Python unicode string list. :param a: the string array to convert :type a: JB_Object :return: the string list :rtype: list
def list_all_native_quantities(self, with_info=False): q = self._native_quantities return {k: self.get_quantity_info(k) for k in q} if with_info else list(q)
Return a list of all available native quantities in this catalog. If *with_info* is `True`, return a dict with quantity info. See also: list_all_quantities
def commit_buildroot(self): logger.info("committing buildroot") self.ensure_is_built() commit_message = "docker build of '%s' (%s)" % (self.image, self.uri) self.buildroot_image_name = ImageName( repo="buildroot-%s" % self.image, tag=datetime.datetime.now().strfti...
create image from buildroot :return:
def set_posts_param_modified_after(self, params, post_type, status): if not self.purge_first and not self.full and not self.modified_after: if status == "any": latest = Post.objects.filter(post_type=post_type).order_by("-modified").first() else: latest = P...
Set modified_after date to "continue where we left off" if appropriate :param params: the GET params dict, which may be updated to include the "modified_after" key :param post_type: post, page, attachment, or any custom post type set up in the WP API :param status: publish, private, draft, etc....
def get_output_score_metadata(self): metadata = dict(self._mdata['output_score']) metadata.update({'existing_decimal_values': self._my_map['outputScore']}) return Metadata(**metadata)
Gets the metadata for the output score start range. return: (osid.Metadata) - metadata for the output score start range *compliance: mandatory -- This method must be implemented.*
def integers(self, start: int = 0, end: int = 10, length: int = 10) -> List[int]: return self.random.randints( length, start, end)
Generate a list of random integers. Integers can be negative or positive numbers. .. note: You can use both positive and negative numbers. :param start: Start. :param end: End. :param length: Length of list. :return: List of integers. :Example: [-20...
def create(gandi, datacenter, bandwidth, ip_version, vlan, ip, attach, background): if ip_version != 4 and vlan: gandi.echo('You must have an --ip-version to 4 when having a vlan.') return if ip and not vlan: gandi.echo('You must have a --vlan when giving an --ip.') re...
Create a public or private ip
def load_usps(): dataset_path = _load('usps') df = _load_csv(dataset_path, 'data') X = _load_images(os.path.join(dataset_path, 'images'), df.image) y = df.label.values return Dataset(load_usps.__doc__, X, y, accuracy_score, stratify=True)
USPs Digits Dataset. The data of this dataset is a 3d numpy array vector with shape (224, 224, 3) containing 9298 224x224 RGB photos of handwritten digits, and the target is a 1d numpy integer array containing the label of the digit represented in the image.
def create_filter_predicate(self): assert self.query_content_id is not None, \ 'must call SearchEngine.set_query_id first' filter_names = self.query_params.getlist('filter') if len(filter_names) == 0 and 'already_labeled' in self._filters: filter_names = ['already_labeled...
Creates a filter predicate. The list of available filters is given by calls to ``add_filter``, and the list of filters to use is given by parameters in ``params``. In this default implementation, multiple filters can be specified with the ``filter`` parameter. Each filter is ...
def cx_to_networkx(cx): graph = networkx.MultiDiGraph() for node_entry in get_aspect(cx, 'nodes'): id = node_entry['@id'] attrs = get_attributes(get_aspect(cx, 'nodeAttributes'), id) attrs['n'] = node_entry['n'] graph.add_node(id, **attrs) for edge_entry in get_aspect(cx, 'ed...
Return a MultiDiGraph representation of a CX network.
def prep_pdf(qc_dir, config): html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html") html_fixed = "%s-fixed%s" % os.path.splitext(html_file) try: topdf = config_utils.get_program("wkhtmltopdf", config) except config_utils.CmdNotFound: topdf = None if topdf and utils.file_ex...
Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS.
def unpack(iterable, count, fill=None): iterable = list(enumerate(iterable)) cnt = count if count <= len(iterable) else len(iterable) results = [iterable[i][1] for i in range(cnt)] results = merge(results, [fill for i in range(count-cnt)]) return tuple(results)
The iter data unpack function. Example 1: In[1]: source = 'abc' In[2]: a, b = safe_unpack(source, 2) In[3]: print(a, b) a b Example 2: In[1]: source = 'abc' In[2]: a, b, c, d = safe_unpack(source, 4) In[3]: print(a, b, c, d) a b None None
def get_productivity_stats(self, api_token, **kwargs): params = { 'token': api_token } return self._get('get_productivity_stats', params, **kwargs)
Return a user's productivity stats. :param api_token: The user's login api_token. :type api_token: str :return: The HTTP response to the request. :rtype: :class:`requests.Response`
def aggregate_registry_timers(): import itertools timers = sorted(shared_registry.values(), key=lambda t: t.desc) aggregate_timers = [] for k, g in itertools.groupby(timers, key=lambda t: t.desc): group = list(g) num_calls = len(group) total_elapsed_ms = sum(t.elapsed_time_ms for...
Returns a list of aggregate timing information for registered timers. Each element is a 3-tuple of - timer description - aggregate elapsed time - number of calls The list is sorted by the first start time of each aggregate timer.
def copy_foreign_keys(self, event): event_keys = set(event._meta.fields.keys()) obj_keys = self._meta.fields.keys() matching_keys = event_keys.intersection(obj_keys) for key in matching_keys: if key == 'created_by': continue if not isinstance(self....
Copies possible foreign key values from the object into the Event, skipping common keys like modified and created. Args: event (Event): The Event instance to copy the FKs into obj (fleaker.db.Model): The object to pull the values from
def purge(opts): old = False try: environment = Environment.load(opts['ENVIRONMENT'], opts['--site']) except DatacatsError: environment = Environment.load(opts['ENVIRONMENT'], opts['--site'], data_only=True) if get_format_version(environment.datadir) == 1: old = True ...
Purge environment database and uploaded files Usage: datacats purge [-s NAME | --delete-environment] [-y] [ENVIRONMENT] Options: --delete-environment Delete environment directory as well as its data, as well as the data for **all** sites. -s --site=NAME Specify a site to be pu...
def pixels_to_tiles(self, coords, clamp=True): tile_coords = Vector2() tile_coords.X = int(coords[0]) / self.spritesheet[0].width tile_coords.Y = int(coords[1]) / self.spritesheet[0].height if clamp: tile_coords.X, tile_coords.Y = self.clamp_within_range(tile_coords.X, tile_c...
Convert pixel coordinates into tile coordinates. clamp determines if we should clamp the tiles to ones only on the tilemap.
def has_uncacheable_headers(self, response): cc_dict = get_header_dict(response, 'Cache-Control') if cc_dict: if 'max-age' in cc_dict and cc_dict['max-age'] == '0': return True if 'no-cache' in cc_dict: return True if 'private' in cc_di...
Should this response be cached based on it's headers broken out from should_cache for flexibility
def send_msg_to_webhook(self, message): payload = { 'content':message } header = { 'Content-Type':'application/json' } try: request = requests.post( self.api_url, headers=header, json=payload ...
separated Requests logic for easier testing Args: message (str): actual logging string to be passed to REST endpoint Todo: * Requests.text/json return for better testing options
def remover(self, id_rack): if not is_valid_int_param(id_rack): raise InvalidParameterError( u'The identifier of Rack is invalid or was not informed.') url = 'rack/' + str(id_rack) + '/' code, xml = self.submit(None, 'DELETE', url) return self.response(code, x...
Remove Rack by the identifier. :param id_rack: Identifier of the Rack. Integer value and greater than zero. :return: None :raise InvalidParameterError: The identifier of Rack is null and invalid. :raise RackNaoExisteError: Rack not registered. :raise RackError: Rack is associa...
def reflect_right(self, value): if value < self: value = self.reflect(value) return value
Only reflects the value if is < self.
def run(self): from zengine.models import User user = User(username=self.manager.args.username, superuser=self.manager.args.super) user.set_password(self.manager.args.password) user.save() print("New user created with ID: %s" % user.key)
Creates user, encrypts password.
def create_fw(self, tenant_id, data): try: return self._create_fw(tenant_id, data) except Exception as exc: LOG.error("Failed to create FW for device native, tenant " "%(tenant)s data %(data)s Exc %(exc)s", {'tenant': tenant_id, 'data':...
Top level routine called when a FW is created.
def ReplaceHomoglyphs(s): homoglyphs = { '\xa0': ' ', '\u00e3': '', '\u00a0': ' ', '\u00a9': '(C)', '\u00ae': '(R)', '\u2014': '-', '\u2018': "'", '\u2019': "'", '\u201c': '"', '\u201d': '"', '\u2026': '...', '\u2e3a': '...
Returns s with unicode homoglyphs replaced by ascii equivalents.
def get_first_model_with_rest_name(cls, rest_name): models = cls.get_models_with_rest_name(rest_name) if len(models) > 0: return models[0] return None
Get the first model corresponding to a rest_name Args: rest_name: the rest name
def outline(self, inner, outer): return self.dilate(outer).exclude(self.dilate(inner))
Compute region outline by differencing two dilations. Parameters ---------- inner : int Size of inner outline boundary (in pixels) outer : int Size of outer outline boundary (in pixels)
def kwargs_to_variable_assignment(kwargs: dict, value_representation=repr, assignment_operator: str = ' = ', statement_separator: str = '\n', statement_per_line: bool = False) -> str: code = [] join_str = '\n' ...
Convert a dictionary into a string with assignments Each assignment is constructed based on: key assignment_operator value_representation(value) statement_separator, where key and value are the key and value of the dictionary. Moreover one can seprate the assignment statements by new lines. Parame...
def add_schema(self, schema): if isinstance(schema, SchemaBuilder): schema_uri = schema.schema_uri schema = schema.to_schema() if schema_uri is None: del schema['$schema'] elif isinstance(schema, SchemaNode): schema = schema.to_schema() ...
Merge in a JSON schema. This can be a ``dict`` or another ``SchemaBuilder`` :param schema: a JSON Schema .. note:: There is no schema validation. If you pass in a bad schema, you might get back a bad schema.
def gaussian_pdf(x, g): return ( numpy.exp(-(x - g.mean) ** 2 / 2. /g.var) / numpy.sqrt(g.var * 2 * numpy.pi) )
Gaussian probability density function at ``x`` for |GVar| ``g``.
def view_set(method_name): def view_set(value, context, **_params): method = getattr(context["view"], method_name) return _set(method, context["key"], value, (), {}) return view_set
Creates a setter that will call the view method with the context's key as first parameter and the value as second parameter. @param method_name: the name of a method belonging to the view. @type method_name: str
def download_as_json(url): try: return Response('application/json', request(url=url)).read() except HTTPError as err: raise ResponseException('application/json', err)
Download the data at the URL and load it as JSON
def get_filename(self, variable): fn2var = self._type2filename2variable.get(type(variable), {}) for (fn_, var) in fn2var.items(): if var == variable: return fn_ return None
Return the auxiliary file name the given variable is allocated to or |None| if the given variable is not allocated to any auxiliary file name. >>> from hydpy import dummies >>> eqb = dummies.v2af.eqb[0] >>> dummies.v2af.get_filename(eqb) 'file1' >>> eqb += 500.0 ...
def get_pdffilepath(pdffilename): return FILEPATHSTR.format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=pdffilename, folder=PURPOSE.get("plots").get("folder", "plots"), ext=PURPOSE.get("plots").get("extension", "pdf") )
Returns the path for the pdf file args: pdffilename: string returns path for the plots folder / pdffilename.pdf
def ast_to_html(self, ast, link_resolver): out, _ = cmark.ast_to_html(ast, link_resolver) return out
See the documentation of `to_ast` for more information. Args: ast: PyCapsule, a capsule as returned by `to_ast` link_resolver: hotdoc.core.links.LinkResolver, a link resolver instance.
def _check_input_symbols(self, X): symbols = np.concatenate(X) if (len(symbols) == 1 or not np.issubdtype(symbols.dtype, np.integer) or (symbols < 0).any()): return False u = np.unique(symbols) return u[0] == 0 and u[-1] == len(u) - 1
Check if ``X`` is a sample from a Multinomial distribution. That is ``X`` should be an array of non-negative integers from range ``[min(X), max(X)]``, such that each integer from the range occurs in ``X`` at least once. For example ``[0, 0, 2, 1, 3, 1, 1]`` is a valid sample from a ...
def summary(self, h): _, losses, _ = self.run(h=h) df = pd.DataFrame(losses) df.index = ['Ensemble'] + self.model_names df.columns = [self.loss_name] return df
Summarize the results for each model for h steps of the algorithm Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - pd.DataFrame of losses for each model
async def init( self, *, advertise_addr: str = None, listen_addr: str = "0.0.0.0:2377", force_new_cluster: bool = False, swarm_spec: Mapping = None ) -> str: data = { "AdvertiseAddr": advertise_addr, "ListenAddr": listen_addr, ...
Initialize a new swarm. Args: ListenAddr: listen address used for inter-manager communication AdvertiseAddr: address advertised to other nodes. ForceNewCluster: Force creation of a new swarm. SwarmSpec: User modifiable swarm configuration. Returns: ...
def _collect_unused(self, start: GridQubit, used: Set[GridQubit]) -> Set[GridQubit]: def collect(n: GridQubit, visited: Set[GridQubit]): visited.add(n) for m in self._c_adj[n]: if m not in used and m not in visited: collect(m, v...
Lists all the qubits that are reachable from given qubit. Args: start: The first qubit for which connectivity should be calculated. Might be a member of used set. used: Already used qubits, which cannot be used during the collection. Returns...
def merge_config( config: Mapping[str, Any], override_config: Mapping[str, Any] = None, override_config_fn: str = None, ) -> Mapping[str, Any]: if override_config_fn: with open(override_config_fn, "r") as f: override_config = yaml.load(f, Loader=yaml.SafeLoader) if not override_c...
Override config with additional configuration in override_config or override_config_fn Used in script to merge CLI options with Config Args: config: original configuration override_config: new configuration to override/extend current config override_config_fn: new configuration filenam...
def new_project(self, project_root): profile = Profile(**self.to_profile_info()) profile.validate() project = Project.from_project_root(project_root, {}) cfg = self.from_parts( project=project, profile=profile, args=deepcopy(self.args), ) ...
Given a new project root, read in its project dictionary, supply the existing project's profile info, and create a new project file. :param project_root str: A filepath to a dbt project. :raises DbtProfileError: If the profile is invalid. :raises DbtProjectError: If project is missing o...
def start_new_gui_thread(): PyGUIThread = getattr(ROOT, 'PyGUIThread', None) if PyGUIThread is not None: assert not PyGUIThread.isAlive(), "GUI thread already running!" assert _processRootEvents, ( "GUI thread wasn't started when rootwait was imported, " "so it can't be restarted") ...
Attempt to start a new GUI thread, if possible. It is only possible to start one if there was one running on module import.
async def get_version(self, timeout: int = 15) -> Optional[str]: command = ["-version"] is_open = await self.open(cmd=command, input_source=None, output="") if not is_open: _LOGGER.warning("Error starting FFmpeg.") return try: proc_func = functools.par...
Execute FFmpeg process and parse the version information. Return full FFmpeg version string. Such as 3.4.2-tessus
def flds_firstsort(d): shape = [ len( np.unique(d[l]) ) for l in ['xs', 'ys', 'zs'] ]; si = np.lexsort((d['z'],d['y'],d['x'])); return si,shape;
Perform a lexsort and return the sort indices and shape as a tuple.
def init_can(self, channel=Channel.CHANNEL_CH0, BTR=Baudrate.BAUD_1MBit, baudrate=BaudrateEx.BAUDEX_USE_BTR01, AMR=AMR_ALL, ACR=ACR_ALL, mode=Mode.MODE_NORMAL, OCR=OutputControl.OCR_DEFAULT, rx_buffer_entries=DEFAULT_BUFFER_ENTRIES, tx_buffer_entries=DEFAULT_BUFFER_ENTRIES): if...
Initializes a specific CAN channel of a device. :param int channel: CAN channel to be initialized (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`). :param int BTR: Baud rate register BTR0 as high byte, baud rate register BTR1 as low byte (see enum :class:`Baudrate`). :pa...
def show_warning(message): try: import Tkinter, tkMessageBox root = Tkinter.Tk() root.withdraw() tkMessageBox.showerror("Spyder", message) except ImportError: pass raise RuntimeError(message)
Show warning using Tkinter if available
def iter_breadth_first(self, root=None): if root == None: root = self yield root last = root for node in self.iter_breadth_first(root): if isinstance(node, DictCell): for subpart in node: yield subpart last ...
Traverses the belief state's structure breadth-first
def micromanager_metadata(self):
    """Return consolidated MicroManager metadata as a dict, or None when
    this file is not a MicroManager TIFF."""
    if self.is_micromanager:
        merged = read_micromanager_metadata(self._fh)
        # Fold the first page's MicroManagerMetadata tag into the result.
        merged.update(self.pages[0].tags['MicroManagerMetadata'].value)
        return merged
    return None
Return consolidated MicroManager metadata as dict.
def handle_json_wrapper_GET(self, handler, parsed_params): schedule = self.server.schedule result = handler(parsed_params) content = ResultEncoder().encode(result) self.send_response(200) self.send_header('Content-Type', 'text/plain') self.send_header('Content-Length', str(len(content))) sel...
Call handler and output the return value in JSON.
def guess_wxr_version(self, tree):
    """Guess the WXR version used, to complete the WordPress XML
    namespace name.

    Tries known versions from newest to oldest and returns the first
    one whose ``wxr_version`` element is present in *tree*.

    Raises CommandError when no candidate version matches.
    """
    for candidate in ('1.2', '1.1', '1.0'):
        node = tree.find('channel/{%s}wxr_version' % (WP_NS % candidate))
        if node is not None:
            return candidate
    raise CommandError('Cannot resolve the wordpress namespace')
We will try to guess the wxr version used to complete the wordpress xml namespace name.
def label_search(self, label:str) -> List[dict]:
    """Return the InterLex rows associated with *label*, or None when
    there are none.

    Note: labels are presumed to be duplicated in InterLex, hence a list.

    Args:
        label: label of the entity you want to find

    Returns:
        None or List[dict]
    """
    rows = self.label2rows(self.local_degrade(label))
    return rows if rows else None
Returns the rows in InterLex associated with that label Note: labels are presumed to be duplicated in InterLex, hence a list is returned Args: label: label of the entity you want to find Returns: None or List[dict]
def episode_info(self, cosmoid, season, episode, **kwargs):
    """Return information about one episode of a television series.

    Maps to the Rovi ``episode info`` API method; extra keyword
    arguments are forwarded to the underlying request.
    """
    path = 'season/%d/episode/%d/info' % (season, episode)
    return self._cosmoid_request(path, cosmoid, **kwargs)
Returns information about an episode in a television series Maps to the `episode info <http://prod-doc.rovicorp.com/mashery/index.php/V1.MetaData.VideoService.Video:SeasonEpisode>`_ API method.
def enrich(self, columns): for column in columns: if column not in self.data.columns: return self.data first_column = list(self.data[columns[0]]) count = 0 append_df = pandas.DataFrame() for cell in first_column: if len(cell) >= 1: ...
This method appends at the end of the dataframe as many rows as items are found in the list of elemnents in the provided columns. This assumes that the length of the lists for the several specified columns is the same. As an example, for the row A {"C1":"V1", "C2":field1, "C3":f...
def kruskal(graph, weight):
    """Minimum spanning tree by Kruskal's algorithm.

    :param graph: undirected graph in listlist or listdict format
    :param weight: edge weights, in matrix format or same listdict as graph
    :returns: list of (u, v) edges of the tree
    :complexity: ``O(|E|log|E|)``
    """
    n = len(graph)
    components = UnionFind(n)
    # Every (weight, u, v) triple, cheapest first; sorted() is stable,
    # matching an explicit build-then-sort of the same triples.
    edges = sorted((weight[u][v], u, v) for u in range(n) for v in graph[u])
    tree = []
    for _, u, v in edges:
        if components.union(u, v):  # u and v were in different components
            tree.append((u, v))
    return tree
Minimum spanning tree by Kruskal :param graph: undirected graph in listlist or listdict format :param weight: in matrix format or same listdict graph :returns: list of edges of the tree :complexity: ``O(|E|log|E|)``
def descriptions(cls):
    """Retrieve status descriptions from status.gandi.net.

    Flattens every entry of the schema's status field value into a single
    ``{status: description}`` dict.
    """
    schema = cls.json_get('%s/status/schema' % cls.api_url,
                          empty_key=True, send_key=False)
    return {status: text
            for entry in schema['fields']['status']['value']
            for status, text in entry.items()}
Retrieve status descriptions from status.gandi.net.
def queue_files(dirpath, queue):
    """Recursively walk *dirpath* and put the absolute path of every
    file found onto *queue*.

    :param dirpath: root directory to scan (made absolute before walking)
    :param queue: any object with a ``put(item)`` method, e.g. ``queue.Queue``
    """
    for dirname, _, filenames in os.walk(os.path.abspath(dirpath)):
        # os.walk yields an empty ``filenames`` list for file-less
        # directories, so the original ``if not files: continue`` guard
        # was redundant and has been dropped.
        for filename in filenames:
            queue.put(os.path.join(dirname, filename))
Add files in a directory to a queue
def generate_sample_cfn_module(env_root, module_dir=None): if module_dir is None: module_dir = os.path.join(env_root, 'sampleapp.cfn') generate_sample_module(module_dir) for i in ['stacks.yaml', 'dev-us-east-1.env']: shutil.copyfile( os.path.join(ROOT, 't...
Generate skeleton CloudFormation sample module.
def namespace(self, prefix=None):
    """Get this schema element's target namespace.

    In case of reference elements, the target namespace is defined by the
    referenced and not the referencing element node.

    @param prefix: The default prefix.
    @type prefix: str
    @return: The schema element's target namespace.
    """
    # Resolve the referenced element, if this is a reference element.
    e = self.__deref()
    if e is not None:
        # Delegate to the referenced element, forwarding the caller's prefix.
        return e.namespace(prefix)
    # NOTE(review): ``prefix`` is NOT forwarded to the superclass call,
    # unlike the dereferenced branch above -- confirm this asymmetry is
    # intentional before changing it.
    return super(Element, self).namespace()
Get this schema element's target namespace. In case of reference elements, the target namespace is defined by the referenced and not the referencing element node. @param prefix: The default prefix. @type prefix: str @return: The schema element's target namespace @rtype:...
def query_boost_version(boost_root): boost_version = None if os.path.exists(os.path.join(boost_root,'Jamroot')): with codecs.open(os.path.join(boost_root,'Jamroot'), 'r', 'utf-8') as f: for line in f.readlines(): parts = line.split() if...
Read in the Boost version from a given boost_root.
def get_tool_definition(self, target): if target not in self.targets_mcu_list: logging.debug("Target not found in definitions") return None mcu_record = self.targets.get_mcu_record(target) if self.mcus.get_mcu_record(target) is None else self.mcus.get_mcu_record(target) t...
Returns tool specific dic or None if it does not exist for defined tool
def get_rotated(self, angle):
    """Return a new vector: a copy of this one rotated by *angle*
    radians counter-clockwise.  The original vector is left untouched."""
    rotated = self.copy()
    rotated.rotate(angle)
    return rotated
Return a vector rotated by angle from the given vector. Angle measured in radians counter-clockwise.
def out_interactions_iter(self, nbunch=None, t=None): if nbunch is None: nodes_nbrs_succ = self._succ.items() else: nodes_nbrs_succ = [(n, self._succ[n]) for n in self.nbunch_iter(nbunch)] for n, nbrs in nodes_nbrs_succ: for nbr in nbrs: if t i...
Return an iterator over the out interactions present in a given snapshot. Edges are returned as tuples in the order (node, neighbor). Parameters ---------- nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterat...