code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def start_compress(codec, image, stream):
    """Wrap the openjp2 library function ``opj_start_compress``.

    Starts compression of the current image.

    Parameters
    ----------
    codec : CODEC_TYPE
        Compressor handle.
    image : pointer to ImageType
        Input filled image.
    stream : STREAM_TYPE_P
        Input stream.

    Raises
    ------
    RuntimeError
        If the OpenJPEG routine fails (raised via the ``check_error`` restype).
    """
    # (Re)declare the ctypes signature before each call; check_error converts
    # a failing return code into a Python exception.
    OPENJP2.opj_start_compress.argtypes = [CODEC_TYPE, ctypes.POINTER(ImageType), STREAM_TYPE_P]
    OPENJP2.opj_start_compress.restype = check_error
    OPENJP2.opj_start_compress(codec, image, stream)
Wraps openjp2 library function opj_start_compress. Start to compress the current image. Parameters ---------- codec : CODEC_TYPE Compressor handle. image : pointer to ImageType Input filled image. stream : STREAM_TYPE_P Input stream. Raises ------ RuntimeError If the OpenJPEG library routine opj_start_compress fails.
def from_global_id(global_id):
    """Split a global ID created by ``toGlobalId`` back into its parts.

    Returns a ``(type_name, local_id)`` tuple.
    """
    decoded = unbase64(global_id)
    type_name, local_id = decoded.split(':', 1)
    return type_name, local_id
Takes the "global ID" created by toGlobalID, and returns the type name and ID used to create it.
def column_summary_data(self):
    """Return a dict of column name -> value for cluster-level results."""
    assembled_summary = self._to_cluster_summary_assembled()
    pct_id, read_depth = self._pc_id_and_read_depth_of_longest()
    columns = {
        # reuse the already-computed summary instead of calling the
        # helper a second time (the original recomputed it here)
        'assembled': assembled_summary,
        'match': self._has_match(assembled_summary),
        'ref_seq': self.ref_name,
        'pct_id': str(pct_id),
        'ctg_cov': str(read_depth),
        'known_var': self._to_cluster_summary_has_known_nonsynonymous(assembled_summary),
        'novel_var': self._to_cluster_summary_has_novel_nonsynonymous(assembled_summary),
    }
    return columns
Returns a dictionary of column name -> value, for cluster-level results
def update(self, **kwargs):
    """Update settings from the given keyword arguments.

    Args:
        kwargs: The keyword arguments whose corresponding fields are set.

    Raises:
        ValueError: if a keyword does not name an existing attribute.
    """
    for name, value in kwargs.items():
        if not hasattr(self, name):
            raise ValueError("Invalid RayParams parameter in"
                             " update: %s" % name)
        setattr(self, name, value)
    self._check_usage()
Update the settings according to the keyword arguments. Args: kwargs: The keyword arguments to set corresponding fields.
def count_leases_by_owner(self, leases):
    """Return a plain dict mapping each current owner to its lease count."""
    return dict(Counter(lease.owner for lease in leases))
Returns a dictionary of leases by current owner.
def get_uri(self, ncname: str) -> Optional[str]:
    """Expand *ncname* via the configured CURIE maps.

    :param ncname: prefixed name to expand
    :return: the expanded URI if it starts with 'http', otherwise None
    """
    expanded = cu.expand_uri(ncname + ':', self.curi_maps)
    if expanded and expanded.startswith('http'):
        return expanded
    return None
Get the URI associated with ncname, or None if it does not expand to an http(s) URI. @param ncname: prefixed name to expand via the configured CURIE maps
def guessFormat(self):
    """Return the possible quality-score format(s) for this record.

    Several formats are returned when the observed character range is
    ambiguous between encodings.
    """
    codes = [ord(ch) for ch in self.quals]
    lowest, highest = min(codes), max(codes)
    matches = []
    for entry_format, (range_min, range_max) in iteritems(RANGES):
        if lowest >= range_min and highest < range_max:
            matches.append(entry_format)
    return matches
return quality score format - might return several if ambiguous.
def set_features(self):
    """Create qvality input score files for the target and decoy distributions."""
    self.scores = {}
    for t_or_d, feats in zip(['target', 'decoy'], [self.target, self.decoy]):
        self.scores[t_or_d] = {}
        # score_get_fun extracts the per-feature scores for this distribution
        self.scores[t_or_d]['scores'] = self.score_get_fun(
            feats, self.featuretype, self.prepare_percolator_output)
        self.scores[t_or_d]['fn'] = '{}_qvality_input.txt'.format(t_or_d)
        writers.write_qvality_input(self.scores[t_or_d]['scores'],
                                    self.scores[t_or_d]['fn'])
Creates scorefiles for qvality's target and decoy distributions
def gethash(compiled):
    """Retrieve the hash embedded in a compiled header, or None if absent.

    The hash is expected on the third line, following ``hash_prefix``.
    """
    lines = compiled.splitlines()
    if len(lines) >= 3 and lines[2].startswith(hash_prefix):
        return lines[2][len(hash_prefix):]
    return None
Retrieve a hash from a header.
def delete_job(job_id, deployment_name, token_manager=None, app_url=defaults.APP_URL):
    """Delete the job with the given job id.

    Raises JutException when the service answers with anything but HTTP 200.
    """
    headers = token_manager.get_access_token_headers()
    data_url = get_data_url_for_job(job_id, deployment_name,
                                    token_manager=token_manager, app_url=app_url)
    url = '%s/api/v1/jobs/%s' % (data_url, job_id)
    response = requests.delete(url, headers=headers)
    if response.status_code != 200:
        raise JutException('Error %s: %s' % (response.status_code, response.text))
delete a job with a specific job id
def plugins():
    """Display details about the local plugins, grouped by entrypoint kind."""
    plugins = current_app.config['PLUGINS']
    for name, description in entrypoints.ENTRYPOINTS.items():
        echo('{0} ({1})'.format(white(description), name))
        # which entries count as "active" depends on the entrypoint kind
        if name == 'udata.themes':
            actives = [current_app.config['THEME']]
        elif name == 'udata.avatars':
            actives = [avatar_config('provider')]
        else:
            actives = plugins
        for ep in sorted(entrypoints.iter_all(name), key=by_name):
            echo('> {0}: {1}'.format(ep.name, is_active(ep, actives)))
Display some details about the local plugins
def _check_asset_node_def(node_def):
    """Raise TypeError unless *node_def* is a scalar string constant node."""
    if node_def.op != "Const":
        raise TypeError("Asset node must be of type constant.")
    if tf.as_dtype(node_def.attr["dtype"].type) != tf.string:
        raise TypeError("Asset node must be of dtype string.")
    # a scalar string tensor carries exactly one value
    if len(node_def.attr["value"].tensor.string_val) != 1:
        raise TypeError("Asset node must be a scalar.")
Raises TypeError if `node_def` does not match the expectations.
def get_task_trackers(properties=None, hadoop_conf_dir=None, offline=False):
    """Get the list of task trackers in the Hadoop cluster as (host, port) pairs.

    When ``offline`` is True the ``slaves`` file in Hadoop's configuration
    directory is read instead of contacting the daemons; ports are then 0.
    All other arguments are passed to :func:`run_class`.
    """
    if offline:
        if not hadoop_conf_dir:
            hadoop_conf_dir = pydoop.hadoop_conf()
        slaves = os.path.join(hadoop_conf_dir, "slaves")
        try:
            with open(slaves) as f:
                task_trackers = [(l.strip(), 0) for l in f]
        except IOError:
            # no slaves file: report an empty cluster
            task_trackers = []
    else:
        stdout = run_class(
            "org.apache.hadoop.mapred.JobClient", ["-list-active-trackers"],
            properties=properties, hadoop_conf_dir=hadoop_conf_dir,
            keep_streams=True
        )
        task_trackers = []
        for line in stdout.splitlines():
            if not line:
                continue
            # lines look like "tracker_<host>:<...>:<port>"; split out host & port
            line = line.split(":")
            task_trackers.append((line[0].split("_")[1], int(line[-1])))
    return task_trackers
Get the list of task trackers in the Hadoop cluster. Each element in the returned list is in the ``(host, port)`` format. All arguments are passed to :func:`run_class`. If ``offline`` is :obj:`True`, try getting the list of task trackers from the ``slaves`` file in Hadoop's configuration directory (no attempt is made to contact the Hadoop daemons). In this case, ports are set to 0.
def StringIO(*args, **kwargs):
    """Constructor shim: build a sync StringIO and wrap it for async use."""
    return AsyncStringIOWrapper(sync_io.StringIO(*args, **kwargs))
StringIO constructor shim for the async wrapper.
def setFixedHeight(self, height):
    """Set the fixed height for this bar and propagate it to child widgets.

    :param height: <int>
    """
    super(XViewPanelBar, self).setFixedHeight(height)
    if self.layout():
        # NOTE: Python 2 code (xrange / StandardError)
        for i in xrange(self.layout().count()):
            try:
                self.layout().itemAt(i).widget().setFixedHeight(height)
            except StandardError:
                # item may not have a widget (e.g. a spacer); skip it
                continue
Sets the fixed height for this bar to the input height. :param height | <int>
async def getTempCortex(mods=None):
    """Yield a local proxy to a Cortex backed by a temporary directory.

    This is an async generator; the cortex and temporary directory are torn
    down when the generator exits.

    Args:
        mods (list): Optional list of modules loaded into the cortex.
    """
    with s_common.getTempDir() as dirn:
        async with await Cortex.anit(dirn) as core:
            if mods:
                for mod in mods:
                    await core.loadCoreModule(mod)
            async with core.getLocalProxy() as prox:
                yield prox
Get a proxy to a cortex backed by a temporary directory. Args: mods (list): A list of modules which are loaded into the cortex. Notes: The cortex and temporary directory are torn down on exit. This should only be called from synchronous code. Returns: Proxy to the cortex.
def _load_attributes(new_class):
    """Register every declared field on the class meta, keyed by attribute name."""
    declared = new_class.meta_.declared_fields
    attributes = new_class.meta_.attributes
    for field_obj in declared.values():
        attributes[field_obj.get_attribute_name()] = field_obj
Load list of attributes from declared fields
def _init_threading(self, function, params=None, num_threads=10):
    """Create a work queue and start *num_threads* daemon worker threads.

    Each worker runs ``function(q, params)``.

    :param function: worker target, called with the queue and params
    :param params: optional dict of parameters shared by the workers
    :param num_threads: number of worker threads to start
    :return: the unbounded Queue the workers consume from
    """
    # BUG FIX: `params={}` was a shared mutable default argument; use the
    # None sentinel so each call gets a fresh dict.
    if params is None:
        params = {}
    q = Queue(maxsize=0)
    for _ in range(num_threads):
        worker = Thread(target=function, args=(q, params))
        worker.daemon = True  # setDaemon() is deprecated
        worker.start()
    return q
Initialize queue and threads :param function: :param params: :param num_threads: :return:
def get_configuration(form, out):
    """Initialize a CGI linkchecker Configuration from the submitted form."""
    config = configuration.Configuration()
    config["recursionlevel"] = int(formvalue(form, "level"))
    config["logger"] = config.logger_new('html', fd=out, encoding=HTML_ENCODING)
    config["threads"] = 2
    if "anchors" in form:
        config["enabledplugins"].append("AnchorCheck")
    if "errors" not in form:
        config["verbose"] = True
    # only the exact submitted URL is treated as an external link
    pat = "!^%s$" % urlutil.safe_url_pattern
    config["externlinks"].append(get_link_pat(pat, strict=True))
    config.sanitize()
    return config
Initialize a CGI configuration.
def _filter_image(self, url):
    "The param is the image URL, which is returned if it passes all the filters."
    # The first list element seeds the reduction (the url, or a falsy value
    # when AdblockURLFilter rejects it). Each following filter instance is
    # called with the running value; any falsy result short-circuits the
    # rest of the chain via `f and g(f)`.
    return reduce(lambda f, g: f and g(f), [
        filters.AdblockURLFilter()(url),
        filters.NoImageFilter(),
        filters.SizeImageFilter(),
        filters.MonoImageFilter(),
        filters.FormatImageFilter(),
    ])
The param is the image URL, which is returned if it passes all the filters.
def inject_trace_header(headers, entity):
    """Inject trace id, entity id and sampling decision into http headers.

    :param dict headers: http headers to inject into
    :param Entity entity: trace entity the header value is generated from;
        a falsy entity makes this a no-op.
    """
    if not entity:
        return
    if hasattr(entity, 'type') and entity.type == 'subsegment':
        # subsegments keep the origin trace header on their parent segment
        header = entity.parent_segment.get_origin_trace_header()
    else:
        header = entity.get_origin_trace_header()
    data = header.data if header else None
    to_insert = TraceHeader(
        root=entity.trace_id,
        parent=entity.id,
        sampled=entity.sampled,
        data=data,
    )
    value = to_insert.to_header_str()
    headers[http.XRAY_HEADER] = value
Extract trace id, entity id and sampling decision from the input entity and inject these information to headers. :param dict headers: http headers to inject :param Entity entity: trace entity that the trace header value generated from.
def remove_draft_child(self):
    """Remove the draft child PID from versioning, if one exists."""
    if self.draft_child:
        # nested transaction keeps the removal + reorder atomic
        with db.session.begin_nested():
            super(PIDNodeVersioning, self).remove_child(self.draft_child,
                                                        reorder=True)
Remove the draft child from versioning.
def link_head(self, node):
    """Insert *node* between this node and its current head.

    *node* must not already have a tail.
    """
    assert not node.tail
    previous_head = self.head
    if previous_head:
        assert previous_head.tail == self
        previous_head.tail = node
        node.head = previous_head
    node.tail = self
    self.head = node
Add a node to the head.
def close(self):
    """Terminate the bridge session: kill all worker greenlets, close ssh."""
    gevent.killall(self._tasks, block=True)
    self._tasks = []
    self._ssh.close()
Terminate a bridge session
def no_operation(self, onerror = None):
    """Do nothing but send a NoOperation request to the X server."""
    request.NoOperation(display = self.display,
                        onerror = onerror)
Do nothing but send a request to the server.
def get_settings(all, key):
    """View Hitman internal settings.

    Prints every key/value pair when *all* is truthy, a single entry when
    *key* is given, and a usage hint otherwise.
    """
    # NOTE: parameter `all` shadows the builtin; kept for CLI compatibility.
    with Database("settings") as s:
        if all:
            for k, v in zip(list(s.keys()), list(s.values())):
                print("{} = {}".format(k, v))
        elif key:
            print("{} = {}".format(key, s[key]))
        else:
            print("Don't know what you want? Try --all")
View Hitman internal settings. Use 'all' for all keys
def create_dn_in_filter(filter_class, filter_value, helper):
    """Create a FilterFilter wrapping a DN wildcard filter for the given class/value.

    Note: *helper* is accepted for API compatibility but not used here.
    """
    dn_filter = FilterFilter()
    dn_filter.AddChild(create_dn_wcard_filter(filter_class, filter_value))
    return dn_filter
Creates filter object for given class name, and DN values.
def control(self) -> Optional[HTMLElement]:
    """Return the HTMLElement referenced by this label's ``for`` attribute.

    Returns None when the attribute is absent or the element is not found.
    Raises TypeError when the attribute value is not a string and the node
    has no owner document.
    """
    id = self.getAttribute('for')  # NOTE: shadows the builtin id()
    if id:
        if self.ownerDocument:
            return self.ownerDocument.getElementById(id)
        elif isinstance(id, str):
            # detached node: fall back to the module-level document lookup
            from wdom.document import getElementById
            return getElementById(id)
        else:
            raise TypeError('"for" attribute must be string')
    return None
Return related HTMLElement object.
def MakeUniformPmf(low, high, n):
    """Make a uniform Pmf.

    low: lowest value (inclusive)
    high: highest value (inclusive)
    n: number of values
    """
    pmf = Pmf()
    for x in numpy.linspace(low, high, n):
        pmf.Set(x, 1)
    # equal raw weights are normalized into a uniform distribution
    pmf.Normalize()
    return pmf
Make a uniform Pmf. low: lowest value (inclusive) high: highest value (inclusive) n: number of values
def colorspace(im, bw=False, replace_alpha=False, **kwargs):
    """Convert images to the correct color space (RGB, or L for grayscale/bw).

    bw
        Make the thumbnail grayscale.
    replace_alpha
        Replace any transparency layer with this solid color (e.g. '#fff');
        when falsy, transparency is preserved by keeping an alpha channel.
    """
    if im.mode == 'I':
        # 32-bit integer images need an explicit point table to become 'L'
        im = im.point(list(_points_table()), 'L')
    is_transparent = utils.is_transparent(im)
    is_grayscale = im.mode in ('L', 'LA')
    new_mode = im.mode
    if is_grayscale or bw:
        new_mode = 'L'
    else:
        new_mode = 'RGB'
    if is_transparent:
        if replace_alpha:
            # flatten the alpha channel onto the replacement color
            if im.mode != 'RGBA':
                im = im.convert('RGBA')
            base = Image.new('RGBA', im.size, replace_alpha)
            base.paste(im, mask=im)
            im = base
        else:
            new_mode = new_mode + 'A'
    if im.mode != new_mode:
        im = im.convert(new_mode)
    return im
Convert images to the correct color space. A passive option (i.e. always processed) of this method is that all images (unless grayscale) are converted to RGB colorspace. This processor should be listed before :func:`scale_and_crop` so palette is changed before the image is resized. bw Make the thumbnail grayscale (not really just black & white). replace_alpha Replace any transparency layer with a solid color. For example, ``replace_alpha='#fff'`` would replace the transparency layer with white.
def hexblock_byte(cls, data, address = None, bits = None, separator = ' ', width = 16):
    """Dump a block of hexadecimal BYTEs from binary data.

    Delegates to ``hexblock_cb`` with the per-byte ``hexadecimal`` formatter.

    @param data: Binary data.
    @param address: Memory address where the data was read from.
    @param bits: (Optional) Number of bits of the target architecture.
    @param separator: Separator between hexadecimal BYTEs.
    @param width: (Optional) Maximum BYTEs per text line.
    @return: Multiline output text.
    """
    return cls.hexblock_cb(cls.hexadecimal, data,
                           address, bits, width,
                           cb_kwargs = {'separator': separator})
Dump a block of hexadecimal BYTEs from binary data. @type data: str @param data: Binary data. @type address: str @param address: Memory address where the data was read from. @type bits: int @param bits: (Optional) Number of bits of the target architecture. The default is platform dependent. See: L{HexDump.address_size} @type separator: str @param separator: Separator between the hexadecimal representation of each BYTE. @type width: int @param width: (Optional) Maximum number of BYTEs to convert per text line. @rtype: str @return: Multiline output text.
def get_dicts(self):
    """Yield each non-empty row of the .csv file as a dict.

    :return: generator of dicts with data from the .csv file
    """
    # BUG FIX: the file handle was opened inline and never closed; use a
    # context manager so it is closed when iteration finishes.
    with open(self.path, "r", encoding=self.encoding) as csv_file:
        reader = csv.DictReader(csv_file)
        for row in reader:
            if row:
                yield row
Gets dicts in file :return: (generator of) of dicts with data from .csv file
def xdifference(self, to):
    """Yield the quadkeys spanning the rectangle between self and *to*.

    Both quadkeys must be of the same level.
    """
    x, y = 0, 1
    assert self.level == to.level
    self_tile = list(self.to_tile()[0])
    to_tile = list(to.to_tile()[0])
    # BUG FIX: the second comparison previously read
    # `self_tile[y] <= self_tile[y]` (always True), so corner selection
    # effectively ignored the y coordinate.
    if self_tile[x] >= to_tile[x] and self_tile[y] <= to_tile[y]:
        ne_tile, sw_tile = self_tile, to_tile
    else:
        sw_tile, ne_tile = self_tile, to_tile
    cur = ne_tile[:]
    # walk the rectangle column by column, resetting y at each new column
    while cur[x] >= sw_tile[x]:
        while cur[y] <= sw_tile[y]:
            yield from_tile(tuple(cur), self.level)
            cur[y] += 1
        cur[x] -= 1
        cur[y] = ne_tile[y]
Generator Gives the difference of quadkeys between self and to Generator in case done on a low level Only works with quadkeys of same level
def strip_filter(value):
    """Strip disallowed HTML from strings per the SANITIZER_ALLOWED_* settings.

    Non-string values are returned unchanged.

    Example usage::

        {% load sanitizer %}
        {{ post.content|strip_html }}
    """
    if isinstance(value, basestring):  # NOTE: Python 2 (basestring)
        value = bleach.clean(value, tags=ALLOWED_TAGS,
                             attributes=ALLOWED_ATTRIBUTES,
                             styles=ALLOWED_STYLES, strip=True)
    return value
Strips HTML tags from strings according to SANITIZER_ALLOWED_TAGS, SANITIZER_ALLOWED_ATTRIBUTES and SANITIZER_ALLOWED_STYLES variables in settings. Example usage: {% load sanitizer %} {{ post.content|strip_html }}
def random(game):
    """Shuffle the game's valid moves into a random preference order.

    :param Game game: game to play
    :return: None
    """
    shuffled = sorted(game.valid_moves, key=lambda _: rand.random())
    game.valid_moves = tuple(shuffled)
Prefers moves randomly. :param Game game: game to play :return: None
def wait_for_browser_close(b):
    """Block until the given TBrowser is closed; a null browser is a no-op."""
    if b:
        if not __ACTIVE:
            # event loop not active yet: re-register via the failover hook
            wait_failover(wait_for_browser_close)
            return
        wait_for_frame(b.GetBrowserImp().GetMainFrame())
Can be used to wait until a TBrowser is closed
def log_startup_info():
    """Log info about the current environment (versions, platform, pymongo)."""
    LOG.always("Starting mongo-connector version: %s", __version__)
    if "dev" in __version__:
        LOG.warning(
            "This is a development version (%s) of mongo-connector", __version__
        )
    LOG.always("Python version: %s", sys.version)
    LOG.always("Platform: %s", platform.platform())
    # older pymongo releases exposed `version` instead of `__version__`
    if hasattr(pymongo, "__version__"):
        pymongo_version = pymongo.__version__
    else:
        pymongo_version = pymongo.version
    LOG.always("pymongo version: %s", pymongo_version)
    if not pymongo.has_c():
        LOG.warning(
            "pymongo version %s was installed without the C extensions. "
            '"InvalidBSON: Date value out of range" errors may occur if '
            "there are documents with BSON Datetimes that represent times "
            "outside of Python's datetime limit.",
            pymongo.__version__,
        )
Log info about the current environment.
def format_terminal_row(headers, example_row):
    """Build a row format string sized from headers and example data.

    Args:
        headers (tuple of strings): The headers for each column of data
        example_row (tuple): A representative tuple of strings or ints

    Returns:
        string: A format string with a size for each column. String columns
        get truncating specs; other types are left-aligned. When stdout is a
        tty the last column is clipped to the terminal width and '...' is
        appended if it was narrowed.
    """
    def format_column(col):
        # doubled braces survive the outer .format(w=...) call below
        if isinstance(col, str):
            return '{{:{w}.{w}}}'
        return '{{:<{w}}}'
    widths = [max(len(h), len(str(d))) for h, d in zip(headers, example_row)]
    original_last_width = widths[-1]
    if sys.stdout.isatty():
        widths[-1] = max(
            len(headers[-1]),
            tty.width() - sum(w + 2 for w in widths[0:-1]) - 3)
    cols = [format_column(c).format(w=w) for c, w in zip(example_row, widths)]
    format_string = ' '.join(cols)
    if original_last_width > widths[-1]:
        format_string += '...'
    return format_string
Uses headers and a row of example data to generate a format string for printing a single row of data. Args: headers (tuple of strings): The headers for each column of data example_row (tuple): A representative tuple of strings or ints Returns string: A format string with a size for each column
def __content_type_matches(self, content_type, available_content_types):
    """Check whether *content_type* matches any available content type.

    Args:
        content_type (str): The given content type.
        available_content_types list(str): All the available content types.

    Returns:
        bool: True if a match was found (exact or substring), else False.
    """
    if content_type is None:
        return False
    if content_type in available_content_types:
        return True
    return any(available in content_type
               for available in available_content_types)
Check if the given content type matches one of the available content types. Args: content_type (str): The given content type. available_content_types list(str): All the available content types. Returns: bool: True if a match was found, False otherwise.
def get_root_repositories(self):
    """Gets the root repositories in the repository hierarchy.

    return: (osid.repository.RepositoryList) - the root repositories
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # prefer the underlying catalog hierarchy session when available
    if self._catalog_session is not None:
        return self._catalog_session.get_root_catalogs()
    return RepositoryLookupSession(
        self._proxy,
        self._runtime).get_repositories_by_ids(list(self.get_root_repository_ids()))
Gets the root repositories in the repository hierarchy. A node with no parents is an orphan. While all repository ``Ids`` are known to the hierarchy, an orphan does not appear in the hierarchy unless explicitly added as a root node or child of another node. return: (osid.repository.RepositoryList) - the root repositories raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method is must be implemented.*
def derive(self, new_version: Union[tuple, list]=None) -> "Model":
    """Derive a new model version from the current one (in-place).

    :param new_version: The version for the derived model; when omitted the
        last component of the current version is bumped.
    :return: The derived model - self.
    """
    meta = self.meta
    is_first_derivation = self._initial_version == self.version
    if new_version is None:
        # bump the last component of the stored version in place
        new_version = meta["version"]
        new_version[-1] += 1
    if not isinstance(new_version, (tuple, list)):
        raise ValueError(
            "new_version must be either a list or a tuple, got %s" % type(new_version))
    meta["version"] = list(new_version)
    if is_first_derivation:
        # remember the original model as the parent lineage entry
        meta["parent"] = meta["uuid"]
    meta["uuid"] = str(uuid.uuid4())
    return self
Inherit the new model from the current one - used for versioning. \ This operation is in-place. :param new_version: The version of the new model. :return: The derived model - self.
def connect(host, port, username, password):
    """Connect and log in to an FTP server; return the ftplib.FTP session."""
    ftp = ftplib.FTP()
    ftp.connect(host, port)
    ftp.login(username, password)
    return ftp
Connect and login to an FTP server and return ftplib.FTP object.
def _get_chart_info(df, vtype, cat, prep, callers):
    """Retrieve values for a specific variant type, category and prep method.

    Returns (vals, labels, maxval_raw); callers with no matching row get a
    placeholder value of 1 and an empty label.
    """
    maxval_raw = max(list(df["value.floor"]))
    curdf = df[(df["variant.type"] == vtype) & (df["category"] == cat)
               & (df["bamprep"] == prep)]
    vals = []
    labels = []
    for c in callers:
        # BUG FIX: the mask was built on the full `df` but applied to the
        # filtered `curdf`; with pandas index alignment this could select
        # the wrong rows. Build the mask on curdf itself.
        row = curdf[curdf["caller"] == c]
        if len(row) > 0:
            vals.append(list(row["value.floor"])[0])
            labels.append(list(row["value"])[0])
        else:
            vals.append(1)
            labels.append("")
    return vals, labels, maxval_raw
Retrieve values for a specific variant type, category and prep method.
def list_after(self, message_id, limit=None):
    """Return a page of group messages created after a message.

    This is used to page forwards through messages.

    :param str message_id: the ID of a message
    :param int limit: maximum number of messages per page
    :return: group messages
    """
    page_args = {'after_id': message_id, 'limit': limit}
    return self.list(**page_args)
Return a page of group messages created after a message. This is used to page forwards through messages. :param str message_id: the ID of a message :param int limit: maximum number of messages per page :return: group messages :rtype: :class:`~groupy.pagers.MessageList`
def _slice2rows(self, start, stop, step=None):
    """Convert a slice to an explicit int64 array of row indices.

    Returns None when the clamped range covers every row, signalling
    "all rows" to the caller.  NOTE(review): that full-range shortcut
    ignores step != 1 — confirm callers never pass a stepped full slice.
    """
    nrows = self._info['nrows']
    if start is None:
        start = 0
    if stop is None:
        stop = nrows
    if step is None:
        step = 1
    # clamp the endpoints into the valid row range
    tstart = self._fix_range(start)
    tstop = self._fix_range(stop)
    if tstart == 0 and tstop == nrows:
        return None
    if stop < start:
        raise ValueError("start is greater than stop in slice")
    return numpy.arange(tstart, tstop, step, dtype='i8')
Convert a slice to an explicit array of rows
def _read_repos(conf_file, repos, filename, regex):
    """Accumulate repos parsed from *conf_file* into *repos*, keyed by URI.

    Mutates the *repos* dict in place; each URI maps to a list of repo dicts.
    """
    for line in conf_file:
        line = salt.utils.stringutils.to_unicode(line)
        if not regex.search(line):
            continue
        repo = _create_repo(line, filename)
        if repo['uri'] not in repos:
            repos[repo['uri']] = [repo]
        else:
            # FIX: multiple entries can share one URI (the dict values are
            # lists); previously later entries were silently dropped.
            repos[repo['uri']].append(repo)
Read repos from configuration file
def _ToJsonName(name):
    """Convert a snake_case field name to its lowerCamelCase JSON name."""
    pieces = []
    upper_next = False
    for ch in name:
        if ch == '_':
            upper_next = True
        elif upper_next:
            pieces.append(ch.upper())
            upper_next = False
        else:
            pieces.append(ch)
    return ''.join(pieces)
Converts name to Json name and returns it.
def install_time(self):
    """Return the install time, or an estimate of it.

    Uses the registry InstallDate value when present and parseable,
    otherwise falls back to the key's last-modified time.

    Returns:
        int: Seconds since 1970 UTC.
    """
    time1970 = self.__mod_time1970
    try:
        date_string, item_type = \
            win32api.RegQueryValueEx(self.__reg_uninstall_handle, 'InstallDate')
    except pywintypes.error as exc:
        if exc.winerror == winerror.ERROR_FILE_NOT_FOUND:
            # no InstallDate value at all: use the key mtime estimate
            return time1970
        else:
            raise
    if item_type == win32con.REG_SZ:
        try:
            date_object = datetime.datetime.strptime(date_string, "%Y%m%d")
            time1970 = time.mktime(date_object.timetuple())
        except ValueError:
            # malformed date string: keep the mtime estimate
            pass
    return time1970
Return the install time, or provide an estimate of install time. Installers or even self upgrading software must/should update the date held within InstallDate field when they change versions. Some installers do not set ``InstallDate`` at all so we use the last modified time on the registry key. Returns: int: Seconds since 1970 UTC.
def query(self):
    """De-serialize and return the ORM query stored in b64_query (or None)."""
    if not self.b64_query:
        return None
    serializer = QSerializer(base64=True)
    return serializer.loads(self.b64_query)
De-serialize, decode and return an ORM query stored in b64_query.
def commit(self, session=None):
    """Merge modified objects into the parent transaction (or repository).

    Once committed, a transaction object is not usable anymore; committing
    an already-cleared transaction is a no-op.

    :param session: current sqlalchemy Session
    """
    if self.__cleared:
        return
    if self._parent:
        self._commit_parent()
    else:
        # top-level transaction: flush straight into the repository
        self._commit_repository()
    self._clear()
Merge modified objects into parent transaction. Once commited a transaction object is not usable anymore :param:session: current sqlalchemy Session
def configure(self, transport, auth, address, port):
    """Record the connection parameters for the paramiko transport.

    :param transport: paramiko transport object
    :param auth: authentication object carrying the username
    :param address: remote server ip or hostname
    :param port: remote server port
    """
    self.address = address
    self.port = port
    self.transport = transport
    self.username = auth.username
Connect paramiko transport :type auth: :py:class`margaritashotgun.auth.AuthMethods` :param auth: authentication object :type address: str :param address: remote server ip or hostname :type port: int :param port: remote server port :type hostkey: :py:class:`paramiko.key.HostKey` :param hostkey: remote host ssh server key
def get_workflow_actions(obj):
    """Compile the list of possible workflow transitions for *obj*.

    Each entry is a dict with the transition 'id' and translated 'title'.
    """
    def _title(transition_id):
        return t(PMF(transition_id + "_transition_title"))
    return [{'id': tid, 'title': _title(tid)}
            for tid in getAllowedTransitions(obj)]
Compile a list of possible workflow transitions for this object
def home(self) -> 'InstrumentContext':
    """Home the Z axis and plunger of this instrument's mount.

    :returns: This instance.
    """
    # placeholder command target: publishing is for the broker only
    def home_dummy(mount):
        pass
    cmds.do_publish(self.broker, cmds.home, home_dummy,
                    'before', None, None, self._mount.name.lower())
    self._hw_manager.hardware.home_z(self._mount)
    self._hw_manager.hardware.home_plunger(self._mount)
    cmds.do_publish(self.broker, cmds.home, home_dummy,
                    'after', self, None, self._mount.name.lower())
    return self
Home the robot. :returns: This instance.
def combine_inputs(self, args, kw, ignore_args):
    """Combine args and kwargs deterministically so kwarg ordering cannot
    trigger a recompute; positions listed in *ignore_args* are dropped."""
    ordered_kw_values = tuple(value for _, value in sorted(kw.items()))
    combined = args + ordered_kw_values
    return [value for index, value in enumerate(combined)
            if index not in ignore_args]
Combines the args and kw in a unique way, such that ordering of kwargs does not lead to recompute
def _ParseIdentifierMappingsTable(self, parser_mediator, esedb_table):
    """Extract identifier mappings from the SruDbIdMapTable table.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        esedb_table (pyesedb.table): table.

    Returns:
        dict[int, str]: mapping of numeric identifiers to their string
            representation.
    """
    identifier_mappings = {}
    for esedb_record in esedb_table.records:
        if parser_mediator.abort:
            break
        identifier, mapped_value = self._ParseIdentifierMappingRecord(
            parser_mediator, esedb_table.name, esedb_record)
        if identifier is None or mapped_value is None:
            continue
        if identifier in identifier_mappings:
            # duplicates are reported and the first mapping is kept
            parser_mediator.ProduceExtractionWarning(
                'identifier: {0:d} already exists in mappings.'.format(identifier))
            continue
        identifier_mappings[identifier] = mapped_value
    return identifier_mappings
Extracts identifier mappings from the SruDbIdMapTable table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. esedb_table (pyesedb.table): table. Returns: dict[int, str]: mapping of numeric identifiers to their string representation.
def enqueue_data(self, event_type, data):
    """Fan a data item out to every registered listener for *event_type*
    and flag that processing is required."""
    with self.lock:
        snapshot = self.listeners.values()
        for listener in snapshot:
            listener.enqueue(event_type, data)
        self.must_process = True
Enqueue a data item for specific event type
def _collapse_to_cwl_record_single(data, want_attrs, input_files):
    """Convert a single sample into a CWL record keyed by the wanted attributes."""
    out = {}
    for key in want_attrs:
        # double-underscore keys address nested fields within the sample dict
        key_parts = key.split("__")
        out[key] = _to_cwl(tz.get_in(key_parts, data), input_files)
    return out
Convert a single sample into a CWL record.
def set_string(self, option, value):
    """Set a string option.

    Args:
        option (str): name of option.
        value (str): value of the option.

    Raises:
        TypeError: Value must be a string.
    """
    if isinstance(value, str):
        self.options[option] = value
    else:
        raise TypeError("%s must be a string" % option)
Set a string option. Args: option (str): name of option. value (str): value of the option. Raises: TypeError: Value must be a string.
def preprocess_topics(source_groupid, source_topics, dest_groupid, topics_dest_group):
    """Pre-process the topics in source and destination group for duplicates.

    Exits with an error when the destination group already subscribes to any
    source topic; otherwise prompts before copying into an existing group.
    """
    common_topics = [topic for topic in topics_dest_group if topic in source_topics]
    if common_topics:
        print(
            "Error: Consumer Group ID: {groupid} is already "
            "subscribed to following topics: {topic}.\nPlease delete this "
            "topics from new group before re-running the "
            "command.".format(
                groupid=dest_groupid,
                topic=', '.join(common_topics),
            ),
            file=sys.stderr,
        )
        sys.exit(1)
    if topics_dest_group:
        in_str = (
            "New Consumer Group: {dest_groupid} already "
            "exists.\nTopics subscribed to by the consumer groups are listed "
            "below:\n{source_groupid}: {source_group_topics}\n"
            # BUG FIX: the two fragments used to concatenate to
            # "copy intoexisting" — a space was missing.
            "{dest_groupid}: {dest_group_topics}\nDo you intend to copy into "
            "existing consumer destination-group? (y/n)".format(
                source_groupid=source_groupid,
                source_group_topics=source_topics,
                dest_groupid=dest_groupid,
                dest_group_topics=topics_dest_group,
            )
        )
        prompt_user_input(in_str)
Pre-process the topics in source and destination group for duplicates.
def import_file(self, filepath, filterindex):
    """Imports external file.

    Parameters
    ----------
    filepath: String
        Path of import file
    filterindex: Integer
        Index for type of file, 0: csv, 1: tab-delimited text file;
        any other value shows a warning instead of raising.
    """
    post_command_event(self.main_window, self.ContentChangedMsg)
    if filterindex == 0:
        return self._import_csv(filepath)
    elif filterindex == 1:
        return self._import_txt(filepath)
    else:
        msg = _("Unknown import choice {choice}.")
        msg = msg.format(choice=filterindex)
        short_msg = _('Error reading CSV file')
        self.main_window.interfaces.display_warning(msg, short_msg)
Imports external file Parameters ---------- filepath: String \tPath of import file filterindex: Integer \tIndex for type of file, 0: csv, 1: tab-delimited text file
def init_default(self):
    """Initializes object with its default values.

    Loads self.default_filename from the default data directory. For safety,
    filename is then reset to None so the instance no longer points at (and
    cannot overwrite) the packaged default file.
    """
    import f311
    if self.default_filename is None:
        raise RuntimeError("Class '{}' has no default filename".format(self.__class__.__name__))
    fullpath = f311.get_default_data_path(self.default_filename, class_=self.__class__)
    self.load(fullpath)
    self.filename = None
Initializes object with its default values Tries to load self.default_filename from default data directory. For safety, filename is reset to None so that it doesn't point to the original file.
def get_sequence_rules_by_genus_type(self, sequence_rule_genus_type):
    """Gets a ``SequenceRuleList`` matching the given genus ``Type``.

    Only exact genus-type matches are returned (not derived types).

    arg:    sequence_rule_genus_type (osid.type.Type): a sequence rule
            genus type
    return: (osid.assessment.authoring.SequenceRuleList) - the returned
            ``SequenceRule`` list
    raise:  NullArgument - ``sequence_rule_genus_type`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('assessment_authoring',
                                     collection='SequenceRule',
                                     runtime=self._runtime)
    result = collection.find(
        dict({'genusTypeId': str(sequence_rule_genus_type)},
             **self._view_filter())).sort('_id', DESCENDING)
    return objects.SequenceRuleList(result, runtime=self._runtime, proxy=self._proxy)
Gets a ``SequenceRuleList`` corresponding to the given sequence rule genus ``Type`` which does not include sequence rule of genus types derived from the specified ``Type``. arg: sequence_rule_genus_type (osid.type.Type): a sequence rule genus type return: (osid.assessment.authoring.SequenceRuleList) - the returned ``SequenceRule`` list raise: NullArgument - ``sequence_rule_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def descendants(self):
    """Recursively yield every dataset below the current item.

    Navigation is stateful: move_to/move_up mutate the cursor, so position
    must be restored after visiting each child.
    """
    for i in self.current_item.items:
        self.move_to(i)
        if i.type == TYPE_COLLECTION:
            # collections are descended into via self.children, which
            # re-enters this generator at the new cursor position
            for c in self.children:
                yield c
        else:
            yield i
        self.move_up()
Recursively return every dataset below current item.
def check_type_of_nest_spec_keys_and_values(nest_spec):
    """Ensure the keys and values of `nest_spec` are strings and lists.

    Parameters
    ----------
    nest_spec : OrderedDict.
        Keys are strings naming the nests; values are lists of alternative ids.

    Returns
    -------
    None.

    Raises
    ------
    TypeError
        If any key is not a string or any value is not a list.
    """
    # Explicit checks instead of try/assert: `assert` statements are
    # stripped under `python -O`, which would silently skip validation.
    keys_ok = all(isinstance(k, str) for k in nest_spec)
    values_ok = all(isinstance(nest_spec[k], list) for k in nest_spec)
    if not (keys_ok and values_ok):
        msg = "All nest_spec keys/values must be strings/lists."
        raise TypeError(msg)
    return None
Ensures that the keys and values of `nest_spec` are strings and lists. Raises a helpful TypeError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. Returns ------- None.
def _update_nonce_explicit(self):
    """Advance the explicit nonce, wrapping on overflow of its byte width."""
    modulus = 2 ** (self.nonce_explicit_len * 8)
    self.nonce_explicit = (self.nonce_explicit + 1) % modulus
Increment the explicit nonce while avoiding any overflow.
def prepare_destruction(self):
    """Break circular references so this object can be garbage collected."""
    self._tool = None
    self._painter = None
    self.relieve_model(self._selection)
    self._selection = None
    # clear the Observer bookkeeping dicts (name-mangled base-class state)
    self._Observer__PROP_TO_METHS.clear()
    self._Observer__METH_TO_PROPS.clear()
    self._Observer__PAT_TO_METHS.clear()
    self._Observer__METH_TO_PAT.clear()
    self._Observer__PAT_METH_TO_KWARGS.clear()
Get rid of circular references
def _expand_to_beam_size(tensor, beam_size):
    """Tile *tensor* along a new beam axis.

    Args:
        tensor: tensor to tile [batch_size, ...]
        beam_size: how many copies to make along the new axis 1.

    Returns:
        Tiled tensor [batch_size, beam_size, ...]
    """
    expanded = tf.expand_dims(tensor, axis=1)
    multiples = [1] * expanded.shape.ndims
    multiples[1] = beam_size
    return tf.tile(expanded, multiples)
Tiles a given tensor by beam_size. Args: tensor: tensor to tile [batch_size, ...] beam_size: How much to tile the tensor by. Returns: Tiled tensor [batch_size, beam_size, ...]
def AddShowToTVLibrary(self, showName):
    """Add show to TVLibrary table; a pre-existing entry is a fatal error.

    Parameters
    ----------
    showName : string
        Show name to add to TV library table.

    Returns
    ----------
    int
        Unique show id generated for the show when it is added.
    """
    goodlogging.Log.Info("DB", "Adding {0} to TV library".format(showName),
                         verbosity=self.logVerbosity)
    currentShowValues = self.SearchTVLibrary(showName = showName)
    if currentShowValues is None:
        self._ActionDatabase("INSERT INTO TVLibrary (ShowName) VALUES (?)",
                             (showName, ))
        # read back the autogenerated ShowID for the new row
        showID = self._ActionDatabase("SELECT (ShowID) FROM TVLibrary WHERE ShowName=?",
                                      (showName, ))[0][0]
        return showID
    else:
        goodlogging.Log.Fatal("DB",
                              "An entry for {0} already exists in the TV library".format(showName))
Add show to TVLibrary table. If the show already exists in the table a fatal error is raised. Parameters ---------- showName : string Show name to add to TV library table. Returns ---------- int Unique show id generated for show when it is added to the table. Used across the database to reference this show.
def is_step_visible(self, step):
    """Return whether *step* should be included in the wizard.

    A step is visible unless it is marked non-idempotent AND already has
    validated data in storage.
    """
    if self.idempotent_dict.get(step, True):
        return True
    return step not in self.storage.validated_step_data
Returns whether the given `step` should be included in the wizard; it is included if either the form is idempotent or not filled in before.
def split_cmdline(cmdline):
    """Split a process cmdline list into (path, command, argument string)."""
    directory, executable = os.path.split(cmdline[0])
    argument_string = ' '.join(cmdline[1:])
    return directory, executable, argument_string
Return path, cmd and arguments for a process cmdline.
def _get_field_mapping(self, schema):
    """Get the search-index mapping for a single field schema.

    :param schema: field schema

    Falls through (returning None implicitly) for field types that need
    no special mapping.
    """
    if 'mapping' in schema:
        # explicit mapping on the field wins
        return schema['mapping']
    elif schema['type'] == 'dict' and 'schema' in schema:
        return self._get_mapping(schema['schema'])
    elif schema['type'] == 'list' and 'schema' in schema.get('schema', {}):
        # list of documents: map the inner document schema
        return self._get_mapping(schema['schema']['schema'])
    elif schema['type'] == 'datetime':
        return {'type': 'date'}
    elif schema['type'] == 'string' and schema.get('unique'):
        return {'type': 'string', 'index': 'not_analyzed'}
Get mapping for single field schema. :param schema: field schema
def rebuild( self ):
    """Replace the child editor widget with one matching the current column type."""
    plugins.init()
    # suppress signals/repaints while the editor is swapped out
    self.blockSignals(True)
    self.setUpdatesEnabled(False)
    if ( self._editor ):
        self._editor.close()
        self._editor.setParent(None)
        self._editor.deleteLater()
        self._editor = None
    plugin_class = plugins.widgets.get(self._columnType)
    if ( plugin_class ):
        self._editor = plugin_class(self)
        self.layout().addWidget(self._editor)
    self.blockSignals(False)
    self.setUpdatesEnabled(True)
Clears out all the child widgets from this widget and creates the widget that best matches the column properties for this edit.
def load(self, infile):
    """Deserialize a model from a stored file.

    By default, unpickle an entire object and adopt its attributes. If
    `dump` is overridden to use a different storage format, `load` should
    be as well.

    SECURITY NOTE: pickle.load can execute arbitrary code; only load
    trusted files.

    :param infile: A file-like object from which to read the serialized model.
    """
    model = pickle.load(infile)
    self.__dict__.update(model.__dict__)
Deserialize a model from a stored file. By default, unpickle an entire object. If `dump` is overridden to use a different storage format, `load` should be as well. :param file outfile: A file-like object from which to retrieve the serialized model.
def setmem(vm_, memory, config=False, **kwargs):
    """Change the amount of memory allocated to a VM (must be shut down).

    :param vm_: name of the domain
    :param memory: memory amount to set in MB
    :param config: if True then libvirt is asked to modify the config as well
    :param kwargs: connection/username/password overrides for libvirt

    Returns False when the domain is not shut down; otherwise True when
    both libvirt calls succeed.
    """
    conn = __get_conn(**kwargs)
    dom = _get_domain(conn, vm_)
    if VIRT_STATE_NAME_MAP.get(dom.info()[0], 'unknown') != 'shutdown':
        return False
    flags = libvirt.VIR_DOMAIN_MEM_MAXIMUM
    if config:
        flags = flags | libvirt.VIR_DOMAIN_AFFECT_CONFIG
    # libvirt operates in KiB; callers pass MB
    ret1 = dom.setMemoryFlags(memory * 1024, flags)
    ret2 = dom.setMemoryFlags(memory * 1024, libvirt.VIR_DOMAIN_AFFECT_CURRENT)
    conn.close()
    # both setMemoryFlags calls return 0 on success
    return ret1 == ret2 == 0
Changes the amount of memory allocated to VM. The VM must be shutdown for this to work. :param vm_: name of the domain :param memory: memory amount to set in MB :param config: if True then libvirt will be asked to modify the config as well :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.setmem <domain> <size> salt '*' virt.setmem my_domain 768
def in6_getLocalUniquePrefix():
    """
    Returns a pseudo-randomly generated Local Unique prefix. Function
    follows recommendation of Section 3.2.2 of RFC 4193 for prefix
    generation.
    """
    # 64-bit NTP-style timestamp: 32-bit seconds + 32-bit fraction.
    now = time.time()
    seconds = int(now)
    fraction = int((now - seconds) * (2 ** 32))
    timestamp = struct.pack("!II", seconds, fraction)

    # EUI-64 interface identifier derived from a random MAC address.
    mac = RandMAC()
    eui64 = inet_pton(socket.AF_INET6, '::' + in6_mactoifaceid(mac))[8:]

    # Global ID = first 40 bits of SHA-1(timestamp | EUI-64), per RFC 4193.
    import hashlib
    globalid = hashlib.sha1(timestamp + eui64).digest()[:5]

    # fd00::/8 prefix + 40-bit global ID, remainder zero-filled.
    return inet_ntop(socket.AF_INET6, b'\xfd' + globalid + b'\x00' * 10)
Returns a pseudo-randomly generated Local Unique prefix. Function follows recommendation of Section 3.2.2 of RFC 4193 for prefix generation.
def _get_token():
    '''
    Get an auth token from the Rally web service using the credentials
    configured under the ``rallydev`` key of the minion/master opts.
    Returns None when the response could not be decoded.
    '''
    rallydev_opts = __opts__.get('rallydev', {})
    username = rallydev_opts.get('username', None)
    password = rallydev_opts.get('password', None)

    url = 'https://rally1.rallydev.com/slm/webservice/v2.0/security/authorize'
    result = salt.utils.http.query(
        url,
        decode=True,
        decode_type='json',
        text=True,
        status=True,
        username=username,
        password=password,
        cookies=True,
        persist_session=True,
        opts=__opts__,
    )

    # No decoded body means the authorize call failed.
    if 'dict' not in result:
        return None
    return result['dict']['OperationResult']['SecurityToken']
Get an auth token
def draw(self, **kwargs):
    """
    Renders the rfecv curve: the mean cross-validated score per
    feature-subset size with a one-sigma band, and a vertical marker at
    the selected number of features.
    """
    sizes = self.n_feature_subsets_
    mean_scores = self.cv_scores_.mean(axis=1)
    score_sigmas = self.cv_scores_.std(axis=1)

    # Shaded one-standard-deviation band around the mean curve.
    self.ax.fill_between(sizes,
                         mean_scores - score_sigmas,
                         mean_scores + score_sigmas,
                         alpha=0.25)
    self.ax.plot(sizes, mean_scores, 'o-')

    # Dashed vertical line marking the chosen subset size and best score.
    label = "n_features = {}\nscore = {:0.3f}".format(
        self.n_features_, self.cv_scores_.mean(axis=1).max()
    )
    self.ax.axvline(self.n_features_, c='k', ls='--', label=label)

    return self.ax
Renders the rfecv curve.
def charm_dir():
    """Return the root directory of the current charm.

    Prefers the ``JUJU_CHARM_DIR`` environment variable and falls back to
    the legacy ``CHARM_DIR`` when the former is unset.
    """
    juju_dir = os.environ.get('JUJU_CHARM_DIR')
    if juju_dir is None:
        return os.environ.get('CHARM_DIR')
    return juju_dir
Return the root directory of the current charm
def gid_exists(gid):
    """Check if a gid exists in the system group database."""
    try:
        grp.getgrgid(gid)
    except KeyError:
        # No group with this gid.
        return False
    return True
Check if a gid exists
def create_changelog(project_dir=os.curdir, bugtracker_url='', rpm_format=False):
    '''
    Creates the changelog file, if not in a package.

    :param project_dir: Path to the git repo of the project.
    :type project_dir: str
    :param bugtracker_url: Url to the bug tracker for the issues.
    :type bugtracker_url: str
    :param rpm_format: if set to True, will make the changelog rpm-compatible.
    :type rpm_format: bool
    :raises RuntimeError: If the changelog could not be retrieved
    '''
    # A PKG-INFO file means we are inside an sdist/package: nothing to do.
    if os.path.exists(os.path.join(project_dir, 'PKG-INFO')):
        return

    with open('CHANGELOG', 'wb') as changelog_fd:
        changelog = get_changelog(
            project_dir=project_dir,
            bugtracker_url=bugtracker_url,
            rpm_format=rpm_format,
        )
        changelog_fd.write(changelog.encode('utf-8'))
Creates the changelog file, if not in a package. :param project_dir: Path to the git repo of the project. :type project_dir: str :param bugtracker_url: Url to the bug tracker for the issues. :type bugtracker_url: str :param rpm_format: if set to True, will make the changelog rpm-compatible. :type rpm_format: bool :raises RuntimeError: If the changelog could not be retrieved
def track(*fields):
    """Decorator used to track changes on Model's fields.

    :Example:

    >>> @track('name')
    ... class Human(models.Model):
    ...     name = models.CharField(max_length=30)
    """
    def decorator(cls):
        # Wire up change tracking, then expose the tracking URL helper.
        _track_class(cls, fields)
        _add_get_tracking_url(cls)
        return cls

    return decorator
Decorator used to track changes on Model's fields. :Example: >>> @track('name') ... class Human(models.Model): ... name = models.CharField(max_length=30)
def master_pub(self):
    '''
    Return the master publish URI, built from the configured master
    address and publish port, honoring optional source address/port
    overrides.
    '''
    source_ip = self.opts.get('source_ip')
    source_port = self.opts.get('source_publish_port')
    return _get_master_uri(self.opts['master_ip'],
                           self.publish_port,
                           source_ip=source_ip,
                           source_port=source_port)
Return the master publish URI, built from the configured master address and publish port.
def get_all_current_trains(self, train_type=None, direction=None):
    """
    Returns all trains that are due to start in the next 10 minutes

    @param train_type: ['mainline', 'suburban', 'dart']
    @param direction: optional direction used to prune the results
    """
    # Choose the endpoint: a type-filtered query needs a parameter,
    # the plain query takes none.
    if train_type:
        url = self.api_base_url + 'getCurrentTrainsXML_WithTrainType'
        params = {'TrainType': STATION_TYPE_TO_CODE_DICT[train_type]}
    else:
        url = self.api_base_url + 'getCurrentTrainsXML'
        params = None

    response = requests.get(url, params=params, timeout=10)
    if response.status_code != 200:
        # Treat any non-OK response as "no trains".
        return []

    trains = self._parse_all_train_data(response.content)
    if direction is not None:
        trains = self._prune_trains(trains, direction=direction)
    return trains
Returns all trains that are due to start in the next 10 minutes @param train_type: ['mainline', 'suburban', 'dart']
def safe_version(version):
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # Already PEP 440 parseable: return the normalized form.
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # Fallback: spaces become dots, any other run of non-alphanumeric,
        # non-dot characters collapses to a single dash.
        dotted = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', dotted)
Convert an arbitrary string to a standard version string
def tiles_are_equal(tile_data_1, tile_data_2, fmt):
    """
    Returns True if the tile data is equal in tile_data_1 and tile_data_2.

    Zipped metatiles need a content-aware comparison, because the zip
    container embeds metadata such as timestamps and does not control file
    ordering; every other format is a plain byte-wise equality check.
    """
    if fmt and fmt == zip_format:
        return metatiles_are_equal(tile_data_1, tile_data_2)
    return tile_data_1 == tile_data_2
Returns True if the tile data is equal in tile_data_1 and tile_data_2. For most formats, this is a simple byte-wise equality check. For zipped metatiles, we need to check the contents, as the zip format includes metadata such as timestamps and doesn't control file ordering.
def where(self, predicate):
    '''Filters elements according to whether they match a predicate.

    Note: This method uses deferred execution.

    Args:
        predicate: A unary function applied to each element of the source
            sequence; elements for which it returns True are kept.

    Returns:
        A Queryable over the elements for which the predicate is True.

    Raises:
        ValueError: If the Queryable is closed.
        TypeError: If the predicate is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call where() on a closed Queryable.")
    if not is_callable(predicate):
        message = "where() parameter predicate={predicate} is not callable"
        raise TypeError(message.format(predicate=repr(predicate)))

    filtered = ifilter(predicate, self)
    return self._create(filtered)
Filters elements according to whether they match a predicate. Note: This method uses deferred execution. Args: predicate: A unary function which is applied to each element in the source sequence. Source elements for which the predicate returns True will be present in the result. Returns: A Queryable over those elements of the source sequence for which the predicate is True. Raises: ValueError: If the Queryable is closed. TypeError: If the predicate is not callable.
def watch(self, selector, callback):
    """Call a function whenever a stream changes.

    Args:
        selector (DataStreamSelector): The selector to watch.  If this is
            None, it is treated as a wildcard selector that matches every
            stream.
        callback (callable): The function to call when a new reading is
            pushed.  Called as ``callback(stream, value)``.
    """
    # setdefault replaces the manual check-then-insert, avoiding a double
    # dict lookup while preserving the set-of-callbacks-per-selector shape.
    self._monitors.setdefault(selector, set()).add(callback)
Call a function whenever a stream changes. Args: selector (DataStreamSelector): The selector to watch. If this is None, it is treated as a wildcard selector that matches every stream. callback (callable): The function to call when a new reading is pushed. Callback is called as: callback(stream, value)
def sign_statement(self, statement, node_name, key=None, key_file=None, node_id=None, id_attr=''):
    """Sign a SAML statement.

    :param statement: The statement to be signed
    :param node_name: string like 'urn:oasis:names:...:Assertion'
    :param key: The key to be used for the signing, either this or
    :param key_file: The file where the key can be found
    :param node_id:
    :param id_attr: The attribute name for the identifier, normally one of
        'id', 'Id' or 'ID'
    :return: The signed statement
    """
    id_attr = id_attr or self.id_attr

    # Resolve which key file to sign with: an in-memory key is written to
    # a temporary PEM file; with neither key nor file supplied, fall back
    # to the instance's configured key file.
    if key and not key_file:
        _, key_file = make_temp(str(key).encode(), '.pem')
    elif not key and not key_file:
        key_file = self.key_file

    return self.crypto.sign_statement(
        statement, node_name, key_file, node_id, id_attr)
Sign a SAML statement. :param statement: The statement to be signed :param node_name: string like 'urn:oasis:names:...:Assertion' :param key: The key to be used for the signing, either this or :param key_file: The file where the key can be found :param node_id: :param id_attr: The attribute name for the identifier, normally one of 'id','Id' or 'ID' :return: The signed statement
def load(self, data, many=None, partial=None):
    """Deserialize a data structure to an object.

    Any keyword arguments stored inside the resumption token are flattened
    into the top level of the deserialized data.
    """
    result = super(ResumptionTokenSchema, self).load(
        data, many=many, partial=partial
    )
    token = result.data.get('resumptionToken', {})
    result.data.update(token.get('kwargs', {}))
    return result
Deserialize a data structure to an object.
def post_build(self, pkt, pay):
    """Set the ByteField 'length' (byte 4 of the header) to the payload
    length when it was left unset."""
    if self.length is None:
        # Patch the single length byte in the already-built header.
        pkt = pkt[:4] + chb(len(pay)) + pkt[5:]
    return pkt + pay
This will set the ByteField 'length' to the correct value.
def bar(h: Histogram2D, *, barmode: str = DEFAULT_BARMODE, alpha: float = DEFAULT_ALPHA, **kwargs):
    """Bar plot.

    Parameters
    ----------
    alpha: Opacity (0.0 - 1.0)
    barmode : "overlay" | "group" | "stack"
    """
    # Data-extraction options are split off so the remaining kwargs pass
    # through to the Bar traces.
    data_kwargs = pop_many(kwargs, "density", "cumulative", "flatten")

    traces = []
    for histogram in h:
        traces.append(go.Bar(
            x=histogram.bin_centers,
            y=get_data(histogram, **data_kwargs),
            width=histogram.bin_widths,
            name=histogram.name,
            opacity=alpha,
            **kwargs
        ))

    layout = go.Layout(barmode=barmode)
    _add_ticks(layout.xaxis, h[0], kwargs)
    return go.Figure(data=traces, layout=layout)
Bar plot. Parameters ---------- alpha: Opacity (0.0 - 1.0) barmode : "overlay" | "group" | "stack"
def mkdir(self, pathobj, _):
    """Creates remote directory via a REST PUT.

    Note that this operation is not recursive.
    """
    if not pathobj.drive or not pathobj.root:
        raise RuntimeError("Full path required: '%s'" % str(pathobj))
    if pathobj.exists():
        # errno 17 == EEXIST, matching os.mkdir semantics.
        raise OSError(17, "File exists: '%s'" % str(pathobj))

    url = str(pathobj) + '/'
    text, code = self.rest_put(url,
                               session=pathobj.session,
                               verify=pathobj.verify,
                               cert=pathobj.cert)
    # The server answers 201 Created on success.
    if code != 201:
        raise RuntimeError("%s %d" % (text, code))
Creates remote directory Note that this operation is not recursive
def _query_select_options(self, query, select_columns=None):
    """
    Add select load options to query. The goal is to only SQL select what
    is requested.

    :param query: SQLAlchemy Query obj
    :param select_columns: (list) of columns; a "<relation>.<column>" entry
        restricts the related model's SELECT to that column
    :return: SQLAlchemy Query obj
    """
    if select_columns:
        _load_options = list()
        for column in select_columns:
            if "." in column:
                # Dotted name: join the related model (only once) and
                # load just the named column from the relation.
                model_relation = self.get_related_model(column.split(".")[0])
                if not self.is_model_already_joinded(query, model_relation):
                    query = query.join(model_relation)
                _load_options.append(
                    Load(model_relation).load_only(column.split(".")[1])
                )
            else:
                # Plain column on this model: restrict the SELECT to it,
                # unless it is a relation or a callable attribute
                # (property/method), for which no restriction is applied.
                if not self.is_relation(column) and not hasattr(
                    getattr(self.obj, column), "__call__"
                ):
                    _load_options.append(Load(self.obj).load_only(column))
                else:
                    _load_options.append(Load(self.obj))
        query = query.options(*tuple(_load_options))
    return query
Add select load options to query. The goal is to only SQL select what is requested :param query: SQLAlchemy Query obj :param select_columns: (list) of columns :return: SQLAlchemy Query obj
def put_cache(self, minions):
    '''
    Publish the given minion list to the ConCache update socket.
    '''
    serialized = self.serial.dumps(minions)
    self.cupd_out.send(serialized)
Publish the given minions to the ConCache.
def coarse_grain(G, ncg):
    """Coarse-grain last index of array ``G``.

    Bin the last index of array ``G`` in bins of width ``ncg``, and
    replace each bin by its average. Return the binned results.

    Args:
        G: Array to be coarse-grained.
        ncg: Bin width for coarse-graining.
    """
    if ncg <= 1:
        return G
    G = numpy.asarray(G)

    # Ceiling division: a partial final bin is kept and averaged over its
    # actual (smaller) width.
    nbin = -(-G.shape[-1] // ncg)

    binned = []
    for start in range(0, nbin * ncg, ncg):
        chunk = G[..., start:start + ncg]
        binned.append(chunk.sum(axis=-1) / chunk.shape[-1])
    return numpy.transpose(binned)
Coarse-grain last index of array ``G``. Bin the last index of array ``G`` in bins of width ``ncg``, and replace each bin by its average. Return the binned results. Args: G: Array to be coarse-grained. ncg: Bin width for coarse-graining.
def check_bidi(chars):
    """Check proper bidirectionality as per stringprep.

    Operates on a list of unicode characters provided in `chars`.
    Raises ValueError on a violation; returns None otherwise.
    """
    if not chars:
        return

    # The bidi rules only apply when at least one R/AL character is present.
    if not any(is_RandALCat(c) for c in chars):
        return

    if any(is_LCat(c) for c in chars):
        raise ValueError("L and R/AL characters must not occur in the same"
                         " string")
    if not is_RandALCat(chars[0]) or not is_RandALCat(chars[-1]):
        raise ValueError("R/AL string must start and end with R/AL character.")
Check proper bidirectionality as per stringprep. Operates on a list of unicode characters provided in `chars`.
def _add_finder(importer, finder):
    """Register a new pkg_resources path finder that does not replace the
    existing finder."""
    current = _get_finder(importer)
    if current:
        # Chain behind whatever finder is already registered.
        combined = ChainedFinder.of(current, finder)
        pkg_resources.register_finder(importer, combined)
    else:
        pkg_resources.register_finder(importer, finder)
Register a new pkg_resources path finder that does not replace the existing finder.
def grid_reload_from_ids(oargrid_jobids):
    """Reload all running or pending jobs of Grid'5000 from their ids

    Args:
        oargrid_jobids (list): list of ``(site, oar_jobid)`` identifying
            the jobs on each site

    Returns:
        The list of python-grid5000 jobs retrieved
    """
    gk = get_api_client()
    return [gk.sites[site].jobs[job_id] for site, job_id in oargrid_jobids]
Reload all running or pending jobs of Grid'5000 from their ids Args: oargrid_jobids (list): list of ``(site, oar_jobid)`` identifying the jobs on each site Returns: The list of python-grid5000 jobs retrieved
def list_files(start_path):
    """Render a directory tree rooted at *start_path* as indented text
    (a replacement for the unix ``tree`` command)."""
    # Leading empty entry reproduces the initial newline of the output.
    lines = [u'']
    for root, dirs, files in os.walk(start_path):
        # Nesting depth measured by path separators below the start path.
        depth = root.replace(start_path, '').count(os.sep)
        dir_indent = ' ' * 4 * depth
        lines.append(u'{}{}/'.format(dir_indent, os.path.basename(root)))
        file_indent = ' ' * 4 * (depth + 1)
        for name in files:
            lines.append(u'{}{}'.format(file_indent, name))
    return u'\n'.join(lines) + u'\n'
Replacement for the unix ``tree`` command: returns the directory tree as an indented text listing.
def remove_gaps(A, B):
    """Return copies of the aligned sequences *A* and *B* with every
    column dropped in which either sequence has a gap ('-' or '.')."""
    gap_chars = ('-', '.')
    kept_a = []
    kept_b = []
    for col_a, col_b in zip(A, B):
        if col_a in gap_chars or col_b in gap_chars:
            continue
        kept_a.append(col_a)
        kept_b.append(col_b)
    return ''.join(kept_a), ''.join(kept_b)
Skip each alignment column in which either sequence has a gap.