code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def asynchronous(function, event):
    """Run *function* in a background daemon thread via ``synchronous``.

    The worker thread is marked as daemon so it never blocks interpreter
    shutdown. Returns ``None``; the thread is fire-and-forget.
    """
    worker = Thread(target=synchronous, args=(function, event))
    worker.daemon = True
    worker.start()
Runs the function asynchronously taking care of exceptions.
def after_import(self, dataset, result, using_transactions, dry_run, **kwargs): if not dry_run and any(r.import_type == RowResult.IMPORT_TYPE_NEW for r in result.rows): connection = connections[DEFAULT_DB_ALIAS] sequence_sql = connection.ops.sequence_reset_sql(no_style(), [self._meta.mod...
Reset the SQL sequences after new objects are imported
def newMail(self, data, message):
    """Send a mail to a PlugIt server.

    :param data: response id (stringified into the POST payload)
    :param message: mail body (stringified into the POST payload)
    :return: True if the server answered HTTP 200 with result == 'Ok'
    """
    payload = {'response_id': str(data), 'message': str(message)}
    response = self.doQuery('mail', method='POST', postParameters=payload)
    if response.status_code != 200:
        return False
    return response.json()['result'] == 'Ok'
Send a mail to a plugit server
def __get_num_preds(self, num_iteration, nrow, predict_type): if nrow > MAX_INT32: raise LightGBMError('LightGBM cannot perform prediction for data' 'with number of rows greater than MAX_INT32 (%d).\n' 'You can split your data into chun...
Get size of prediction result.
def rmgen(self, idx): stagens = [] for device, stagen in zip(self.devman.devices, self.call.stagen): if stagen: stagens.append(device) for gen in idx: for stagen in stagens: if gen in self.__dict__[stagen].uid.keys(): se...
Remove the static generators if their dynamic models exist Parameters ---------- idx : list A list of static generator idx Returns ------- None
def isObjectClassified(self, objectName, minOverlap=None, maxL2Size=None): L2Representation = self.getL2Representations() objectRepresentation = self.objectL2Representations[objectName] sdrSize = self.config["L2Params"]["sdrSize"] if minOverlap is None: minOverlap = sdrSize / 2 if maxL2Size is...
Return True if objectName is currently unambiguously classified by every L2 column. Classification is correct and unambiguous if the current L2 overlap with the true object is greater than minOverlap and if the size of the L2 representation is no more than maxL2Size :param minOverlap: min overlap to co...
def add_rel(self, source_id, target_id, rel):
    """Cache-aware relationship insert.

    Forwards to ``self.workbench.add_rel`` only for (source, target)
    pairs not yet seen, then records the pair in ``self.rel_cache``.
    """
    pair = (source_id, target_id)
    if pair in self.rel_cache:
        return
    self.workbench.add_rel(source_id, target_id, rel)
    self.rel_cache.add(pair)
Cache aware add_rel
def run(self): target = getattr(self, '_Thread__target', getattr(self, '_target', None)) args = getattr(self, '_Thread__args', getattr(self, '_args', None)) kwargs = getattr(self, '_Thread__kwargs', getattr(self, '_kwargs', None)) if target is not None: self._return = target(...
Runs the thread. Args: self (ThreadReturn): the ``ThreadReturn`` instance Returns: ``None``
def toimages(self, chunk_size='auto'): from thunder.images.images import Images if chunk_size is 'auto': chunk_size = str(max([int(1e5/prod(self.baseshape)), 1])) n = len(self.shape) - 1 if self.mode == 'spark': return Images(self.values.swap(tuple(range(n)), (0,)...
Converts to images data. This method is equivalent to series.toblocks(size).toimages(). Parameters ---------- chunk_size : str or tuple, size of series chunk used during conversion, default = 'auto' String interpreted as memory size (in kilobytes, e.g. '64'). Th...
def _get_prefix_length(number1, number2, bits): for i in range(bits): if number1 >> i == number2 >> i: return bits - i return 0
Get the number of leading bits that are same for two numbers. Args: number1: an integer. number2: another integer. bits: the maximum number of bits to compare. Returns: The number of leading bits that are the same for two numbers.
def basic_lstm(inputs, state, num_units, name=None): input_shape = common_layers.shape_list(inputs) cell = tf.nn.rnn_cell.BasicLSTMCell( num_units, name=name, reuse=tf.AUTO_REUSE) if state is None: state = cell.zero_state(input_shape[0], tf.float32) outputs, new_state = cell(inputs, state) return ou...
Basic LSTM.
def make_subdirs(self): subpath = self.full_path[len(self.context.root):] log.debug("make_subdirs: subpath is %s", subpath) dirs = subpath.split(os.sep)[:-1] log.debug("dirs is %s", dirs) current = self.context.root for dir in dirs: if dir: cur...
The purpose of this method is to, if necessary, create all of the subdirectories leading up to the file to the written.
def fetch(self): since = time.mktime(self.options.since.datetime.timetuple()) until = time.mktime(self.options.until.datetime.timetuple()) log.info("Searching for links saved by {0}".format(self.user)) self.stats = self.parent.bitly.user_link_history(created_after=since, ...
The Bit.ly API expects Unix timestamps.
def _single_feature_logliks_one_step(self, feature, models): x_non_na = feature[~feature.isnull()] if x_non_na.empty: return pd.DataFrame() else: dfs = [] for name, model in models.items(): df = model.single_feature_logliks(feature) ...
Get log-likelihood of models at each parameterization for given data Parameters ---------- feature : pandas.Series Percent-based values of a single feature. May contain NAs, but only non-NA values are used. Returns ------- logliks : pandas.DataFr...
def convert_block_dicts_to_string(self, block_1st2nd, block_1st, block_2nd, block_3rd): out = "" if self.codon_positions in ['ALL', '1st-2nd']: for gene_code, seqs in block_1st2nd.items(): out += '>{0}_1st-2nd\n----\n'.format(gene_code) for seq in seqs: ...
Takes into account whether we need to output all codon positions.
def status(self, message, rofi_args=None, **kwargs):
    """Show a non-blocking status message via ``rofi -e``.

    :param message: text to display
    :param rofi_args: extra CLI arguments appended after the common ones
    """
    command = ['rofi', '-e', message]
    command.extend(self._common_args(allow_fullscreen=False, **kwargs))
    command.extend(rofi_args or [])
    self._run_nonblocking(command)
Show a status message. This method is non-blocking, and intended to give a status update to the user while something is happening in the background. To close the window, either call the close() method or use any of the display methods to replace it with a different window. Ful...
def json_decoder(content, *args, **kwargs):
    """Decode a raw byte payload as JSON for service_client.

    Empty/None content yields None; extra args are accepted and ignored.
    """
    if not content:
        return None
    return json.loads(content.decode())
Json decoder parser to be used by service_client
def create_process_worker(self, cmd_list, environ=None):
    """Create, register and return a new ProcessWorker.

    :param cmd_list: command line as a list of strings
    :param environ: optional environment mapping for the subprocess
    """
    new_worker = ProcessWorker(cmd_list, environ=environ)
    self._create_worker(new_worker)
    return new_worker
Create a new process worker instance.
def normalize(name):
    """Normalize *name* for the Statsd convention.

    Drops ':' and '%' and replaces spaces with underscores in one pass.
    """
    return name.translate(str.maketrans({':': None, '%': None, ' ': '_'}))
Normalize name for the Statsd convention
def flip_alleles(genotypes):
    """Flip the alleles of a Genotypes instance (deprecated).

    Swaps reference/coded and recodes dosages as ``2 - g``.
    Mutates and returns the same object.
    """
    warnings.warn("deprecated: use 'Genotypes.flip_coded'", DeprecationWarning)
    old_ref = genotypes.reference
    genotypes.reference = genotypes.coded
    genotypes.coded = old_ref
    genotypes.genotypes = 2 - genotypes.genotypes
    return genotypes
Flip the alleles of an Genotypes instance.
def s_demand(self, bus):
    """Return the total complex power demand at *bus*.

    Dispatchable-load generators attached to the bus count as negative
    injections, so the result is bus demand minus their output.
    """
    vload = [complex(g.p, g.q) for g in self.generators
             if g.is_load and (g.bus == bus)]
    injected = array(vload, dtype=complex64)
    demand = complex(bus.p_demand, bus.q_demand)
    return demand - sum(injected)
Returns the total complex power demand.
def validate(self, signature, timestamp, nonce): if not self.token: raise RuntimeError('WEIXIN_TOKEN is missing') if self.expires_in: try: timestamp = int(timestamp) except (ValueError, TypeError): return False delta = time....
Validate request signature. :param signature: A string signature parameter sent by weixin. :param timestamp: A int timestamp parameter sent by weixin. :param nonce: A int nonce parameter sent by weixin.
def op_decanonicalize(op_name, canonical_op):
    """Map a canonical parsed-operation back to its legacy representation.

    Falls back to the canonical form when no decanonicalizer is
    registered for *op_name* (backwards-compatibility shim).
    """
    if op_name in DECANONICALIZE_METHODS:
        return DECANONICALIZE_METHODS[op_name](canonical_op)
    return canonical_op
Get the current representation of a parsed operation's data, given the canonical representation Meant for backwards-compatibility
def get_data(histogram: HistogramBase, density: bool = False, cumulative: bool = False, flatten: bool = False) -> np.ndarray: if density: if cumulative: data = (histogram / histogram.total).cumulative_frequencies else: data = histogram.densities else: if cumulativ...
Get histogram data based on plotting parameters. Parameters ---------- density : Whether to divide bin contents by bin size cumulative : Whether to return cumulative sums instead of individual flatten : Whether to flatten multidimensional bins
def install(path, name=None): if name is None: name = os.path.splitext(os.path.basename(path))[0] callermod = inspect.getmodule(inspect.stack()[1][0]) name = '%s.%s' % (callermod.__name__, name) if name in sys.modules: return sys.modules[name] if not os.path.isabs(path): call...
Compiles a Thrift file and installs it as a submodule of the caller. Given a tree organized like so:: foo/ __init__.py bar.py my_service.thrift You would do, .. code-block:: python my_service = thriftrw.install('my_service.thrift') To install ``m...
def _render_relationships(self, resource): relationships = {} related_models = resource.__mapper__.relationships.keys() primary_key_val = getattr(resource, self.primary_key) if self.dasherize: mapped_relationships = { x: dasherize(underscore(x)) for x in relat...
Render the resource's relationships.
def get(self, key, lang=None):
    """Yield triple objects for this node, optionally filtered by language.

    :param key: predicate of the triple
    :param lang: if given, only objects with that language tag are yielded
    :rtype: generator of Literal / BNode / URIRef
    """
    for obj in self.graph.objects(self.asNode(), key):
        if lang is None or obj.language == lang:
            yield obj
Returns triple related to this node. Can filter on lang :param key: Predicate of the triple :param lang: Language of the triple if applicable :rtype: Literal or BNode or URIRef
def _make_scatter_logfile_name(cls, key, linkname, job_config): logfile = job_config.get('logfile', "%s_%s_%s.log" % (cls.default_prefix_logfile, linkname, key)) job_config['logfile'] = logfile
Hook to insert the name of a logfile into the input config.
def maybe_convert_platform_interval(values):
    """Platform conversion with IntervalArray-friendly special cases.

    Empty list/tuple input returns an int64 array (object dtype is
    prohibited for IntervalArray); categoricals are densified first.
    """
    if isinstance(values, (list, tuple)) and not values:
        return np.array([], dtype=np.int64)
    if is_categorical_dtype(values):
        values = np.asarray(values)
    return maybe_convert_platform(values)
Try to do platform conversion, with special casing for IntervalArray. Wrapper around maybe_convert_platform that alters the default return dtype in certain cases to be compatible with IntervalArray. For example, empty lists return with integer dtype instead of object dtype, which is prohibited for Inte...
def config_default(option, default=None, type=None, section=cli.name): def f(option=option, default=default, type=type, section=section): config = read_config() if type is None and default is not None: type = builtins.type(default) get_option = option_getter(type) try: ...
Guesses a default value of a CLI option from the configuration. :: @click.option('--locale', default=config_default('locale'))
def register_pubkey(self): p = pkcs_os2ip(self.dh_p) g = pkcs_os2ip(self.dh_g) pn = dh.DHParameterNumbers(p, g) y = pkcs_os2ip(self.dh_Ys) public_numbers = dh.DHPublicNumbers(y, pn) s = self.tls_session s.server_kx_pubkey = public_numbers.public_key(default_backen...
XXX Check that the pubkey received is in the group.
def name(self): if not self._initialized: raise pycdlibexception.PyCdlibInternalError('SL record not yet initialized!') outlist = [] continued = False for comp in self.symlink_components: name = comp.name() if name == b'/': outlist = []...
Generate a string that contains all components of the symlink. Parameters: None Returns: String containing all components of the symlink.
def get_default_location(self):
    """Return the default locations, expanded via ``get_location``.

    Concatenates the lookup results for every entry in
    ``self.distdefault``, preserving order.
    """
    return [entry
            for loc in self.distdefault
            for entry in self.get_location(loc)]
Return the default location.
def new(self, **dict): if not self._item_new_path: raise AttributeError('new is not available for %s' % self._item_name) for tag in self._object._remap_to_id: self._object._remap_tag_to_tag_id(tag, dict) target = self._item_new_path payload = json.dumps({self._ite...
Create a new item with the provided dict information. Returns the new item.
def get_week_start_end_day():
    """Return (monday, sunday) dates of the current week.

    Monday is weekday 0, so subtracting today's weekday lands on the
    week start; adding six days gives the week end.
    """
    today = date.today()
    monday = today - timedelta(days=today.weekday())
    return monday, monday + timedelta(days=6)
Get the week start date and end date
def __recieve_raw_data(self, size): data = [] if self.verbose: print ("expecting {} bytes raw data".format(size)) while size > 0: data_recv = self.__sock.recv(size) recieved = len(data_recv) if self.verbose: print ("partial recv {}".format(recieved)) ...
partial data ?
def d(msg, *args, **kwargs):
    """Log *msg* at DEBUG level, forwarding lazy %-style args to logging.log.

    ``DEBUG`` is a module-level constant defined elsewhere in this file.
    """
    return logging.log(DEBUG, msg, *args, **kwargs)
Log a message at debug level.
def remove_group_roles(request, group, domain=None, project=None): client = keystoneclient(request, admin=True) roles = client.roles.list(group=group, domain=domain, project=project) for role in roles: remove_group_role(request, role=role.id, group=group, domain=domain, pro...
Removes all roles from a group on a domain or project.
def _as_array(self, fmt): assert self.dimensions, \ '{}: cannot get value as {} array!'.format(self.name, fmt) elems = array.array(fmt) elems.fromstring(self.bytes) return np.array(elems).reshape(self.dimensions)
Unpack the raw bytes of this param using the given data format.
def _gc_rule_from_pb(gc_rule_pb): rule_name = gc_rule_pb.WhichOneof("rule") if rule_name is None: return None if rule_name == "max_num_versions": return MaxVersionsGCRule(gc_rule_pb.max_num_versions) elif rule_name == "max_age": max_age = _helpers._duration_pb_to_timedelta(gc_rul...
Convert a protobuf GC rule to a native object. :type gc_rule_pb: :class:`.table_v2_pb2.GcRule` :param gc_rule_pb: The GC rule to convert. :rtype: :class:`GarbageCollectionRule` or :data:`NoneType <types.NoneType>` :returns: An instance of one of the native rules defined in :module:`colum...
def add_transaction(self, transaction): for item in transaction: if item not in self.__transaction_index_map: self.__items.append(item) self.__transaction_index_map[item] = set() self.__transaction_index_map[item].add(self.__num_transaction) self._...
Add a transaction. Arguments: transaction -- A transaction as an iterable object (eg. ['A', 'B']).
def _sorted_keys(self, keys, startkey, reverse=False): tuple_key = lambda t: t[::-1] if reverse: tuple_cmp = lambda t: t[::-1] > startkey[::-1] else: tuple_cmp = lambda t: t[::-1] < startkey[::-1] searchkeys = sorted(keys, key=tuple_key, reverse=reverse) s...
Generator that yields sorted keys starting with startkey Parameters ---------- keys: Iterable of tuple/list \tKey sequence that is sorted startkey: Tuple/list \tFirst key to be yielded reverse: Bool \tSort direction reversed if True
def list_components(self, dependency_order=True): if dependency_order: return list(itertools.chain.from_iterable([sorted(list(batch)) for batch in foundations.common.dependency_resolver( ...
Lists the Components by dependency resolving. Usage:: >>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",)) >>> manager.register_components() True >>> manager.list_components() [u'core.tests_component_a', u'core.tests_co...
def unbind(self, dependency, svc, svc_ref):
    """Called by a dependency manager to remove an injected service and
    update the component life cycle.

    The whole sequence runs under the component lock so lifecycle checks
    and binding updates are atomic.
    """
    with self._lock:
        # Re-evaluate lifecycle before touching the binding
        self.check_lifecycle()
        self.__unset_binding(dependency, svc, svc_ref)
        # A successful re-binding may change the lifecycle again
        if self.update_bindings():
            self.check_lifecycle()
Called by a dependency manager to remove an injected service and to update the component life cycle.
def address(self, ip, **kwargs):
    """Add Address indicator data to this Batch object.

    :param ip: the Indicator value
    :param kwargs: optional indicator metadata (confidence, date_added, ...)
    """
    return self._indicator(Address(ip, **kwargs))
Add Address data to Batch object. Args: ip (str): The value for this Indicator. confidence (str, kwargs): The threat confidence for this Indicator. date_added (str, kwargs): The date timestamp the Indicator was created. last_modified (str, kwargs): The date times...
def job_detail(job_id=None):
    """Render a detailed description page for one Job.

    404s when no job with the given id exists.
    """
    matches = [j for j in get_jobs() if str(j['job_id']) == job_id]
    if not matches:
        abort(404)
    return render_template('job_detail.html',
                           job=matches[0],
                           hosts=dagobah.get_hosts())
Show a detailed description of a Job's status.
def cast(self, mapping):
    """Allocate a cast of personae to the scene script's entities.

    :param mapping: dict of {Entity: Persona}
    :return: self (fluent)
    """
    for character, persona in mapping.items():
        self.doc.note_citation(character)
        self.doc.note_explicit_target(character, character)
        character.persona = persona
        self.log.debug("{0} to be played by {1}".format(
            character["names"][0].capitalize(), persona))
    return self
Allocate the scene script a cast of personae for each of its entities. :param mapping: A dictionary of {Entity, Persona} :return: The SceneScript object.
def with_arg_count(self, count):
    """Make the last recorded call expect exactly *count* arguments.

    :return: self (fluent)
    """
    self._get_current_call().expected_arg_count = count
    return self
Set the last call to expect an exact argument count. I.E.:: >>> auth = Fake('auth').provides('login').with_arg_count(2) >>> auth.login('joe_user') # forgot password Traceback (most recent call last): ... AssertionError: fake:auth.login() was called w...
def memwarp_multi(src_ds_list, res='first', extent='intersection', t_srs='first', r='cubic', verbose=True, dst_ndv=0):
    """Helper for in-memory warping of multiple GDAL Datasets.

    Thin wrapper over ``warp_multi`` with ``warptype=memwarp``; all other
    parameters are forwarded unchanged.
    """
    return warp_multi(src_ds_list, res, extent, t_srs, r, warptype=memwarp, verbose=verbose, dst_ndv=dst_ndv)
Helper function for memwarp of multiple input GDAL Datasets
def compare(self, item, article_candidates): result = ArticleCandidate() result.title = self.comparer_title.extract(item, article_candidates) result.description = self.comparer_desciption.extract(item, article_candidates) result.text = self.comparer_text.extract(item, article_candidates)...
Compares the article candidates using the different submodules and saves the best results in new ArticleCandidate object :param item: The NewscrawlerItem related to the ArticleCandidates :param article_candidates: The list of ArticleCandidate-Objects which have been extracted :return: A...
def encloses_annulus(x_min, x_max, y_min, y_max, nx, ny, r_in, r_out):
    """Annulus overlap grid backported from old photutils.

    Computed as (outer circle overlap) minus (inner circle overlap).
    """
    outer = circular_overlap_grid(x_min, x_max, y_min, y_max, nx, ny,
                                  r_out, 1, 1)
    inner = circular_overlap_grid(x_min, x_max, y_min, y_max, nx, ny,
                                  r_in, 1, 1)
    return outer - inner
Encloses function backported from old photutils
def _parse(args): ordered = [] opt_full = dict() opt_abbrev = dict() args = args + [''] i = 0 while i < len(args) - 1: arg = args[i] arg_next = args[i+1] if arg.startswith('--'): if arg_next.startswith('-'): raise ValueError('{} lacks value'.fo...
Parse passed arguments from shell.
def _apply_hardware_version(hardware_version, config_spec, operation='add'): log.trace('Configuring virtual machine hardware ' 'version version=%s', hardware_version) if operation == 'edit': log.trace('Scheduling hardware version ' 'upgrade to %s', hardware_version) ...
Specifies vm container version or schedules upgrade, returns True on change and False if nothing have been changed. hardware_version Hardware version string eg. vmx-08 config_spec Configuration spec object operation Defines the operation which should be used, the possi...
def delete_all_but_self(self): prefix = self.settings.alias name = self.settings.index if prefix == name: Log.note("{{index_name}} will not be deleted", index_name= prefix) for a in self.cluster.get_aliases(): if re.match(re.escape(prefix) + "\\d{8}_\\d{6}", a.in...
DELETE ALL INDEXES WITH GIVEN PREFIX, EXCEPT name
def hook(module): if "INSTANA_DEV" in os.environ: print("==============================================================") print("Instana: Running flask hook") print("==============================================================") wrapt.wrap_function_wrapper('flask', 'Flask.__init__', wr...
Hook method to install the Instana middleware into Flask
def union(self, other): if self._jrdd_deserializer == other._jrdd_deserializer: rdd = RDD(self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer) else: self_copy = self._reserialize() other_copy = other._reserialize() rdd = ...
Return the union of this RDD and another one. >>> rdd = sc.parallelize([1, 1, 2, 3]) >>> rdd.union(rdd).collect() [1, 1, 2, 3, 1, 1, 2, 3]
def text(self): try: return self._text except AttributeError: if IS_PYTHON_3: encoding = self._response.headers.get_content_charset("utf-8") else: encoding = self._response.headers.getparam("charset") self._text = self._resp...
Get the raw text for the response
def reset(self):
    """Reset the environment and return the default state.

    Sets the shared reset flag, clears any queued commands, then ticks
    through the pre-start steps so the state is settled before sampling.
    """
    self._reset_ptr[0] = True
    self._commands.clear()
    # +1: one extra tick to apply the reset itself before pre-start steps
    for _ in range(self._pre_start_steps + 1):
        self.tick()
    return self._default_state_fn()
Resets the environment, and returns the state. If it is a single agent environment, it returns that state for that agent. Otherwise, it returns a dict from agent name to state. Returns: tuple or dict: For single agent environment, returns the same as `step`. For mult...
def SafeLoadKextManager(self, fn_table): dll = None try: dll = SetCTypesForLibrary('IOKit', fn_table) except AttributeError as ae: if 'KextManagerUnloadKextWithIdentifier' in str(ae): logging.debug('Using legacy kextunload') dll = self.SafeLoadKextManager( FilterFnTab...
Load the kextmanager, replacing unavailable symbols.
def setdefault(self, key, value):
    """Set a default config value, with or without an app context.

    When no Flask app is connected the base storage raises RuntimeError;
    in that case the default is stashed locally instead.
    """
    try:
        super(FlaskConfigStorage, self).setdefault(key, value)
    except RuntimeError:
        # Not bound to an app yet; remember the default for later.
        self._defaults[key] = value
We may not always be connected to an app, but we still need to provide a way for the base environment to set its defaults.
def get_all(self, security): url = 'http://www.google.com/finance?q=%s' % security page = self._request(url) soup = BeautifulSoup(page) snapData = soup.find("table", {"class": "snap-data"}) if snapData is None: raise UfException(Errors.STOCK_SYMBOL_ERROR, "Can f...
Get all available quote data for the given ticker security. Returns a dictionary.
def guest_delete(self, userid): userid = userid.upper() if not self._vmops.check_guests_exist_in_db(userid, raise_exc=False): if zvmutils.check_userid_exist(userid): LOG.error("Guest '%s' does not exist in guests database" % userid) r...
Delete guest. :param userid: the user id of the vm
def install(self): try: if self.args.server is not None: server = ServerLists(self.server_type) DynamicImporter( 'ezhost', server.name, args=self.args, configure=self.configure ...
install the server
def validate_labels(known_classes, passed_labels, argument_name): known_classes = np.array(known_classes) passed_labels = np.array(passed_labels) unique_labels, unique_indexes = np.unique(passed_labels, return_index=True) if len(passed_labels) != len(unique_labels): indexes = np.arange(0, len(pa...
Validates the labels passed into the true_labels or pred_labels arguments in the plot_confusion_matrix function. Raises a ValueError exception if any of the passed labels are not in the set of known classes or if there are duplicate labels. Otherwise returns None. Args: known_classes (arra...
def encode(self, pad=106): opcode = struct.pack('>H', self.defn.opcode) offset = len(opcode) size = max(offset + self.defn.argsize, pad) encoded = bytearray(size) encoded[0:offset] = opcode encoded[offset] = self.defn.argsize offset += 1 i...
Encodes this AIT command to binary. If pad is specified, it indicates the maximum size of the encoded command in bytes. If the encoded command is less than pad, the remaining bytes are set to zero. Commands sent to ISS payloads over 1553 are limited to 64 words (128 bytes) wit...
def list_flags(self, only_name=False):
    """Determine the flag files associated with this pipeline.

    :param only_name: return bare file names instead of full paths
    :return: list of flag file paths (or names)
    """
    pattern = os.path.join(self.outfolder, flag_name("*"))
    paths = glob.glob(pattern)
    if only_name:
        return [os.path.split(p)[1] for p in paths]
    return paths
Determine the flag files associated with this pipeline. :param bool only_name: Whether to return only flag file name(s) (True), or full flag file paths (False); default False (paths) :return list[str]: flag files associated with this pipeline.
def hierarchy_name(self, adjust_for_printing=True):
    """Return this object's dotted name including all parent names.

    :param adjust_for_printing: pass each name through
        ``adjust_name_for_printing`` when True
    """
    own = adjust_name_for_printing(self.name) if adjust_for_printing else self.name
    if self.has_parent():
        return self._parent_.hierarchy_name() + "." + own
    return own
return the name for this object with the parents names attached by dots. :param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()` on the names, recursively
def windowing(size, padding=UNSET, window_type=tuple):
    """Create a transducer producing a moving window over items.

    :raises ValueError: if size < 1
    """
    if size < 1:
        raise ValueError("windowing() size {} is not at least 1".format(size))

    def transducer(reducer):
        # Bind the configured window parameters into the reducer chain.
        return Windowing(reducer, size, padding, window_type)

    return transducer
Create a transducer which produces a moving window over items.
def _cleanup_channel(self, channel_id): with self.lock: if channel_id not in self._channels: return del self._channels[channel_id]
Remove the the channel from the list of available channels. :param int channel_id: Channel id :return:
def extract_keyhandle(path, filepath):
    """Extract the keyhandle component of *filepath* relative to *path*.

    Bug fix: the original used ``filepath.lstrip(path)``, but ``lstrip``
    strips any leading characters in the *set* ``path`` — e.g. a keyhandle
    beginning with a letter that also occurs in ``path`` was mangled.
    We now strip ``path`` as an exact prefix instead.
    """
    if filepath.startswith(path):
        keyhandle = filepath[len(path):]
    else:
        keyhandle = filepath
    # Drop any separator left between the prefix and the keyhandle.
    return keyhandle.lstrip("/").split("/")[0]
extract keyhandle value from the path
def _encryption_context_hash(hasher, encryption_context):
    """Return the hash of the serialized encryption context.

    :param hasher: existing cryptography hash object to feed and finalize
    :param encryption_context: dict to serialize and hash
    :return: digest bytes
    """
    hasher.update(serialize_encryption_context(encryption_context))
    return hasher.finalize()
Generates the expected hash for the provided encryption context. :param hasher: Existing hasher to use :type hasher: cryptography.hazmat.primitives.hashes.Hash :param dict encryption_context: Encryption context to hash :returns: Complete hash :rtype: bytes
def update_subtotals(self, current, sub_key):
    """Fold item counts from *current* into the subtotal bucket *sub_key*.

    Creates the bucket on first use, then increments a per-item tally.
    """
    bucket = self.sub_counts.get(sub_key)
    if not bucket:
        bucket = self.sub_counts[sub_key] = {}
    for item in current:
        bucket[item] = bucket.get(item, 0) + 1
updates sub_total counts for the class instance based on the current dictionary counts args: ----- current: current dictionary counts sub_key: the key/value to use for the subtotals
def wrap_field_error(self, data, renderer_context): response = renderer_context.get("response", None) status_code = response and response.status_code if status_code != 400: raise WrapperNotApplicable('Status code must be 400.') return self.wrap_error( data, render...
Convert field error native data to the JSON API Error format See the note about the JSON API Error format on `wrap_error`. The native format for field errors is a dictionary where the keys are field names (or 'non_field_errors' for additional errors) and the values are a list of error ...
def add_check(self, check_item):
    """Add a check to this object and propagate it to all delegates.

    Appends to ``self.checks`` and forwards to every entry in
    ``self.others``.
    """
    self.checks.append(check_item)
    for delegate in self.others:
        delegate.add_check(check_item)
Adds a check universally.
def subsystem_dependencies_iter(cls):
    """Yield the direct subsystem dependencies of this Optionable.

    Bare dependencies are wrapped into global-scoped
    ``SubsystemDependency`` instances; existing ones pass through.
    """
    for dep in cls.subsystem_dependencies():
        if isinstance(dep, SubsystemDependency):
            yield dep
            continue
        yield SubsystemDependency(dep, GLOBAL_SCOPE,
                                  removal_version=None, removal_hint=None)
Iterate over the direct subsystem dependencies of this Optionable.
def _parse_user_flags(): try: idx = list(sys.argv).index('--user-flags') user_flags_file = sys.argv[idx + 1] except (ValueError, IndexError): user_flags_file = '' if user_flags_file and os.path.isfile(user_flags_file): from ryu.utils import _import_module_file _import...
Parses user-flags file and loads it to register user defined options.
def centroid(self):
    """Area-weighted average of the triangle centroids.

    Valid even for non-watertight meshes (unlike center_mass).
    The returned array is frozen (non-writeable).
    """
    weighted = np.average(self.triangles_center,
                          axis=0,
                          weights=self.area_faces)
    weighted.flags.writeable = False
    return weighted
The point in space which is the average of the triangle centroids weighted by the area of each triangle. This will be valid even for non- watertight meshes, unlike self.center_mass Returns ---------- centroid : (3,) float The average vertex weighted by face ar...
def infer_operands_size(operands): size = None for oprnd in operands: if oprnd.size: size = oprnd.size break if size: for oprnd in operands: if not oprnd.size: oprnd.size = size else: for oprnd in operands: if isinst...
Infer x86 instruction operand size based on other operands.
def _getProcessedImage(self): if self.imageDisp is None: self.imageDisp = self.image self.levelMin, self.levelMax = self._quickLevels( self.imageDisp) return self.imageDisp
Returns the image data after it has been processed by any normalization options in use. This method also sets the attributes self.levelMin and self.levelMax to indicate the range of data in the image.
def sem(self, ddof=1):
    """Standard error of the mean of groups, excluding missing values.

    :param ddof: delta degrees of freedom, default 1
    """
    group_sizes = self.count()
    return self.std(ddof=ddof) / np.sqrt(group_sizes)
Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom
def login(self, login=None, password=None): if (login is not None) and (password is not None): login_data = {'user': login, 'pass': password} elif (self.default_login is not None) and (self.default_password is not None): login_data = {'user': self.default_login, 'pass': self.defa...
Login with default or supplied credentials. .. note:: Calling this method is not necessary when HTTP basic or HTTP digest_auth authentication is used and RT accepts it as external authentication method, because the login in this case is done transparently by requ...
def run_ffitch(distfile, outtreefile, intreefile=None, **kwargs): cl = FfitchCommandline(datafile=distfile, outtreefile=outtreefile, \ intreefile=intreefile, **kwargs) r, e = cl.run() if e: print("***ffitch could not run", file=sys.stderr) return None else: print("ffi...
Infer tree branch lengths using ffitch in EMBOSS PHYLIP
def reltags(self, src, cache=None): if not self._tag_assocs: return set() if cache == None: cache = {} q = _otq() q.append(src) updateq = _otq() while q: i = q.popleft() if i in cache: continue cache[i] = set() for (s,t) in self.transitions_to(i): ...
returns all the tags that are relevant at this state cache should be a dictionary and it is updated by the function
def get_default_gentation(gender, orientation):
    """Return the default gentation for a gender/orientation pair.

    Keys into the module-level mapping by the lowercased first letter of
    *gender* and the lowercased *orientation*.
    """
    gender_key = gender.lower()[0]
    orientation_key = orientation.lower()
    return gender_to_orientation_to_gentation[gender_key][orientation_key]
Return the default gentation for the given gender and orientation.
def sort(in_file, out_file):
    """Sort the lines of *in_file* and write them to *out_file*.

    Fix: the original left both file handles unclosed on exceptions;
    context managers now guarantee cleanup, and ``writelines`` replaces
    the per-line write loop.
    """
    with open(in_file, 'r') as src:
        lines = src.readlines()
    lines.sort()
    with open(out_file, 'w') as dst:
        dst.writelines(lines)
Sorts the given file.
def _scale_shape(dshape, scale = (1,1,1)): nshape = np.round(np.array(dshape) * np.array(scale)) return tuple(nshape.astype(np.int))
Returns the shape after scaling (should be the same as ndimage.zoom).
def printParameters(self): print "------------PY TemporalPooler Parameters ------------------" print "numInputs = ", self.getNumInputs() print "numColumns = ", self.getNumColumns() print "columnDimensions = ", self._columnDimensions print "numActiveColumns...
Useful for debugging.
def execute(filelocation, outpath, executable, args=None, switchArgs=None): procArgs = ['java', '-jar', executable] procArgs.extend(['-output_path', outpath]) if args is not None: for arg in args: procArgs.extend(['-'+arg[0], arg[1]]) if switchArgs is not None: procArgs.exten...
Executes the dinosaur tool on Windows operating systems. :param filelocation: either a single mgf file path or a list of file paths. :param outpath: path of the output file, file must not exist :param executable: must specify the complete file path of the spectra-cluster-cli.jar file, supported ver...
def get_isa(self, oneq_type='Xhalves', twoq_type='CZ') -> ISA:
    """Construct an ISA suitable for targeting by compilation.

    :param oneq_type: family of one-qubit gates to target
    :param twoq_type: family of two-qubit gates to target
    """
    retyped_qubits = [Qubit(id=q.id, type=oneq_type, dead=q.dead)
                      for q in self._isa.qubits]
    retyped_edges = [Edge(targets=e.targets, type=twoq_type, dead=e.dead)
                     for e in self._isa.edges]
    return ISA(retyped_qubits, retyped_edges)
Construct an ISA suitable for targeting by compilation. This will raise an exception if the requested ISA is not supported by the device. :param oneq_type: The family of one-qubit gates to target :param twoq_type: The family of two-qubit gates to target
def process_infile(f, fileStore): if isinstance(f, tuple): return f elif isinstance(f, list): return process_array_infile(f, fileStore) elif isinstance(f, basestring): return process_single_infile(f, fileStore) else: raise RuntimeError('Error processing file: '.format(str...
Takes an array of files or a single file and imports into the jobstore. This returns a tuple or an array of tuples replacing all previous path strings. Toil does not preserve a file's original name upon import and so the tuple keeps track of this with the format: '(filepath, preserveThisFilename)' :p...
def create(self, qname): try: if self.exists(qname): log.error('Queues "%s" already exists. Nothing done.', qname) return True self.conn.create(qname) return True except pyrax.exceptions as err_msg: log.error('RackSpace API ...
Create RackSpace Queue.
def main():
    """NAME
        convert2unix.py

    DESCRIPTION
        Converts a mac- or dos-formatted file to a unix file in place
        (universal-newline read + '\\n' write performs the conversion).

    SYNTAX
        convert2unix.py FILE

    OPTIONS
        -h prints help and quits
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    file = sys.argv[1]
    # Read first, then rewrite the same path; context managers ensure
    # the handles are closed (the original leaked both).
    with open(file, 'r') as f:
        lines = f.readlines()
    with open(file, 'w') as out:
        out.writelines(lines)
NAME convert2unix.py DESCRIPTION converts mac or dos formatted file to unix file in place SYNTAX convert2unix.py FILE OPTIONS -h prints help and quits
def cpos(string, chars, start):
    """Find the first occurrence in *string* of any character in *chars*,
    searching forward from *start* (SPICE ``cpos_c`` wrapper).

    :return: index of the first match, per the CSPICE convention
    """
    c_string = stypes.stringToCharP(string)
    c_chars = stypes.stringToCharP(chars)
    c_start = ctypes.c_int(start)
    return libspice.cpos_c(c_string, c_chars, c_start)
Find the first occurrence in a string of a character belonging to a collection of characters, starting at a specified location, searching forward. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cpos_c.html :param string: Any character string. :type string: str :param chars: A collecti...
def _get_colors(n):
    """Return *n* evenly spaced hex colors from the nipy_spectral cmap.

    :param n: number of unique colors wanted
    :return: list of '#rrggbb' strings
    """
    import matplotlib.pyplot as plt
    from matplotlib.colors import rgb2hex
    from numpy import linspace
    cmap = plt.get_cmap('nipy_spectral')
    return [rgb2hex(cmap(pos)) for pos in linspace(0.05, .95, n)]
Returns n unique and "evenly" spaced colors for the backgrounds of the projects. Args: n (int): The number of unique colors wanted. Returns: colors (list of str): The colors in hex form.
def capakey_rest_gateway_request(url, headers={}, params={}): try: res = requests.get(url, headers=headers, params=params) res.raise_for_status() return res except requests.ConnectionError as ce: raise GatewayRuntimeException( 'Could not execute request due to connect...
Utility function that helps making requests to the CAPAKEY REST service. :param string url: URL to request. :param dict headers: Headers to send with the URL. :param dict params: Parameters to send with the URL. :returns: Result of the call.
def _select(self, pointer):
    """Return a YAMLChunk referencing *pointer* within this document.

    Shares the parsed trees and label; the key association map is
    shallow-copied so the new chunk can diverge safely.
    """
    return YAMLChunk(
        self._ruamelparsed,
        pointer=pointer,
        label=self._label,
        strictparsed=self._strictparsed,
        key_association=copy(self._key_association),
    )
Get a YAMLChunk referenced by a pointer.
def populate_fw_dev(self, fw_id, mgmt_ip, new): for cnt in self.res: used = self.res.get(cnt).get('used') if mgmt_ip == self.res[cnt].get('mgmt_ip'): if new: self.res[cnt]['used'] = used + 1 self.res[cnt]['fw_id_lst'].append(fw_id) ...
Populate the class after a restart.
def get_default_widget():
    """Return the widget named in ``settings.BLEACH_DEFAULT_WIDGET``,
    falling back to ``forms.Textarea`` when the setting is absent.
    """
    if hasattr(settings, 'BLEACH_DEFAULT_WIDGET'):
        return load_widget(settings.BLEACH_DEFAULT_WIDGET)
    return forms.Textarea
Get the default widget or the widget defined in settings
def parse_extras(extras_str):
    """Turn a string of extras into a sorted, deduplicated, lowercase list.

    Parses via a fake requirement so pkg_resources does the splitting.
    """
    from pkg_resources import Requirement
    fake = Requirement.parse("fakepkg{0}".format(extras_to_string(extras_str)))
    return sorted(dedup([extra.lower() for extra in fake.extras]))
Turn a string of extras into a parsed extras list
def map_data(self):
    """Read the CSV at ``self.src_file`` and print each row's columns.

    Provides a mapping from the CSV file to the aikif data structures.
    """
    with open(self.src_file, "r") as f:
        for line in f:
            print(line.split(','))
provides a mapping from the CSV file to the aikif data structures.