Columns: code (string, lengths 51–2.38k), docstring (string, lengths 4–15.2k)
def asynchronous(function, event):
    """Run *function* asynchronously on a daemon thread, taking care of exceptions."""
    worker = Thread(target=synchronous, args=(function, event))
    worker.daemon = True
    worker.start()
def after_import(self, dataset, result, using_transactions, dry_run, **kwargs):
    """Reset the SQL sequences after new objects are imported (real runs only)."""
    if dry_run:
        return
    if not any(row.import_type == RowResult.IMPORT_TYPE_NEW for row in result.rows):
        return
    connection = connections[DEFAULT_DB_ALIAS]
    reset_sql = connection.ops.sequence_reset_sql(no_style(), [self._meta.model])
    if not reset_sql:
        return
    cursor = connection.cursor()
    try:
        for statement in reset_sql:
            cursor.execute(statement)
    finally:
        cursor.close()
def newMail(self, data, message):
    """Send a mail to a plugit server; return True when the server answers 'Ok'."""
    params = {'response_id': str(data), 'message': str(message)}
    response = self.doQuery('mail', method='POST', postParameters=params)
    if response.status_code != 200:
        return False
    return response.json()['result'] == 'Ok'
def __get_num_preds(self, num_iteration, nrow, predict_type):
    """Get size of prediction result.

    Raises LightGBMError when nrow exceeds MAX_INT32 (int32 limit of the C API).
    """
    if nrow > MAX_INT32:
        # Bug fix: the original concatenated the message fragments without
        # separating spaces, producing "...for datawith..." and "...chunksand...".
        raise LightGBMError('LightGBM cannot perform prediction for data '
                            'with number of rows greater than MAX_INT32 (%d).\n'
                            'You can split your data into chunks '
                            'and then concatenate predictions for them' % MAX_INT32)
    n_preds = ctypes.c_int64(0)
    _safe_call(_LIB.LGBM_BoosterCalcNumPredict(
        self.handle,
        ctypes.c_int(nrow),
        ctypes.c_int(predict_type),
        ctypes.c_int(num_iteration),
        ctypes.byref(n_preds)))
    return n_preds.value
def rmgen(self, idx):
    """Remove (disable) the static generators in *idx* whose dynamic models exist.

    Parameters
    ----------
    idx : list
        Static generator idx values.
    """
    active_models = [device
                     for device, has_stagen in zip(self.devman.devices,
                                                   self.call.stagen)
                     if has_stagen]
    for gen in idx:
        for model in active_models:
            if gen in self.__dict__[model].uid.keys():
                self.__dict__[model].disable_gen(gen)
def isObjectClassified(self, objectName, minOverlap=None, maxL2Size=None):
    """Return True if objectName is unambiguously classified by every L2 column.

    :param minOverlap: min overlap to count a column as recognized
                       (defaults to half the SDR size)
    :param maxL2Size: max allowed L2 representation size
                      (defaults to 1.5 * SDR size)
    :return: True/False
    """
    l2Reps = self.getL2Representations()
    objectRep = self.objectL2Representations[objectName]
    sdrSize = self.config["L2Params"]["sdrSize"]
    if minOverlap is None:
        minOverlap = sdrSize / 2
    if maxL2Size is None:
        maxL2Size = 1.5 * sdrSize
    hits = 0
    for col in xrange(self.numColumns):
        overlap = len(objectRep[col] & l2Reps[col])
        if overlap >= minOverlap and len(l2Reps[col]) <= maxL2Size:
            hits += 1
    return hits == self.numColumns
def add_rel(self, source_id, target_id, rel):
    """Cache-aware add_rel: forward to the workbench only for unseen pairs."""
    pair = (source_id, target_id)
    if pair in self.rel_cache:
        return
    self.workbench.add_rel(source_id, target_id, rel)
    self.rel_cache.add(pair)
def run(self):
    """Run the thread, stashing the target's return value on self._return.

    Handles both the Python 2 (_Thread__*) and Python 3 (_*) attribute names.
    """
    target = getattr(self, '_Thread__target', getattr(self, '_target', None))
    args = getattr(self, '_Thread__args', getattr(self, '_args', None))
    kwargs = getattr(self, '_Thread__kwargs', getattr(self, '_kwargs', None))
    if target is None:
        return None
    self._return = target(*args, **kwargs)
    return None
def toimages(self, chunk_size='auto'):
    """Convert to Images data (equivalent to series.toblocks(size).toimages()).

    Parameters
    ----------
    chunk_size : str or tuple, default 'auto'
        Chunk size used during conversion; 'auto' picks a size targeting
        ~100 MB blocks. Only relevant in spark mode.
    """
    from thunder.images.images import Images
    # Bug fix: use '==' instead of 'is' -- identity comparison against a string
    # literal relies on CPython interning and is not guaranteed to be True.
    if chunk_size == 'auto':
        chunk_size = str(max([int(1e5 / prod(self.baseshape)), 1]))
    n = len(self.shape) - 1
    if self.mode == 'spark':
        return Images(self.values.swap(tuple(range(n)), (0,), size=chunk_size))
    if self.mode == 'local':
        return Images(self.values.transpose((n,) + tuple(range(0, n))))
def _get_prefix_length(number1, number2, bits): for i in range(bits): if number1 >> i == number2 >> i: return bits - i return 0
Get the number of leading bits that are same for two numbers. Args: number1: an integer. number2: another integer. bits: the maximum number of bits to compare. Returns: The number of leading bits that are the same for two numbers.
def basic_lstm(inputs, state, num_units, name=None):
    """Basic LSTM step; builds a zero state when none is supplied."""
    batch = common_layers.shape_list(inputs)[0]
    cell = tf.nn.rnn_cell.BasicLSTMCell(num_units, name=name, reuse=tf.AUTO_REUSE)
    if state is None:
        state = cell.zero_state(batch, tf.float32)
    return cell(inputs, state)
def make_subdirs(self):
    """Create, if necessary, all subdirectories leading up to the file to be written."""
    subpath = self.full_path[len(self.context.root):]
    log.debug("make_subdirs: subpath is %s", subpath)
    parts = subpath.split(os.sep)[:-1]
    log.debug("dirs is %s", parts)
    current = self.context.root
    for part in parts:
        if not part:
            continue
        current = os.path.join(current, part)
        if os.path.isdir(current):
            log.debug("%s is already an existing directory", current)
        else:
            os.mkdir(current, 0o700)
def fetch(self):
    """Fetch the user's saved links; the Bit.ly API expects unix timestamps."""
    start_ts = time.mktime(self.options.since.datetime.timetuple())
    end_ts = time.mktime(self.options.until.datetime.timetuple())
    log.info("Searching for links saved by {0}".format(self.user))
    self.stats = self.parent.bitly.user_link_history(created_after=start_ts,
                                                     created_before=end_ts)
def _single_feature_logliks_one_step(self, feature, models): x_non_na = feature[~feature.isnull()] if x_non_na.empty: return pd.DataFrame() else: dfs = [] for name, model in models.items(): df = model.single_feature_logliks(feature) df['Modality'] = name dfs.append(df) return pd.concat(dfs, ignore_index=True)
Get log-likelihood of models at each parameterization for given data Parameters ---------- feature : pandas.Series Percent-based values of a single feature. May contain NAs, but only non-NA values are used. Returns ------- logliks : pandas.DataFrame
def convert_block_dicts_to_string(self, block_1st2nd, block_1st, block_2nd, block_3rd):
    """Serialize the requested codon-position blocks into one output string."""
    def render(blocks, header_fmt):
        # One header per gene code followed by its concatenated sequences.
        chunk = ""
        for gene_code, seqs in blocks.items():
            chunk += header_fmt.format(gene_code) + ''.join(seqs)
        return chunk

    out = ""
    if self.codon_positions in ['ALL', '1st-2nd']:
        out += render(block_1st2nd, '>{0}_1st-2nd\n----\n')
    elif self.codon_positions == '1st':
        out += render(block_1st, '>{0}_1st\n----\n')
    elif self.codon_positions == '2nd':
        out += render(block_2nd, '>{0}_2nd\n----\n')
    if self.codon_positions in ['ALL', '3rd']:
        out += render(block_3rd, '\n>{0}_3rd\n----\n')
    return out
def status(self, message, rofi_args=None, **kwargs):
    """Show a non-blocking status message.

    Close it with close() or by opening any other window. Fullscreen mode is
    not supported for status messages and is ignored if requested.

    Parameters
    ----------
    message: string
        Progress message to show.
    """
    argv = ['rofi', '-e', message]
    argv += self._common_args(allow_fullscreen=False, **kwargs)
    argv += list(rofi_args or [])
    self._run_nonblocking(argv)
def json_decoder(content, *args, **kwargs):
    """JSON decoder parser used by service_client; empty content yields None."""
    if not content:
        return None
    return json.loads(content.decode())
def create_process_worker(self, cmd_list, environ=None):
    """Create, register and return a new process worker instance."""
    new_worker = ProcessWorker(cmd_list, environ=environ)
    self._create_worker(new_worker)
    return new_worker
def normalize(name):
    """Normalize *name* to the Statsd convention: drop ':' and '%', spaces become '_'."""
    return name.replace(':', '').replace('%', '').replace(' ', '_')
def flip_alleles(genotypes):
    """Flip the alleles of a Genotypes instance.

    Deprecated: use Genotypes.flip_coded instead.
    """
    warnings.warn("deprecated: use 'Genotypes.flip_coded'", DeprecationWarning)
    genotypes.reference, genotypes.coded = genotypes.coded, genotypes.reference
    genotypes.genotypes = 2 - genotypes.genotypes
    return genotypes
def s_demand(self, bus):
    """Return the total complex power demand at *bus* (vload generation netted out)."""
    vload = array([complex(g.p, g.q)
                   for g in self.generators
                   if (g.bus == bus) and g.is_load], dtype=complex64)
    demand = complex(bus.p_demand, bus.q_demand)
    return -sum(vload) + demand
def validate(self, signature, timestamp, nonce):
    """Validate a weixin request signature.

    :param signature: signature string sent by weixin.
    :param timestamp: timestamp parameter sent by weixin.
    :param nonce: nonce parameter sent by weixin.
    """
    if not self.token:
        raise RuntimeError('WEIXIN_TOKEN is missing')
    if self.expires_in:
        try:
            timestamp = int(timestamp)
        except (ValueError, TypeError):
            return False
        delta = time.time() - timestamp
        # Reject timestamps from the future or older than the expiry window.
        if delta < 0 or delta > self.expires_in:
            return False
    parts = sorted([self.token, str(timestamp), str(nonce)])
    digest = hashlib.sha1(''.join(parts).encode('utf-8')).hexdigest()
    return signature == digest
def op_decanonicalize(op_name, canonical_op):
    """Get the current representation of a parsed operation's canonical data.

    Meant for backwards-compatibility; unknown op names pass through unchanged.
    """
    global DECANONICALIZE_METHODS
    if op_name in DECANONICALIZE_METHODS:
        return DECANONICALIZE_METHODS[op_name](canonical_op)
    return canonical_op
def get_data(histogram: HistogramBase, density: bool = False,
             cumulative: bool = False, flatten: bool = False) -> np.ndarray:
    """Get histogram data based on plotting parameters.

    Parameters
    ----------
    density : Whether to divide bin contents by bin size
    cumulative : Whether to return cumulative sums instead of individual bins
    flatten : Whether to flatten multidimensional bins
    """
    if density and cumulative:
        data = (histogram / histogram.total).cumulative_frequencies
    elif density:
        data = histogram.densities
    elif cumulative:
        data = histogram.cumulative_frequencies
    else:
        data = histogram.frequencies
    return data.flatten() if flatten else data
def install(path, name=None):
    """Compile a Thrift file and install it as a submodule of the caller.

    :param path: Path of the Thrift file, absolute or relative to the calling
        module's file.
    :param str name: Name of the submodule; defaults to the Thrift file's
        basename.
    :returns: The compiled module (cached in sys.modules on repeat calls).
    """
    if name is None:
        name = os.path.splitext(os.path.basename(path))[0]
    # Resolve the calling module so the compiled module nests under it.
    caller = inspect.getmodule(inspect.stack()[1][0])
    name = '%s.%s' % (caller.__name__, name)
    if name in sys.modules:
        return sys.modules[name]
    if not os.path.isabs(path):
        base = os.path.dirname(caller.__file__)
        path = os.path.normpath(os.path.join(base, path))
    module = load(path, name=name)
    sys.modules[name] = module
    return module
def _render_relationships(self, resource):
    """Render the resource's relationships as JSON-API link objects."""
    related = resource.__mapper__.relationships.keys()
    pk = getattr(resource, self.primary_key)
    if self.dasherize:
        display = {rel: dasherize(underscore(rel)) for rel in related}
    else:
        display = {rel: rel for rel in related}
    relationships = {}
    for rel in related:
        label = display[rel]
        relationships[label] = {
            'links': {
                'self': '/{}/{}/relationships/{}'.format(
                    resource.__tablename__, pk, label),
                'related': '/{}/{}/{}'.format(
                    resource.__tablename__, pk, label),
            }
        }
    return relationships
def get(self, key, lang=None):
    """Yield triples related to this node, optionally filtered by language.

    :param key: Predicate of the triple
    :param lang: Language of the triple if applicable
    :rtype: Literal or BNode or URIRef
    """
    for obj in self.graph.objects(self.asNode(), key):
        if lang is None or obj.language == lang:
            yield obj
def _make_scatter_logfile_name(cls, key, linkname, job_config): logfile = job_config.get('logfile', "%s_%s_%s.log" % (cls.default_prefix_logfile, linkname, key)) job_config['logfile'] = logfile
Hook to inster the name of a logfile into the input config
def maybe_convert_platform_interval(values):
    """Platform conversion with IntervalArray-friendly special cases.

    Empty lists/tuples return int64 (object dtype is prohibited for
    IntervalArray); categorical input is densified first.

    Parameters
    ----------
    values : array-like

    Returns
    -------
    array
    """
    if isinstance(values, (list, tuple)) and not values:
        return np.array([], dtype=np.int64)
    if is_categorical_dtype(values):
        values = np.asarray(values)
    return maybe_convert_platform(values)
def config_default(option, default=None, type=None, section=cli.name):
    """Build a callable that reads a CLI option default from the configuration. ::

        @click.option('--locale', default=config_default('locale'))
    """
    def resolver(option=option, default=default, type=type, section=section):
        config = read_config()
        if type is None and default is not None:
            # Infer the option type from the default's type.
            type = builtins.type(default)
        getter = option_getter(type)
        try:
            return getter(config, section, option)
        except (NoOptionError, NoSectionError):
            return default
    return resolver
def register_pubkey(self):
    """Register the server's DH public key on the TLS session.

    XXX Check that the pubkey received is in the group.
    """
    prime = pkcs_os2ip(self.dh_p)
    generator = pkcs_os2ip(self.dh_g)
    param_numbers = dh.DHParameterNumbers(prime, generator)
    pub_numbers = dh.DHPublicNumbers(pkcs_os2ip(self.dh_Ys), param_numbers)
    session = self.tls_session
    session.server_kx_pubkey = pub_numbers.public_key(default_backend())
    if not session.client_kx_ffdh_params:
        session.client_kx_ffdh_params = param_numbers.parameters(default_backend())
def name(self):
    """Generate a string that contains all components of the symlink.

    A b'/' component resets the path (absolute link); a continued component is
    appended to the previous element instead of starting a new one.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('SL record not yet initialized!')
    outlist = []
    continued = False
    for comp in self.symlink_components:
        part = comp.name()
        if part == b'/':
            outlist = []
            continued = False
            part = b''
        if continued:
            outlist[-1] += part
        else:
            outlist.append(part)
        continued = comp.is_continued()
    return b'/'.join(outlist)
def get_default_location(self):
    """Return the default location list (every distdefault location expanded)."""
    results = []
    for loc in self.distdefault:
        results += self.get_location(loc)
    return results
def new(self, **dict):
    """Create a new item with the provided dict information; return the new item."""
    if not self._item_new_path:
        raise AttributeError('new is not available for %s' % self._item_name)
    for tag in self._object._remap_to_id:
        self._object._remap_tag_to_tag_id(tag, dict)
    target = self._item_new_path
    payload = json.dumps({self._item_type: dict})
    json_data = self._redmine.post(target, payload)
    data = self._redmine.unwrap_json(self._item_type, json_data)
    data['_source_path'] = target
    return self._objectify(data=data)
def get_week_start_end_day():
    """Return (monday, sunday) dates of the current week."""
    today = date.today()
    offset = today.weekday()
    return today - timedelta(offset), today + timedelta(6 - offset)
def __recieve_raw_data(self, size): data = [] if self.verbose: print ("expecting {} bytes raw data".format(size)) while size > 0: data_recv = self.__sock.recv(size) recieved = len(data_recv) if self.verbose: print ("partial recv {}".format(recieved)) if recieved < 100 and self.verbose: print (" recv {}".format(codecs.encode(data_recv, 'hex'))) data.append(data_recv) size -= recieved if self.verbose: print ("still need {}".format(size)) return b''.join(data)
partial data ?
def d(msg, *args, **kwargs):
    """Log a message at debug level."""
    return logging.log(DEBUG, msg, *args, **kwargs)
def remove_group_roles(request, group, domain=None, project=None):
    """Remove all roles from a group on a domain or project."""
    client = keystoneclient(request, admin=True)
    for role in client.roles.list(group=group, domain=domain, project=project):
        remove_group_role(request, role=role.id, group=group,
                          domain=domain, project=project)
def _as_array(self, fmt):
    """Unpack this param's raw bytes with array format *fmt*, reshaped to its dimensions."""
    assert self.dimensions, \
        '{}: cannot get value as {} array!'.format(self.name, fmt)
    values = array.array(fmt)
    values.fromstring(self.bytes)
    return np.array(values).reshape(self.dimensions)
def _gc_rule_from_pb(gc_rule_pb):
    """Convert a protobuf GC rule to a native object.

    :type gc_rule_pb: :class:`.table_v2_pb2.GcRule`
    :param gc_rule_pb: The GC rule to convert.

    :rtype: :class:`GarbageCollectionRule` or :data:`NoneType <types.NoneType>`
    :returns: a native rule, or :data:`None` if no oneof field is set.
    :raises: :class:`ValueError <exceptions.ValueError>` for unexpected rule names.
    """
    rule_name = gc_rule_pb.WhichOneof("rule")
    if rule_name is None:
        return None
    if rule_name == "max_num_versions":
        return MaxVersionsGCRule(gc_rule_pb.max_num_versions)
    if rule_name == "max_age":
        return MaxAgeGCRule(_helpers._duration_pb_to_timedelta(gc_rule_pb.max_age))
    if rule_name == "union":
        return GCRuleUnion([_gc_rule_from_pb(sub) for sub in gc_rule_pb.union.rules])
    if rule_name == "intersection":
        return GCRuleIntersection(
            [_gc_rule_from_pb(sub) for sub in gc_rule_pb.intersection.rules])
    raise ValueError("Unexpected rule name", rule_name)
def add_transaction(self, transaction):
    """Add a transaction.

    Arguments:
        transaction -- A transaction as an iterable object (eg. ['A', 'B']).
    """
    tid = self.__num_transaction
    for item in transaction:
        index = self.__transaction_index_map.get(item)
        if index is None:
            # First sighting of this item: record it and start its index set.
            self.__items.append(item)
            index = set()
            self.__transaction_index_map[item] = index
        index.add(tid)
    self.__num_transaction += 1
def _sorted_keys(self, keys, startkey, reverse=False): tuple_key = lambda t: t[::-1] if reverse: tuple_cmp = lambda t: t[::-1] > startkey[::-1] else: tuple_cmp = lambda t: t[::-1] < startkey[::-1] searchkeys = sorted(keys, key=tuple_key, reverse=reverse) searchpos = sum(1 for _ in ifilter(tuple_cmp, searchkeys)) searchkeys = searchkeys[searchpos:] + searchkeys[:searchpos] for key in searchkeys: yield key
Generator that yields sorted keys starting with startkey Parameters ---------- keys: Iterable of tuple/list \tKey sequence that is sorted startkey: Tuple/list \tFirst key to be yielded reverse: Bool \tSort direction reversed if True
def list_components(self, dependency_order=True):
    """List the Components, optionally ordered by dependency resolving.

    :param dependency_order: Components are returned in dependency order.
    :type dependency_order: bool
    """
    if not dependency_order:
        return [key for (key, value) in self]
    requirements = dict((key, value.require) for (key, value) in self)
    batches = foundations.common.dependency_resolver(requirements)
    return list(itertools.chain.from_iterable(
        sorted(list(batch)) for batch in batches))
def unbind(self, dependency, svc, svc_ref):
    """Remove an injected service (called by a dependency manager) and update the life cycle."""
    with self._lock:
        self.check_lifecycle()
        self.__unset_binding(dependency, svc, svc_ref)
        if self.update_bindings():
            self.check_lifecycle()
def address(self, ip, **kwargs):
    """Add Address data to this Batch object.

    Args:
        ip (str): The value for this Indicator.
        **kwargs: confidence, date_added, last_modified, rating, xid.

    Returns:
        obj: An instance of Address.
    """
    return self._indicator(Address(ip, **kwargs))
def job_detail(job_id=None):
    """Show a detailed description of a Job's status (404 when unknown)."""
    matches = [j for j in get_jobs() if str(j['job_id']) == job_id]
    if not matches:
        abort(404)
    return render_template('job_detail.html', job=matches[0],
                           hosts=dagobah.get_hosts())
def cast(self, mapping):
    """Allocate the scene script a cast of personae for each of its entities.

    :param mapping: A dictionary of {Entity: Persona}
    :return: The SceneScript object.
    """
    for character, persona in mapping.items():
        self.doc.note_citation(character)
        self.doc.note_explicit_target(character, character)
        character.persona = persona
        self.log.debug("{0} to be played by {1}".format(
            character["names"][0].capitalize(), persona))
    return self
def with_arg_count(self, count):
    """Set the last call to expect exactly *count* arguments; returns self for chaining."""
    self._get_current_call().expected_arg_count = count
    return self
def memwarp_multi(src_ds_list, res='first', extent='intersection', t_srs='first',
                  r='cubic', verbose=True, dst_ndv=0):
    """Helper for in-memory warping of multiple input GDAL Datasets."""
    return warp_multi(src_ds_list, res, extent, t_srs, r,
                      warptype=memwarp, verbose=verbose, dst_ndv=dst_ndv)
def compare(self, item, article_candidates):
    """Combine the best sub-extractor results into a single ArticleCandidate.

    :param item: The NewscrawlerItem related to the ArticleCandidates
    :param article_candidates: extracted ArticleCandidate objects
    :return: An ArticleCandidate containing the best result per field
    """
    best = ArticleCandidate()
    best.title = self.comparer_title.extract(item, article_candidates)
    best.description = self.comparer_desciption.extract(item, article_candidates)
    best.text = self.comparer_text.extract(item, article_candidates)
    best.topimage = self.comparer_topimage.extract(item, article_candidates)
    best.author = self.comparer_author.extract(item, article_candidates)
    best.publish_date = self.comparer_date.extract(item, article_candidates)
    best.language = self.comparer_language.extract(item, article_candidates)
    return best
def encloses_annulus(x_min, x_max, y_min, y_max, nx, ny, r_in, r_out):
    """Annulus overlap grid (outer circle minus inner); backported from old photutils."""
    outer = circular_overlap_grid(x_min, x_max, y_min, y_max, nx, ny, r_out, 1, 1)
    inner = circular_overlap_grid(x_min, x_max, y_min, y_max, nx, ny, r_in, 1, 1)
    return outer - inner
def _parse(args): ordered = [] opt_full = dict() opt_abbrev = dict() args = args + [''] i = 0 while i < len(args) - 1: arg = args[i] arg_next = args[i+1] if arg.startswith('--'): if arg_next.startswith('-'): raise ValueError('{} lacks value'.format(arg)) else: opt_full[arg[2:]] = arg_next i += 2 elif arg.startswith('-'): if arg_next.startswith('-'): raise ValueError('{} lacks value'.format(arg)) else: opt_abbrev[arg[1:]] = arg_next i += 2 else: ordered.append(arg) i += 1 return ordered, opt_full, opt_abbrev
Parse passed arguments from shell.
def _apply_hardware_version(hardware_version, config_spec, operation='add'):
    """Set the VM hardware version ('add') or schedule an upgrade to it ('edit').

    hardware_version
        Hardware version string, e.g. vmx-08
    config_spec
        Configuration spec object
    operation
        Either 'add' (set directly) or 'edit' (schedule an upgrade);
        default 'add'.
    """
    log.trace('Configuring virtual machine hardware '
              'version version=%s', hardware_version)
    if operation == 'edit':
        log.trace('Scheduling hardware version '
                  'upgrade to %s', hardware_version)
        upgrade = vim.vm.ScheduledHardwareUpgradeInfo()
        upgrade.upgradePolicy = 'always'
        upgrade.versionKey = hardware_version
        config_spec.scheduledHardwareUpgradeInfo = upgrade
    elif operation == 'add':
        config_spec.version = str(hardware_version)
def delete_all_but_self(self):
    """Delete all indexes carrying this alias prefix, except the index itself."""
    prefix = self.settings.alias
    name = self.settings.index
    if prefix == name:
        Log.note("{{index_name}} will not be deleted", index_name=prefix)
    pattern = re.compile(re.escape(prefix) + "\\d{8}_\\d{6}")
    for alias in self.cluster.get_aliases():
        if alias.index != name and pattern.match(alias.index):
            self.cluster.delete_index(alias.index)
def hook(module):
    """Install the Instana middleware into Flask by wrapping Flask.__init__."""
    if "INSTANA_DEV" in os.environ:
        banner = "=============================================================="
        print(banner)
        print("Instana: Running flask hook")
        print(banner)
    wrapt.wrap_function_wrapper('flask', 'Flask.__init__', wrapper)
def union(self, other):
    """Return the union of this RDD and another one.

    >>> rdd = sc.parallelize([1, 1, 2, 3])
    >>> rdd.union(rdd).collect()
    [1, 1, 2, 3, 1, 1, 2, 3]
    """
    if self._jrdd_deserializer == other._jrdd_deserializer:
        combined = RDD(self._jrdd.union(other._jrdd), self.ctx,
                       self._jrdd_deserializer)
    else:
        # Mixed serializers: reserialize both sides to the default first.
        left = self._reserialize()
        right = other._reserialize()
        combined = RDD(left._jrdd.union(right._jrdd), self.ctx,
                       self.ctx.serializer)
    if (self.partitioner == other.partitioner and
            self.getNumPartitions() == combined.getNumPartitions()):
        combined.partitioner = self.partitioner
    return combined
def text(self):
    """Return the raw text of the response, decoded and cached on first access."""
    try:
        return self._text
    except AttributeError:
        headers = self._response.headers
        if IS_PYTHON_3:
            encoding = headers.get_content_charset("utf-8")
        else:
            encoding = headers.getparam("charset")
        self._text = self._response.read().decode(encoding or "utf-8")
        return self._text
def reset(self):
    """Reset the environment and return the initial state.

    Single-agent environments return that agent's state; multi-agent
    environments return a dict from agent name to state (same as `tick`).
    """
    self._reset_ptr[0] = True
    self._commands.clear()
    # Advance past the configured pre-start steps plus the reset tick itself.
    for _ in range(self._pre_start_steps + 1):
        self.tick()
    return self._default_state_fn()
def SafeLoadKextManager(self, fn_table):
    """Load the kextmanager, substituting legacy implementations for missing symbols."""
    try:
        return SetCTypesForLibrary('IOKit', fn_table)
    except AttributeError as ae:
        message = str(ae)
        if 'KextManagerUnloadKextWithIdentifier' in message:
            logging.debug('Using legacy kextunload')
            dll = self.SafeLoadKextManager(
                FilterFnTable(fn_table, 'KextManagerUnloadKextWithIdentifier'))
            dll.KextManagerUnloadKextWithIdentifier = self.LegacyKextunload
        elif 'KextManagerLoadKextWithURL' in message:
            logging.debug('Using legacy kextload')
            dll = self.SafeLoadKextManager(
                FilterFnTable(fn_table, 'KextManagerLoadKextWithURL'))
            dll.KextManagerLoadKextWithURL = self.LegacyKextload
        else:
            raise OSError('Can\'t resolve KextManager symbols:{0}'.format(message))
        return dll
def setdefault(self, key, value):
    """Set a default value, falling back to local defaults when no app is connected."""
    try:
        super(FlaskConfigStorage, self).setdefault(key, value)
    except RuntimeError:
        # Outside an app context: stash the default locally instead.
        self._defaults.__setitem__(key, value)
def get_all(self, security):
    """Get all available quote data for *security* from Google Finance as a dict."""
    page = self._request('http://www.google.com/finance?q=%s' % security)
    soup = BeautifulSoup(page)
    snap = soup.find("table", {"class": "snap-data"})
    if snap is None:
        raise UfException(Errors.STOCK_SYMBOL_ERROR,
                          "Can find data for stock %s, security error?" % security)
    data = {}
    for row in snap.findAll('tr'):
        key_td, val_td = row.findAll('td')
        data[key_td.getText()] = val_td.getText()
    return data
def guest_delete(self, userid):
    """Delete guest.

    :param userid: the user id of the vm
    """
    userid = userid.upper()
    if not self._vmops.check_guests_exist_in_db(userid, raise_exc=False):
        if zvmutils.check_userid_exist(userid):
            # Exists on z/VM but not in our database: refuse to touch it.
            LOG.error("Guest '%s' does not exist in guests database" % userid)
            raise exception.SDKObjectNotExistError(
                obj_desc=("Guest '%s'" % userid), modID='guest')
        LOG.debug("The guest %s does not exist." % userid)
        return
    action = "delete guest '%s'" % userid
    with zvmutils.log_and_reraise_sdkbase_error(action):
        return self._vmops.delete_vm(userid)
def install(self):
    """Install the server.

    Dispatches to a dynamically imported ezhost server module when a server
    type was requested, otherwise runs the plain server command.
    """
    # Fix: the original wrapped the body in `try: ... except Exception as e:
    # raise e`, which adds nothing and (on Python 2) destroys the traceback.
    if self.args.server is not None:
        server = ServerLists(self.server_type)
        DynamicImporter('ezhost', server.name, args=self.args,
                        configure=self.configure)
    else:
        ServerCommand(self.args)
def validate_labels(known_classes, passed_labels, argument_name):
    """Validate labels passed into plot_confusion_matrix's true_labels/pred_labels.

    Raises ValueError if any passed label is duplicated or absent from the set
    of known classes; otherwise returns None.

    Args:
        known_classes (array-like): Classes known to appear in the data.
        passed_labels (array-like): Labels passed in through the argument.
        argument_name (str): Name of the argument being validated.

    Example:
        >>> validate_labels(["A", "B", "C"], ["A", "B"], "true_labels")
    """
    known_classes = np.array(known_classes)
    passed_labels = np.array(passed_labels)

    unique_labels, unique_indexes = np.unique(passed_labels, return_index=True)
    if len(passed_labels) != len(unique_labels):
        all_indexes = np.arange(0, len(passed_labels))
        dup_indexes = all_indexes[~np.in1d(all_indexes, unique_indexes)]
        duplicates = [str(x) for x in passed_labels[dup_indexes]]
        raise ValueError(
            "The following duplicate labels were passed into {0}: {1}"
            .format(argument_name, ", ".join(duplicates)))

    absent_mask = ~np.in1d(passed_labels, known_classes)
    if np.any(absent_mask):
        absent = [str(x) for x in passed_labels[absent_mask]]
        raise ValueError(
            ("The following labels "
             "were passed into {0}, "
             "but were not found in "
             "labels: {1}").format(argument_name, ", ".join(absent)))
    return
def encode(self, pad=106):
    """Encode this AIT command to binary, zero-padded to at least *pad* bytes.

    The 106-byte default leaves room for 22 bytes of CCSDS overhead within the
    128-byte limit for ISS payload commands over 1553 (SSP 52050J, 3.2.3.4).
    """
    opcode = struct.pack('>H', self.defn.opcode)
    offset = len(opcode)
    encoded = bytearray(max(offset + self.defn.argsize, pad))
    encoded[0:offset] = opcode
    encoded[offset] = self.defn.argsize
    offset += 1
    index = 0
    for argdefn in self.defn.argdefns:
        if argdefn.fixed:
            value = argdefn.value
        else:
            value = self.args[index]
            index += 1
        encoded[argdefn.slice(offset)] = argdefn.encode(value)
    return encoded
def list_flags(self, only_name=False):
    """Determine the flag files associated with this pipeline.

    :param bool only_name: Whether to return only flag file name(s) (True),
        or full flag file paths (False); default False (paths).
    :return list[str]: flag files associated with this pipeline.
    """
    pattern = os.path.join(self.outfolder, flag_name("*"))
    matches = glob.glob(pattern)
    if only_name:
        return [os.path.split(match)[1] for match in matches]
    return matches
Determine the flag files associated with this pipeline. :param bool only_name: Whether to return only flag file name(s) (True), or full flag file paths (False); default False (paths) :return list[str]: flag files associated with this pipeline.
def hierarchy_name(self, adjust_for_printing=True):
    """Return this object's name with all parent names prepended, separated
    by dots.

    :param bool adjust_for_printing: whether to pass each name through
        :func:`~adjust_for_printing()`, recursively.
    """
    if adjust_for_printing:
        adjust = adjust_name_for_printing
    else:
        adjust = lambda name: name
    if self.has_parent():
        return self._parent_.hierarchy_name() + "." + adjust(self.name)
    return adjust(self.name)
return the name for this object with the parents names attached by dots. :param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()` on the names, recursively
def windowing(size, padding=UNSET, window_type=tuple):
    """Create a transducer which produces a moving window over items.

    :raises ValueError: if *size* is less than 1.
    """
    if size < 1:
        raise ValueError("windowing() size {} is not at least 1".format(size))

    def transducer(reducer):
        return Windowing(reducer, size, padding, window_type)

    return transducer
Create a transducer which produces a moving window over items.
def _cleanup_channel(self, channel_id): with self.lock: if channel_id not in self._channels: return del self._channels[channel_id]
Remove the channel from the list of available channels. :param int channel_id: Channel id :return:
def extract_keyhandle(path, filepath):
    """Extract the keyhandle value from *filepath*.

    The keyhandle is the first path component of *filepath* below the base
    directory *path*.

    Bug fix: the original used ``filepath.lstrip(path)``, but ``str.lstrip``
    strips any leading characters that appear anywhere in its argument (a
    character *set*), not the prefix string itself, so it could eat the
    start of the keyhandle whenever it shared characters with *path*.
    Strip the literal prefix instead.
    """
    keyhandle = filepath
    if keyhandle.startswith(path):
        keyhandle = keyhandle[len(path):]
    # Drop separators left over between the base directory and the handle.
    keyhandle = keyhandle.lstrip("/")
    return keyhandle.split("/")[0]
extract keyhandle value from the path
def _encryption_context_hash(hasher, encryption_context):
    """Generate the expected hash for the provided encryption context.

    :param hasher: Existing hasher to use
    :type hasher: cryptography.hazmat.primitives.hashes.Hash
    :param dict encryption_context: Encryption context to hash
    :returns: Complete hash
    :rtype: bytes
    """
    hasher.update(serialize_encryption_context(encryption_context))
    return hasher.finalize()
Generates the expected hash for the provided encryption context. :param hasher: Existing hasher to use :type hasher: cryptography.hazmat.primitives.hashes.Hash :param dict encryption_context: Encryption context to hash :returns: Complete hash :rtype: bytes
def update_subtotals(self, current, sub_key):
    """Update sub-total counts for this instance from *current*.

    args:
    -----
        current: iterable of items to count
        sub_key: the key under which the subtotals are accumulated
    """
    if not self.sub_counts.get(sub_key):
        self.sub_counts[sub_key] = {}
    bucket = self.sub_counts[sub_key]
    for item in current:
        bucket[item] = bucket.get(item, 0) + 1
updates sub_total counts for the class instance based on the current dictionary counts args: ----- current: current dictionary counts sub_key: the key/value to use for the subtotals
def wrap_field_error(self, data, renderer_context):
    """Convert native field-error data to the JSON API Error format.

    Only applies to 400 responses; otherwise WrapperNotApplicable is
    raised so another wrapper can claim the response.
    """
    response = renderer_context.get("response", None)
    if not (response and response.status_code == 400):
        raise WrapperNotApplicable('Status code must be 400.')
    return self.wrap_error(
        data, renderer_context, keys_are_fields=True, issue_is_title=False)
Convert field error native data to the JSON API Error format See the note about the JSON API Error format on `wrap_error`. The native format for field errors is a dictionary where the keys are field names (or 'non_field_errors' for additional errors) and the values are a list of error strings: { "min": [ "min must be greater than 0.", "min must be an even number." ], "max": ["max must be a positive number."], "non_field_errors": [ "Select either a range or an enumeration, not both."] } It is rendered into this JSON API error format: { "errors": [{ "status": "400", "path": "/min", "detail": "min must be greater than 0." },{ "status": "400", "path": "/min", "detail": "min must be an even number." },{ "status": "400", "path": "/max", "detail": "max must be a positive number." },{ "status": "400", "path": "/-", "detail": "Select either a range or an enumeration, not both." }] }
def add_check(self, check_item):
    """Add a check universally: to this object and to every peer in
    ``self.others``."""
    self.checks.append(check_item)
    for peer in self.others:
        peer.add_check(check_item)
Adds a check universally.
def subsystem_dependencies_iter(cls):
    """Iterate over the direct subsystem dependencies of this Optionable.

    Bare (non-SubsystemDependency) entries are wrapped as global-scoped
    dependencies with no removal metadata.
    """
    for dep in cls.subsystem_dependencies():
        if isinstance(dep, SubsystemDependency):
            yield dep
            continue
        yield SubsystemDependency(dep, GLOBAL_SCOPE,
                                  removal_version=None, removal_hint=None)
Iterate over the direct subsystem dependencies of this Optionable.
def _parse_user_flags(): try: idx = list(sys.argv).index('--user-flags') user_flags_file = sys.argv[idx + 1] except (ValueError, IndexError): user_flags_file = '' if user_flags_file and os.path.isfile(user_flags_file): from ryu.utils import _import_module_file _import_module_file(user_flags_file)
Parses user-flags file and loads it to register user defined options.
def centroid(self):
    """The point in space which is the average of the triangle centroids
    weighted by the area of each triangle.

    This will be valid even for non-watertight meshes, unlike
    self.center_mass.

    Returns
    ----------
    centroid : (3,) float
      The average vertex weighted by face area
    """
    weighted = np.average(self.triangles_center,
                          axis=0,
                          weights=self.area_faces)
    # Freeze the result so callers cannot mutate the cached value.
    weighted.flags.writeable = False
    return weighted
The point in space which is the average of the triangle centroids weighted by the area of each triangle. This will be valid even for non- watertight meshes, unlike self.center_mass Returns ---------- centroid : (3,) float The average vertex weighted by face area
def infer_operands_size(operands):
    """Infer x86 instruction operand size based on other operands.

    If any operand already carries a size, every size-less operand is given
    that size; otherwise size-less immediate operands default to the
    architecture word size.
    """
    known_size = next((op.size for op in operands if op.size), None)

    if known_size:
        for op in operands:
            if not op.size:
                op.size = known_size
    else:
        for op in operands:
            if isinstance(op, X86ImmediateOperand) and not op.size:
                op.size = arch_info.architecture_size
Infer x86 instruction operand size based on other operands.
def _getProcessedImage(self): if self.imageDisp is None: self.imageDisp = self.image self.levelMin, self.levelMax = self._quickLevels( self.imageDisp) return self.imageDisp
Returns the image data after it has been processed by any normalization options in use. This method also sets the attributes self.levelMin and self.levelMax to indicate the range of data in the image.
def sem(self, ddof=1):
    """Compute standard error of the mean of groups, excluding missing
    values.

    For multiple groupings, the result index will be a MultiIndex.

    Parameters
    ----------
    ddof : integer, default 1
        degrees of freedom
    """
    deviation = self.std(ddof=ddof)
    return deviation / np.sqrt(self.count())
Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom
def login(self, login=None, password=None):
    """Login with default or supplied credentials.

    .. note:: Not needed when HTTP basic/digest authentication is in use
        (requests handles that transparently), but useful to check whether
        given credentials are valid.

    :keyword login: Username used for RT; if omitted together with
        *password*, the instance defaults are used instead.
    :keyword password: Similarly as *login*.
    :returns: ``True`` on successful login, ``False`` otherwise.
    :raises AuthorizationError: When credentials are supplied neither here
        nor at initialization and no session auth is configured.
    """
    if (login is not None) and (password is not None):
        credentials = (login, password)
    elif (self.default_login is not None) and (self.default_password is not None):
        credentials = (self.default_login, self.default_password)
    elif self.session.auth:
        # External (basic/digest) authentication: no login form needed.
        credentials = None
    else:
        raise AuthorizationError('Credentials required, fill login and password.')

    if credentials is None:
        login_data = None
    else:
        login_data = {'user': credentials[0], 'pass': credentials[1]}

    try:
        response = self.__request('', post_data=login_data, without_login=True)
        self.login_result = self.__get_status_code(response) == 200
    except AuthorizationError:
        # Rejected credentials surface as a failed login, not an error.
        return False
    return self.login_result
Login with default or supplied credentials. .. note:: Calling this method is not necessary when HTTP basic or HTTP digest_auth authentication is used and RT accepts it as an external authentication method, because the login in this case is done transparently by the requests module. Anyway, this method can be useful to check whether given credentials are valid or not. :keyword login: Username used for RT; if not supplied together with *password*, :py:attr:`~Rt.default_login` and :py:attr:`~Rt.default_password` are used instead :keyword password: Similarly as *login* :returns: ``True`` Successful login ``False`` Otherwise :raises AuthorizationError: In case that credentials are supplied neither during initialization nor in the call of this method.
def run_ffitch(distfile, outtreefile, intreefile=None, **kwargs):
    """Infer tree branch lengths using ffitch in EMBOSS PHYLIP.

    Returns *outtreefile* on success, or None if ffitch reported errors.
    """
    cmd = FfitchCommandline(datafile=distfile,
                            outtreefile=outtreefile,
                            intreefile=intreefile,
                            **kwargs)
    _, errors = cmd.run()
    if errors:
        print("***ffitch could not run", file=sys.stderr)
        return None
    print("ffitch:", cmd, file=sys.stderr)
    return outtreefile
Infer tree branch lengths using ffitch in EMBOSS PHYLIP
def reltags(self, src, cache=None):
    """Return all the tags that are relevant at state *src*.

    Works in two phases: a backward BFS over incoming transitions that
    records directly-attached tags, then a merge pass that folds each
    tagged predecessor's tag set into its successor's entry.

    :param src: state whose relevant tags are wanted.
    :param cache: optional dict; it is updated in place by this function
        and can be passed back in to amortize repeated queries.
    :returns: set of (tag, source_state, dest_state) triples.
    """
    # No tag associations at all: nothing can be relevant.
    if not self._tag_assocs:
        return set()
    if cache == None:
        cache = {}
    q = _otq()
    q.append(src)
    updateq = _otq()
    # Phase 1: BFS backwards from src.  cache[i] collects tags attached
    # directly to transitions into i; for each tagged transition (s -> i)
    # remember the (i, s) pair so s's inherited tags can be folded into
    # i later.
    while q:
        i = q.popleft()
        if i in cache:
            continue
        cache[i] = set()
        for (s,t) in self.transitions_to(i):
            q.append(s)
            if self.is_tagged(t,s,i):
                cache[i].add((self.tag(t,s,i),s, i))
                updateq.appendleft((i, s))
    # Phase 2: merge in reverse discovery order (appendleft + popleft)
    # so a predecessor's set is completed before it is folded into its
    # successor.  NOTE(review): correctness appears to rely on this
    # ordering for chains of tagged transitions — confirm with callers.
    while updateq:
        i = updateq.popleft()
        cache[i[0]].update(cache[i[1]])
    return cache[src]
returns all the tags that are relevant at this state cache should be a dictionary and it is updated by the function
def get_default_gentation(gender, orientation):
    """Return the default gentation for the given gender and orientation.

    Only the first letter of *gender* (lowercased) is significant.
    """
    gender_key = gender.lower()[0]
    orientation_key = orientation.lower()
    return gender_to_orientation_to_gentation[gender_key][orientation_key]
Return the default gentation for the given gender and orientation.
def sort(in_file, out_file):
    """Sort the lines of *in_file* lexicographically into *out_file*.

    :param in_file: path of the file to read.
    :param out_file: path of the file to write (overwritten if it exists).
    """
    # Context managers close the handles even on error; the original
    # leaked both file objects if an exception occurred mid-way.
    with open(in_file, 'r') as src:
        lines = sorted(src.readlines())
    with open(out_file, 'w') as dst:
        dst.writelines(lines)
Sorts the given file.
def _scale_shape(dshape, scale = (1,1,1)): nshape = np.round(np.array(dshape) * np.array(scale)) return tuple(nshape.astype(np.int))
Returns the shape after scaling (should be the same as the output shape of ``ndimage.zoom``).
def printParameters(self):
    """Print this pooler's parameter values to stdout. Useful for debugging.

    NOTE: uses Python 2 print statements; this module is Python 2 only.
    """
    print "------------PY  TemporalPooler Parameters ------------------"
    print "numInputs                  = ", self.getNumInputs()
    print "numColumns                 = ", self.getNumColumns()
    print "columnDimensions           = ", self._columnDimensions
    print "numActiveColumnsPerInhArea = ", self.getNumActiveColumnsPerInhArea()
    print "potentialPct               = ", self.getPotentialPct()
    print "globalInhibition           = ", self.getGlobalInhibition()
    print "localAreaDensity           = ", self.getLocalAreaDensity()
    print "stimulusThreshold          = ", self.getStimulusThreshold()
    print "synPermActiveInc           = ", self.getSynPermActiveInc()
    print "synPermInactiveDec         = ", self.getSynPermInactiveDec()
    print "synPermConnected           = ", self.getSynPermConnected()
    print "minPctOverlapDutyCycle     = ", self.getMinPctOverlapDutyCycles()
    print "dutyCyclePeriod            = ", self.getDutyCyclePeriod()
    print "boostStrength              = ", self.getBoostStrength()
    print "spVerbosity                = ", self.getSpVerbosity()
    print "version                    = ", self._version
Useful for debugging.
def execute(filelocation, outpath, executable, args=None, switchArgs=None):
    """Run a Java jar tool on the given input file(s), streaming the tool's
    stderr to this process's stdout until it exits.

    :param filelocation: a single input file path or a list of paths.
    :param outpath: path passed to the tool as ``-output_path``.
    :param executable: complete file path of the .jar file to run.
    :param args: list of (name, value) argument pairs, e.g.
        [('precursor_tolerance', '0.5'), ('rounds', '3')].
    :param switchArgs: list of value-less switch names, e.g.
        ['fast_mode', 'keep_binary_files'].
    """
    procArgs = ['java', '-jar', executable]
    procArgs.extend(['-output_path', outpath])
    if args is not None:
        for arg in args:
            procArgs.extend(['-'+arg[0], arg[1]])
    if switchArgs is not None:
        procArgs.extend(['-'+arg for arg in switchArgs])
    # aux.toList wraps a single path in a list so both call styles work.
    procArgs.extend(aux.toList(filelocation))
    proc = subprocess.Popen(procArgs, stderr=subprocess.PIPE)
    # Stream stderr one character at a time until EOF and process exit.
    # NOTE(review): comparing read(1) against '' only terminates on
    # Python 2 (text pipes); on Python 3 the pipe yields bytes, so this
    # loop would never see '' — confirm the intended interpreter.
    while True:
        out = proc.stderr.read(1)
        if out == '' and proc.poll() != None:
            break
        if out != '':
            sys.stdout.write(out)
            sys.stdout.flush()
Executes the dinosaur tool on Windows operating systems. :param filelocation: either a single mgf file path or a list of file paths. :param outpath: path of the output file, file must not exist :param executable: must specify the complete file path of the spectra-cluster-cli.jar file, supported version is 1.0.2 BETA. :param args: list of arguments containing a value, for details see the spectra-cluster-cli help. Arguments should be added as tuples or a list. For example: [('precursor_tolerance', '0.5'), ('rounds', '3')] :param switchArgs: list of arguments not containing a value, for details see the spectra-cluster-cli help. Arguments should be added as strings. For example: ['fast_mode', 'keep_binary_files']
def get_isa(self, oneq_type='Xhalves', twoq_type='CZ') -> ISA:
    """Construct an ISA suitable for targeting by compilation.

    :param oneq_type: The family of one-qubit gates to target
    :param twoq_type: The family of two-qubit gates to target
    """
    retyped_qubits = [
        Qubit(id=q.id, type=oneq_type, dead=q.dead)
        for q in self._isa.qubits
    ]
    retyped_edges = [
        Edge(targets=e.targets, type=twoq_type, dead=e.dead)
        for e in self._isa.edges
    ]
    return ISA(retyped_qubits, retyped_edges)
Construct an ISA suitable for targeting by compilation. This will raise an exception if the requested ISA is not supported by the device. :param oneq_type: The family of one-qubit gates to target :param twoq_type: The family of two-qubit gates to target
def process_infile(f, fileStore):
    """Import a file or (nested) array of files into the jobstore.

    Path strings are replaced by ``(filestore_id, original_name)`` tuples
    (Toil does not preserve a file's original name on import, so the tuple
    keeps track of it); tuples are passed through unchanged.

    :param f: tuple, string, or arbitrarily nested list of strings.
    :param fileStore: filestore object used to load files into the jobstore.
    :return: A tuple or an array of tuples.
    """
    if isinstance(f, tuple):
        # Already imported.
        return f
    elif isinstance(f, list):
        return process_array_infile(f, fileStore)
    elif isinstance(f, basestring):
        return process_single_infile(f, fileStore)
    else:
        # Bug fix: the original format string had no placeholder, so the
        # offending value never appeared in the error message.
        raise RuntimeError('Error processing file: {}'.format(f))
Takes an array of files or a single file and imports into the jobstore. This returns a tuple or an array of tuples replacing all previous path strings. Toil does not preserve a file's original name upon import and so the tuple keeps track of this with the format: '(filepath, preserveThisFilename)' :param f: String or an Array. The smallest element must be a string, so: an array of strings, an array of arrays of strings... etc. :param fileStore: The filestore object that is called to load files into the filestore. :return: A tuple or an array of tuples.
def create(self, qname):
    """Create a RackSpace Queue named *qname*.

    :returns: True if the queue already exists or was created,
        False if the RackSpace API raised an error.
    """
    try:
        if self.exists(qname):
            log.error('Queues "%s" already exists. Nothing done.', qname)
            return True
        self.conn.create(qname)
        return True
    # Bug fix: the original wrote ``except pyrax.exceptions``, which is a
    # module, not an exception class — catching it raises TypeError the
    # moment any error occurs.  Catch the pyrax exception base class.
    except pyrax.exceptions.PyraxException as err_msg:
        log.error('RackSpace API got some problems during creation: %s',
                  err_msg)
        return False
Create RackSpace Queue.
def main():
    """
    NAME
        convert2unix.py

    DESCRIPTION
        converts mac or dos formatted file to unix file in place

    SYNTAX
        convert2unix.py FILE

    OPTIONS
        -h prints help and quits
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    path = sys.argv[1]
    with open(path, 'r') as src:
        contents = src.readlines()
    with open(path, 'w') as dst:
        dst.writelines(contents)
NAME convert2unix.py DESCRIPTION converts mac or dos formatted file to unix file in place SYNTAX convert2unix.py FILE OPTIONS -h prints help and quits
def cpos(string, chars, start):
    """Find the first occurrence in a string of a character belonging to a
    collection of characters, starting at a specified location, searching
    forward.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cpos_c.html

    :param string: Any character string.
    :type string: str
    :param chars: A collection of characters.
    :type chars: str
    :param start: Position to begin looking for one of chars.
    :type start: int
    :return: Index of the first matching character at or after *start*.
    :rtype: int
    """
    return libspice.cpos_c(stypes.stringToCharP(string),
                           stypes.stringToCharP(chars),
                           ctypes.c_int(start))
Find the first occurrence in a string of a character belonging to a collection of characters, starting at a specified location, searching forward. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cpos_c.html :param string: Any character string. :type string: str :param chars: A collection of characters. :type chars: str :param start: Position to begin looking for one of chars. :type start: int :return: The index of the first character of str at or following index start that is in the collection chars. :rtype: int
def _get_colors(n):
    """Return *n* unique, "evenly" spaced hex colors for project backgrounds.

    Args:
        n (int): The number of unique colors wanted.

    Returns:
        colors (list of str): The colors in hex form.
    """
    import matplotlib.pyplot as plt
    from matplotlib.colors import rgb2hex as r2h
    from numpy import linspace
    cmap = plt.get_cmap('nipy_spectral')
    positions = linspace(0.05, .95, n)
    return [r2h(cmap(pos)) for pos in positions]
Returns n unique and "evenly" spaced colors for the backgrounds of the projects. Args: n (int): The number of unique colors wanted. Returns: colors (list of str): The colors in hex form.
def capakey_rest_gateway_request(url, headers=None, params=None):
    """Utility function that helps making requests to the CAPAKEY REST
    service.

    :param string url: URL to request.
    :param dict headers: Headers to send with the URL.
    :param dict params: Parameters to send with the URL.
    :returns: Result of the call.
    :raises GatewayResourceNotFoundException: on any HTTP error status.
    :raises GatewayRuntimeException: on connection or other request errors.
    """
    # Fix: the original signature used mutable default arguments ({}),
    # which are shared between calls; use None sentinels instead.
    if headers is None:
        headers = {}
    if params is None:
        params = {}
    try:
        res = requests.get(url, headers=headers, params=params)
        res.raise_for_status()
        return res
    except requests.ConnectionError as ce:
        raise GatewayRuntimeException(
            'Could not execute request due to connection problems:\n%s' % repr(ce),
            ce
        )
    except requests.HTTPError:
        # NOTE(review): every HTTP error status (including 5xx) is mapped
        # to "resource not found" — confirm this matches the service.
        raise GatewayResourceNotFoundException()
    except requests.RequestException as re:
        raise GatewayRuntimeException(
            'Could not execute request due to:\n%s' % repr(re),
            re
        )
Utility function that helps making requests to the CAPAKEY REST service. :param string url: URL to request. :param dict headers: Headers to send with the URL. :param dict params: Parameters to send with the URL. :returns: Result of the call.
def _select(self, pointer):
    """Get a YAMLChunk referenced by *pointer*, sharing this chunk's parsed
    trees and label but carrying its own copy of the key associations."""
    assoc_snapshot = copy(self._key_association)
    return YAMLChunk(
        self._ruamelparsed,
        pointer=pointer,
        label=self._label,
        strictparsed=self._strictparsed,
        key_association=assoc_snapshot,
    )
Get a YAMLChunk referenced by a pointer.
def populate_fw_dev(self, fw_id, mgmt_ip, new):
    """Repopulate firewall/device bookkeeping after a restart.

    Looks up the resource entry whose ``mgmt_ip`` matches; when *new* is
    truthy, bumps its usage count and records *fw_id*.

    :returns: (obj_dict, mgmt_ip) of the matching entry, or (None, None)
        when no entry matches.
    """
    for entry in self.res.values():
        if mgmt_ip != entry.get('mgmt_ip'):
            continue
        if new:
            entry['used'] = entry.get('used') + 1
            entry['fw_id_lst'].append(fw_id)
        return entry.get('obj_dict'), entry.get('mgmt_ip')
    return None, None
Populate the class after a restart.
def get_default_widget():
    """Return the widget named by settings.BLEACH_DEFAULT_WIDGET, falling
    back to forms.Textarea when that setting is absent."""
    if hasattr(settings, 'BLEACH_DEFAULT_WIDGET'):
        return load_widget(settings.BLEACH_DEFAULT_WIDGET)
    return forms.Textarea
Get the default widget or the widget defined in settings
def parse_extras(extras_str):
    """Turn a string of extras into a sorted, de-duplicated, lowercased
    list of extras."""
    from pkg_resources import Requirement
    fake_requirement = "fakepkg{0}".format(extras_to_string(extras_str))
    parsed_extras = Requirement.parse(fake_requirement).extras
    return sorted(dedup([extra.lower() for extra in parsed_extras]))
Turn a string of extras into a parsed extras list
def map_data(self):
    """Read self.src_file as comma-separated lines and print the split
    columns of each record (mapping from CSV to aikif structures)."""
    with open(self.src_file, "r") as src:
        for record in src:
            print(record.split(','))
provides a mapping from the CSV file to the aikif data structures.