code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _types_match(type1, type2):
    """Return False only when the two type descriptors provably differ.

    Non-string inputs are conservatively treated as compatible (True).
    String type names are compared after stripping a trailing '?'
    (optional-type marker), so e.g. 'int?' matches 'int'.
    """
    both_strings = (isinstance(type1, six.string_types)
                    and isinstance(type2, six.string_types))
    if not both_strings:
        return True
    return type1.rstrip('?') == type2.rstrip('?')
Returns False only if it can show that no value of type1 can possibly match type2. Supports only a limited selection of types.
def append(self, data, segment=0):
    """Append a single row to the underlying SFrame builder.

    A non-iterable ``data`` value is wrapped in a one-element list
    before being forwarded, so scalars can be appended directly.
    A type mismatch surfaces as a RuntimeError from the builder.
    """
    row = data if hasattr(data, '__iter__') else [data]
    self._builder.append(row, segment)
Append a single row to an SFrame. Throws a RuntimeError if one or more column's type is incompatible with a type appended. Parameters ---------- data : iterable An iterable representation of a single row. segment : int The segment to write this...
def largest_connected_set(C, directed=True):
    r"""Largest connected component of the graph with count matrix `C`.

    Dense input is first converted to CSR so that both dense and sparse
    matrices can be handled by the single sparse implementation.

    Parameters
    ----------
    C : scipy.sparse matrix or dense ndarray
        Count matrix specifying edge weights.
    directed : bool, optional
        Whether connectivity is computed for a directed graph.
    """
    matrix = csr_matrix(C) if isdense(C) else C
    return sparse.connectivity.largest_connected_set(matrix, directed=directed)
r"""Largest connected component for a directed graph with edge-weights given by the count matrix. Parameters ---------- C : scipy.sparse matrix Count matrix specifying edge weights. directed : bool, optional Whether to compute connected components for a directed or undirected...
def open_in_composer(self): impact_layer = self.impact_function.analysis_impacted report_path = dirname(impact_layer.source()) impact_report = self.impact_function.impact_report custom_map_report_metadata = impact_report.metadata custom_map_report_product = ( custom_m...
Open in layout designer a given MapReport instance. .. versionadded:: 4.3.0
def _validation_error(prop, prop_type, prop_value, expected): if prop_type is None: attrib = 'value' assigned = prop_value else: attrib = 'type' assigned = prop_type raise ValidationError( 'Invalid property {attrib} for {prop}:\n\t{attrib}: {assigned}\n\texpected: {ex...
Default validation for updated properties
def func( coroutine: Union[str, Function, Callable], *, name: Optional[str] = None, keep_result: Optional[SecondsTimedelta] = None, timeout: Optional[SecondsTimedelta] = None, max_tries: Optional[int] = None, ) -> Function: if isinstance(coroutine, Function): return coroutine if ...
Wrapper for a job function which lets you configure more settings. :param coroutine: coroutine function to call, can be a string to import :param name: name for function, if None, ``coroutine.__qualname__`` is used :param keep_result: duration to keep the result for, if 0 the result is not kept :param ...
def get_ga_client_id(self): request = self.get_ga_request() if not request or not hasattr(request, 'session'): return super(GARequestErrorReportingMixin, self).get_ga_client_id() if 'ga_client_id' not in request.session: client_id = self.ga_cookie_re.match(request.COOKIES...
Retrieve the client ID from the Google Analytics cookie, if available, and save in the current session
def _read_para_seq_data(self, code, cbit, clen, *, desc, length, version): if clen != 4: raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format') _seqn = self._read_unpack(4) seq_data = dict( type=desc, critical=cbit, length=clen, ...
Read HIP SEQ_DATA parameter. Structure of HIP SEQ_DATA parameter [RFC 6078]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ...
def cmd_karma_bulk(infile, jsonout, badonly, verbose): if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') data = infile.read() result = {} for ip in data.split('\n'): if ip: logging.info('Checking ' + ip) response = karma(ip) if ...
Show which IP addresses are inside blacklists using the Karma online service. Example: \b $ cat /var/log/auth.log | habu.extract.ipv4 | habu.karma.bulk 172.217.162.4 spamhaus_drop,alienvault_spamming 23.52.213.96 CLEAN 190.210.43.70 alienvault_malicious
def get_space_id(deployment_name, space_name, token_manager=None, app_url=defaults.APP_URL): spaces = get_spaces(deployment_name, token_manager=token_manager, app_url=app_url) for space in spaces: if space...
get the space id that relates to the space name provided
def oauth_access(
    self, *, client_id: str, client_secret: str, code: str, **kwargs
) -> SlackResponse:
    """Exchange a temporary OAuth verifier code for an access token.

    Args:
        client_id: Issued when you created your application.
        client_secret: Issued when you created your application.
        code: The code param returned via the OAuth callback.
    """
    payload = dict(kwargs)
    payload["client_id"] = client_id
    payload["client_secret"] = client_secret
    payload["code"] = code
    return self.api_call("oauth.access", data=payload)
Exchanges a temporary OAuth verifier code for an access token. Args: client_id (str): Issued when you created your application. e.g. '4b39e9-752c4' client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1' code (str): The code param returned via the...
def delete(self, indexes): indexes = [indexes] if not isinstance(indexes, (list, blist)) else indexes if all([isinstance(i, bool) for i in indexes]): if len(indexes) != len(self._index): raise ValueError('boolean indexes list must be same size of existing indexes') ...
Delete rows from the DataFrame :param indexes: either a list of values or list of booleans for the rows to delete :return: nothing
def linear_deform(template, displacement, out=None):
    """Linearized deformation: evaluate ``template`` at ``x + v(x)``.

    Parameters
    ----------
    template : `DiscreteLpElement`
        Template to be deformed by the displacement field.
    displacement : sequence of displacement components
        One component per spatial axis of the template space.
    out : optional
        Output buffer for the interpolated values.
    """
    points = template.space.points()
    for axis, component in enumerate(displacement):
        points[:, axis] += component.asarray().ravel()
    deformed = template.interpolation(points.T, out=out, bounds_check=False)
    return deformed.reshape(template.space.shape)
Linearized deformation of a template with a displacement field. The function maps a given template ``I`` and a given displacement field ``v`` to the new function ``x --> I(x + v(x))``. Parameters ---------- template : `DiscreteLpElement` Template to be deformed by a displacement field. ...
def set_lacp_timeout(self, name, value=None):
    """Configure the Port-Channel LACP fallback timeout.

    Args:
        name (str): The Port-Channel interface name.
        value (int): Fallback timeout; None removes the setting.

    Returns:
        True if the commands completed successfully.
    """
    base_cmd = 'port-channel lacp fallback timeout'
    commands = [
        'interface %s' % name,
        self.command_builder(base_cmd, value=value),
    ]
    return self.configure(commands)
Configures the Port-Channel LACP fallback timeout The fallback timeout configures the period an interface in fallback mode remains in LACP mode without receiving a PDU. Args: name(str): The Port-Channel interface name value(int): port-channel lacp fallback timeout...
def random_gate(qubits: Union[int, Qubits]) -> Gate:
    r"""Return a random unitary gate on N qubits.

    Ref: "How to generate random matrices from the classical compact
    groups", Francesco Mezzadri, math-ph/0609050
    """
    N, qubits = qubits_count_tuple(qubits)
    matrix = scipy.stats.unitary_group.rvs(2 ** N)
    return Gate(matrix, qubits=qubits, name='RAND{}'.format(N))
r"""Returns a random unitary gate on K qubits. Ref: "How to generate random matrices from the classical compact groups" Francesco Mezzadri, math-ph/0609050
def from_handle(fh, stream_default='fasta'):
    """Look up the BioPython file type for a file-like object.

    The standard streams carry no meaningful filename, so for stdin,
    stdout and stderr ``stream_default`` is returned instead of
    consulting the name.
    """
    standard_streams = (sys.stdin, sys.stdout, sys.stderr)
    if fh in standard_streams:
        return stream_default
    return from_filename(fh.name)
Look up the BioPython file type corresponding to a file-like object. For stdin, stdout, and stderr, ``stream_default`` is used.
def _rshift_arithmetic(self, shift_amount): if self.is_empty: return self nsplit = self._nsplit() if len(nsplit) == 1: highest_bit_set = self.lower_bound > StridedInterval.signed_max_int(nsplit[0].bits) l = self.lower_bound >> shift_amount u = self...
Arithmetic shift right with a concrete shift amount :param int shift_amount: Number of bits to shift right. :return: The new StridedInterval after right shifting :rtype: StridedInterval
def _apply(self, func, name, window=None, center=None, check_minp=None, **kwargs): def f(x, name=name, *args): x = self._shallow_copy(x) if isinstance(name, str): return getattr(x, name)(*args, **kwargs) return x.apply(name, *args, **kwargs) ...
Dispatch to apply; we are stripping all of the _apply kwargs and performing the original function call on the grouped object.
def highlight_min(self, subset=None, color='yellow', axis=0):
    """Shade the background of the minimum value(s).

    Parameters
    ----------
    subset : IndexSlice, default None
        A valid slice for ``data`` to limit the style application to.
    color : str, default 'yellow'
        Background color used for the highlight.
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        Axis along which minima are located.
    """
    # Delegates to the shared handler with max_=False to select minima.
    return self._highlight_handler(
        subset=subset, color=color, axis=axis, max_=False)
Highlight the minimum by shading the background. Parameters ---------- subset : IndexSlice, default None a valid slice for ``data`` to limit the style application to. color : str, default 'yellow' axis : {0 or 'index', 1 or 'columns', None}, default 0 app...
def find_worktree_git_dir(dotgit): try: statbuf = os.stat(dotgit) except OSError: return None if not stat.S_ISREG(statbuf.st_mode): return None try: lines = open(dotgit, 'r').readlines() for key, value in [line.strip().split(': ') for line in lines]: i...
Search for a gitdir for this worktree.
def process_byte(self, tag):
    """Process a byte-type tag.

    Stamps the tag with the register's current address, then advances
    the normal register by one byte.
    """
    register = self.normal_register
    tag.set_address(register.current_address)
    register.move_to_next_address(1)
Process byte type tags
def _global_step(hparams): step = tf.to_float(tf.train.get_or_create_global_step()) multiplier = hparams.optimizer_multistep_accumulate_steps if not multiplier: return step tf.logging.info("Dividing global step by %d for multi-step optimizer." % multiplier) return step / tf.to_float(mult...
Adjust global step if a multi-step optimizer is used.
def get_flattened_bsp_keys_from_schema(schema): keys = [] for key in schema.declared_fields.keys(): field = schema.declared_fields[key] if isinstance(field, mm.fields.Nested) and \ isinstance(field.schema, BoundSpatialPoint): keys.append("{}.{}".format(key, "position"...
Returns the flattened keys of BoundSpatialPoints in a schema :param schema: schema :return: list
def shear(cls, x_angle=0, y_angle=0):
    """Create a shear transform along one or both axes.

    :param x_angle: Angle in degrees to shear along the x-axis.
    :type x_angle: float
    :param y_angle: Angle in degrees to shear along the y-axis.
    :type y_angle: float
    :rtype: Affine
    """
    shear_x = math.tan(math.radians(x_angle))
    shear_y = math.tan(math.radians(y_angle))
    members = (
        1.0, shear_y, 0.0,
        shear_x, 1.0, 0.0,
        0.0, 0.0, 1.0,
    )
    return tuple.__new__(cls, members)
Create a shear transform along one or both axes. :param x_angle: Angle in degrees to shear along the x-axis. :type x_angle: float :param y_angle: Angle in degrees to shear along the y-axis. :type y_angle: float :rtype: Affine
def _complete_multipart_upload(self, bucket_name, object_name, upload_id, uploaded_parts): is_valid_bucket_name(bucket_name) is_non_empty_string(object_name) is_non_empty_string(upload_id) ordered_parts = [] for part in sorted(uploaded_parts.key...
Complete an active multipart upload request. :param bucket_name: Bucket name of the multipart request. :param object_name: Object name of the multipart request. :param upload_id: Upload id of the active multipart request. :param uploaded_parts: Key, Value dictionary of uploaded parts.
def metis(hdf5_file_name, N_clusters_max):
    """METIS graph-partitioning wrapper (Karypis & Kumar).

    Partitions the induced similarity graph passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle
        Source of the similarity graph.
    N_clusters_max : int
        Maximum number of clusters.

    Returns
    -------
    labels : array of shape (n_samples,)
        Cluster label for each sample.
    """
    import os

    file_name = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, file_name)
    # Delete the temporary graph file in-process instead of shelling out
    # to 'rm': portable, no subprocess overhead, and errors are raised
    # rather than silently ignored via the unchecked return code.
    os.remove(file_name)
    return labels
METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph passed by CSPA. Parameters ---------- hdf5_file_name : string or file handle N_clusters_max : int Returns ------- labels : array of shape (n_samples,) A vector of labels denoting the clu...
def camelize(word):
    """Convert a word from lower_with_underscores to CamelCase.

    Args:
        word: The string to convert.

    Returns:
        The modified string.
    """
    # Runs of non-alphanumeric characters (except ':') separate words.
    # Empty tokens -- produced by leading/trailing or consecutive
    # separators, or an empty input -- previously raised IndexError on
    # w[0]; they are now skipped.
    tokens = re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' ')
    return ''.join(w[0].upper() + w[1:] for w in tokens if w)
Convert a word from lower_with_underscores to CamelCase. Args: word: The string to convert. Returns: The modified string.
def channelize(gen, channels):
    """Split a multi-channel sample generator into per-channel generators.

    ``gen`` yields n-tuples of samples; the result is a list of
    ``channels`` generators, the i-th of which yields only the i-th
    element of each tuple.  ``itertools.tee`` keeps the copies fed from
    the single source so channels stay synchronized.
    """
    def channel_samples(source, index):
        # Project one channel out of the tuple stream, lazily.
        return (frame[index] for frame in source)

    copies = itertools.tee(gen, channels)
    return [channel_samples(copy, idx) for idx, copy in enumerate(copies)]
Break multi-channel generator into one sub-generator per channel Takes a generator producing n-tuples of samples and returns n generators, each producing samples for a single channel. Since multi-channel generators are the only reasonable way to synchronize samples across channels, and the sampler functions only ...
def fbank(wav_path, flat=True): (rate, sig) = wav.read(wav_path) if len(sig) == 0: logger.warning("Empty wav: {}".format(wav_path)) fbank_feat = python_speech_features.logfbank(sig, rate, nfilt=40) energy = extract_energy(rate, sig) feat = np.hstack([energy, fbank_feat]) delta_feat = pyt...
Currently grabs log Mel filterbank, deltas and double deltas.
def section_term_branch_orders(neurites, neurite_type=NeuriteType.all):
    """Branch orders of terminating sections in a collection of neurites."""
    # Restrict iteration to leaf sections; branch order is computed per
    # section by the shared sectionfunc helper.
    return map_sections(
        sectionfunc.branch_order,
        neurites,
        neurite_type=neurite_type,
        iterator_type=Tree.ileaf,
    )
Termination section branch orders in a collection of neurites
def _netstat_route_netbsd(): ret = [] cmd = 'netstat -f inet -rn | tail -n+5' out = __salt__['cmd.run'](cmd, python_shell=True) for line in out.splitlines(): comps = line.split() ret.append({ 'addr_family': 'inet', 'destination': comps[0], 'gateway': c...
Return netstat routing information for NetBSD
async def create_lease_store_if_not_exists_async(self): try: await self.host.loop.run_in_executor( self.executor, functools.partial( self.storage_client.create_container, self.lease_container_name)) except Exception as e...
Create the lease store if it does not exist, do nothing if it does exist. :return: `True` if the lease store already exists or was created successfully, `False` if not. :rtype: bool
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.

    The string is valid when it is a dotted-quad address, a forward
    slash (/), and a subnet bitmask in dotted-quad format.

    :param s: the address/netmask string to validate
    :returns: True when well formed, False otherwise
    :raises TypeError: if ``s`` is not a string
    """
    if not isinstance(s, basestring):
        raise TypeError("expected string or unicode")
    if '/' not in s:
        return False
    # maxsplit=1: everything after the first '/' is the mask, so inputs
    # like '1.2.3.4/24/junk' fail validation cleanly instead of raising
    # ValueError from 3-way tuple unpacking (old code used maxsplit=2).
    start, mask = s.split('/', 1)
    return validate_ip(start) and validate_netmask(mask)
Validate a dotted-quad ip address including a netmask. The string is considered a valid dotted-quad address with netmask if it consists of one to four octets (0-255) separated by periods (.) followed by a forward slash (/) and a subnet bitmask which is expressed in dotted-quad format. >>> validat...
def expand(doc, doc_url="param://", params=None): if doc_url.find("://") == -1: Log.error("{{url}} must have a prototcol (eg http://) declared", url=doc_url) url = URL(doc_url) url.query = set_default(url.query, params) phase1 = _replace_ref(doc, url) phase2 = _replace_locals(phase1, [phase1...
ASSUMING YOU ALREADY PULLED THE doc FROM doc_url, YOU CAN STILL USE THE EXPANDING FEATURE USE mo_json_config.expand({}) TO ASSUME CURRENT WORKING DIRECTORY :param doc: THE DATA STRUCTURE FROM JSON SOURCE :param doc_url: THE URL THIS doc CAME FROM (DEFAULT USES params AS A DOCUMENT SOURCE) :param pa...
def addRectAnnot(self, rect):
    """Add a 'Rectangle' annotation to this page.

    Returns the annotation wrapped with ownership and a weak parent
    reference, registered in the page's annotation refs; returns None
    when the underlying call yields nothing.
    """
    CheckParent(self)
    annot = _fitz.Page_addRectAnnot(self, rect)
    if not annot:
        return None
    annot.thisown = True
    annot.parent = weakref.proxy(self)
    self._annot_refs[id(annot)] = annot
    return annot
Add a 'Rectangle' annotation.
def start_router(router_class, router_name):
    """Instantiate a router actor, register it, and start it.

    Args:
        router_class: The router class to instantiate.
        router_name: The name to give to the router.

    Returns:
        A handle to the newly started router actor.
    """
    actor_handle = router_class.remote(router_name)
    ray.experimental.register_actor(router_name, actor_handle)
    actor_handle.start.remote()
    return actor_handle
Wrapper for starting a router and register it. Args: router_class: The router class to instantiate. router_name: The name to give to the router. Returns: A handle to newly started router actor.
def _to_event_data(obj): if obj is None: return None if isinstance(obj, bool): return obj if isinstance(obj, int): return obj if isinstance(obj, float): return obj if isinstance(obj, str): return obj if isinstance(obj, bytes): return obj if isi...
Convert the specified object into a form that can be serialised by msgpack as event data. :param obj: The object to convert.
def load_stock_quantity(self):
    """Load held quantities for all stocks in the model.

    Opens a StocksInfo backed by this object's config, fills in each
    stock's quantity by symbol, and always closes the GnuCash book --
    previously an exception while loading left the book open.
    """
    info = StocksInfo(self.config)
    try:
        for stock in self.model.stocks:
            stock.quantity = info.load_stock_quantity(stock.symbol)
    finally:
        info.gc_book.close()
Loads quantities for all stocks
def mri_knee_data_8_channel():
    """Raw data for 8-channel MRI of a knee.

    SE measurement of a healthy volunteer's knee, rescaled so the
    reconstruction fits approximately in [0, 1].  Data source DOI:
    `10.5281/zenodo.800529`.
    """
    url = 'https://zenodo.org/record/800529/files/3_rawdata_knee_8ch.mat'
    raw = get_data('3_rawdata_knee_8ch.mat', subset=DATA_SUBSET, url=url)
    rescaled = np.swapaxes(raw['rawdata'], 0, -1) * 9e3
    return flip(rescaled, 2)
Raw data for 8 channel MRI of a knee. This is an SE measurement of the knee of a healthy volunteer. The data has been rescaled so that the reconstruction fits approximately in [0, 1]. See the data source with DOI `10.5281/zenodo.800529`_ or the `project webpage`_ for further information. See...
def insert(self, part):
    """Insert a part into this assembly.

    Args:
        part (onshapepy.part.Part): the Part instance to insert.

    Returns:
        requests.Response: Onshape response data.
    """
    string_params = {key: str(value) for key, value in part.params.items()}
    return c.create_assembly_instance(
        self.uri.as_dict(), part.uri.as_dict(), string_params)
Insert a part into this assembly. Args: - part (onshapepy.part.Part) A Part instance that will be inserted. Returns: - requests.Response: Onshape response data
def get_days_in_month(year: int, month: int) -> int:
    """Return the number of days in the given month.

    Both arguments are 1-based (e.g. November = 11).
    """
    _, day_count = calendar.monthrange(year, month)
    return day_count
Returns number of days in the given month. 1-based numbers as arguments. i.e. November = 11
def _control_longitude(self): if self.lonm < 0.0: self.lonm = 360.0 + self.lonm if self.lonM < 0.0: self.lonM = 360.0 + self.lonM if self.lonm > 360.0: self.lonm = self.lonm - 360.0 if self.lonM > 360.0: self.lonM = self.lonM - 360.0
Control on longitude values
def _handle_aleph_keyword_view(dataset):
    """Translate the Aleph keyword tags to locally used data.

    Registers each keyword value with the shared handler, then removes
    the consumed "keyword_tags" entry from ``dataset``.
    """
    register_keyword = ViewController.aleph_kw_handler.add_keyword
    for tag in dataset.get("keyword_tags", []):
        register_keyword(tag["val"])
    # pop() only removes the key when present, mirroring the guarded del.
    dataset.pop("keyword_tags", None)
Translate the Aleph keywords to locally used data.
def _remove_white_background(image): from PIL import ImageMath, Image if image.mode == "RGBA": bands = image.split() a = bands[3] rgb = [ ImageMath.eval( 'convert(' 'float(x + a - 255) * 255.0 / float(max(a, 1)) * ' 'float(min(a...
Remove white background in the preview image.
def global_request(self, kind, data=None, wait=True): if wait: self.completion_event = threading.Event() m = Message() m.add_byte(cMSG_GLOBAL_REQUEST) m.add_string(kind) m.add_boolean(wait) if data is not None: m.add(*data) self._log(DEBUG,...
Make a global request to the remote host. These are normally extensions to the SSH2 protocol. :param str kind: name of the request. :param tuple data: an optional tuple containing additional data to attach to the request. :param bool wait: ``True`` i...
def init_app(self, app, config_prefix=None): self.kill_session = self.original_kill_session config_prefix = (config_prefix or 'JIRA').rstrip('_').upper() if not hasattr(app, 'extensions'): app.extensions = dict() if config_prefix.lower() in app.extensions: raise V...
Actual method to read JIRA settings from app configuration and initialize the JIRA instance. Positional arguments: app -- Flask application instance. Keyword arguments: config_prefix -- Prefix used in config key names in the Flask app's configuration. Useful for applications which ...
def beam(problem, beam_size=100, iterations_limit=0, viewer=None): return _local_search(problem, _all_expander, iterations_limit=iterations_limit, fringe_size=beam_size, random_initial_states=True, ...
Beam search. beam_size is the size of the beam. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.value, an...
def open(self, section_index=0):
    """Launch a help section.

    Single-word URIs are treated as URLs and opened via ``_open_url``;
    anything containing whitespace is executed as a shell command and
    waited on.
    """
    uri = self._sections[section_index][1]
    if len(uri.split()) == 1:
        self._open_url(uri)
    else:
        if self._verbose:
            # print() call form: valid on both Python 2 and 3; the
            # original `print "..."` statement is a SyntaxError on 3.
            print("running command: %s" % uri)
        p = popen(uri, shell=True)
        p.wait()
Launch a help section.
def on_chord_part_return(self, task, state, result, propagate=False): u with transaction.atomic(): chord_data = ChordData.objects.select_for_update().get( callback_result__task_id=task.request.chord[u'options'][u'task_id'] ) _ = TaskMeta.objects.update...
u""" Update the linking ChordData object and execute callback if needed. Parameters ---------- subtask: The subtask that just finished executing. Most useful values are stored on subtask.request. state: the status of the just-finished subtask. ...
def set_switch_state(self, state):
    """Set the switch state on the device, then mirror it locally."""
    self.set_service_value(
        self.switch_service,
        'Target',
        'newTargetValue',
        state,
    )
    # Keep the cached status in sync with what was just requested.
    self.set_cache_value('Status', state)
Set the switch state, also update local state.
def norm_squared(x, Mx=None, inner_product=ip_euclid): assert(len(x.shape) == 2) if Mx is None: rho = inner_product(x, x) else: assert(len(Mx.shape) == 2) rho = inner_product(x, Mx) if rho.shape == (1, 1): if abs(rho[0, 0].imag) > abs(rho[0, 0])*1e-10 or rho[0, 0].real < ...
Compute the norm^2 w.r.t. to a given scalar product.
def clone_repo(pkg, dest, repo, repo_dest, branch):
    """Clone `repo` (recursively, at `branch`) into `repo_dest`.

    `pkg` and `dest` are accepted for interface compatibility but are
    not used by this implementation.
    """
    args = ['clone', '--recursive', '-b', branch, repo, repo_dest]
    git(args)
Clone the Playdoh repo into a custom path.
def apply_adaptation(self, target_illuminant, adaptation='bradford'): logger.debug(" \- Original illuminant: %s", self.illuminant) logger.debug(" \- Target illuminant: %s", target_illuminant) if self.illuminant != target_illuminant: logger.debug(" \* Applying transformation from %...
This applies an adaptation matrix to change the XYZ color's illuminant. You'll most likely only need this during RGB conversions.
def combination(n, r):
    """Calculate the binomial coefficient nCr.

    :param n: total number of items (non-negative int)
    :param r: number of chosen items, with 0 <= r <= n
    :returns: n! // (r! * (n - r)!)

    The naive double recursion was O(2^n) and hit the recursion limit
    for modest n; the iterative multiplicative form is O(min(r, n-r))
    and stays exact because each intermediate product is divisible by i.
    """
    # Preserve the original base cases.
    if n == r or r == 0:
        return 1
    r = min(r, n - r)  # symmetry: C(n, r) == C(n, n - r)
    result = 1
    for i in range(1, r + 1):
        result = result * (n - r + i) // i
    return result
This function calculates nCr.
def get(self):
    """Return the formatted list of all ansible runs."""
    LOG.info('Returning all ansible runs')
    return [run_model.format_response(run)
            for run in self.backend_store.list_runs()]
Get run list
def indexables(self): if self._indexables is None: d = self.description self._indexables = [GenericIndexCol(name='index', axis=0)] for i, n in enumerate(d._v_names): dc = GenericDataIndexableCol( name=n, pos=i, values=[n], version=self.vers...
create the indexables from the table description
def copystat(self, target):
    """Copy permission bits, timestamps and flags from this path to `target`.

    Ownership is not copied (standard ``shutil.copystat`` semantics).
    """
    destination = self._to_backend(target)
    shutil.copystat(self.path, destination)
Copies the permissions, times and flags from this to the `target`. The owner is not copied.
def connect(*cmds, **kwargs): stdin = kwargs.get("stdin") env = kwargs.get("env", os.environ) timeout = kwargs.get("timeout") end = len(cmds) - 1 @contextmanager def inner(idx, inp): with stream(cmds[idx], stdin=inp, env=env, timeout=timeout) as s: if idx == end: ...
Connects multiple command streams together and yields the final stream. Args: cmds (list): list of commands to pipe together. Each command will be an input to ``stream``. stdin (file like object): stream to use as the first command's standard input. env (dict): The e...
def find_group_consistencies(groups1, groups2):
    r"""Return the groups common to both partitions.

    Each group is canonicalized to a sorted tuple so element order
    inside a group does not matter; the result is the (order-unspecified)
    list of groups present in both inputs.
    """
    def canonical(groups):
        return {tuple(sorted(group)) for group in groups}

    return list(canonical(groups1) & canonical(groups2))
r""" Returns a measure of group consistency Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> groups1 = [[1, 2, 3], [4], [5, 6]] >>> groups2 = [[1, 2], [4], [5, 6]] >>> common_groups = find_group_consistencies(groups1, groups2) >>> result...
def multiply(self, number):
    """Return a new Vector equal to this vector scaled by `number`."""
    scaled = [component * number for component in self.to_list()]
    return self.from_list(scaled)
Return a Vector as the product of the vector and a real number.
def calc_log_size(request, calc_id):
    """Get the current number of lines in the log for `calc_id`.

    Returns a 404 response when the calculation is unknown.
    """
    try:
        log_size = logs.dbcmd('get_log_size', calc_id)
    except dbapi.NotFound:
        return HttpResponseNotFound()
    return HttpResponse(content=json.dumps(log_size), content_type=JSON)
Get the current number of lines in the log
def check_output(self, cmd, timeout=None, keep_rc=False, env=None):
    """Run `cmd` through ``subproc.call``.

    Subclasses can override to provide special environment setup,
    command prefixes, etc.  When no timeout is given, falls back to
    ``self.timeout``.
    """
    effective_timeout = timeout or self.timeout
    return subproc.call(cmd, timeout=effective_timeout,
                        keep_rc=keep_rc, env=env)
Subclasses can override to provide special environment setup, command prefixes, etc.
def output_file(self, _container):
    """Record the output path of a chroot container in the config.

    Prompts before overwriting an existing path; declining the
    overwrite exits the program.
    """
    path = local.path(_container)
    if path.exists() and not ui.ask(
            "Path '{0}' already exists. Overwrite?".format(path)):
        sys.exit(0)
    CFG["container"]["output"] = str(path)
Find and writes the output path of a chroot container.
def clear_and_configure(config=None, bind_in_runtime=True):
    """Clear the existing injector and create a new one.

    Both steps happen under the injector lock so no other thread can
    observe the cleared-but-not-reconfigured state.
    """
    with _INJECTOR_LOCK:
        clear()
        return configure(config, bind_in_runtime=bind_in_runtime)
Clear an existing injector and create another one with a callable config.
def validate_value(self, value):
    """Run the underlying form field's full validation pipeline on `value`.

    Returns the cleaned (python-typed) value; validation errors
    propagate from the field itself.
    """
    field = self.instance.preference.setup_field()
    cleaned = field.to_python(value)
    field.validate(cleaned)
    field.run_validators(cleaned)
    return cleaned
We call validation from the underlying form field
def mod_issquare(a, p):
    """Return whether `a` is a quadratic residue modulo the odd prime `p`.

    Uses Euler's criterion: a is a square mod p iff
    a**((p-1)//2) == 1 (mod p).  Zero is treated as a square.
    """
    if not a:
        return True
    exponent = p // 2
    return pow(a, exponent, p) == 1
Returns whether `a' is a square modulo p
def apply(self, doc): if not isinstance(doc, Document): raise TypeError( "Input Contexts to MentionSentences.apply() must be of type Document" ) for sentence in doc.sentences: yield TemporarySpanMention( char_start=0, char_end=len(sente...
Generate MentionSentences from a Document by parsing all of its Sentences. :param doc: The ``Document`` to parse. :type doc: ``Document`` :raises TypeError: If the input doc is not of type ``Document``.
def msg_intro(self):
    """Render the centered introductory message.

    The delimiter is styled as a minor attribute before being
    substituted into the intro format string.
    """
    styled_delim = self.style.attr_minor(self.style.delimiter)
    text = self.intro_msg_fmt.format(delim=styled_delim).rstrip()
    return self.term.center(text)
Introductory message disabled above heading.
def copy_script(self, filename, id_=-1):
    """Copy a script to every child repository.

    Args:
        filename: String path to the local file to copy.
        id_: Integer ID to associate the script with (used by JDS
            repositories); defaults to -1.
    """
    for repository in self._children:
        repository.copy_script(filename, id_)
Copy a script to all repositories. Takes into account whether a JSS has been migrated. See the individual DistributionPoint types for more information. Args: filename: String path to the local file to copy. id_: Integer ID you wish to associate script with for a JDS ...
def simple_moving_matrix(x, n=10):
    """Create a simple moving-window matrix.

    Parameters
    ----------
    x : ndarray
        Input samples; multi-column input is first averaged across
        columns.
    n : integer
        Number of sample points per window.

    Returns
    -------
    ndarray
        A (len(x) - 2*(n//2)) x n matrix of sliding windows, useful for
        computing confidence intervals of a simple moving average.
    """
    if x.ndim > 1 and len(x[0]) > 1:
        x = np.average(x, axis=1)
    # Floor division: `n / 2` is a float on Python 3 and breaks range().
    h = n // 2
    # Odd n needs one extra trailing sample to reach window width n.
    o = 0 if h * 2 == n else 1
    windows = []
    for i in range(h, len(x) - h):
        windows.append(x[i - h:i + h + o])
    return np.array(windows)
Create simple moving matrix. Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make average Returns ------- ndarray A n x n numpy array which will be useful for calculating confidentail interval of simple moving ...
def state_range_type(self) -> Sequence[str]:
    """The range type of each state fluent in canonical order.

    Returns:
        Sequence[str]: one range type per state fluent.
    """
    domain = self.domain
    return self._fluent_range_type(
        domain.state_fluents, domain.state_fluent_ordering)
The range type of each state fluent in canonical order. Returns: Sequence[str]: A tuple of range types representing the range of each fluent.
def initialize_model(self, root_node):
    """Initialize the Model using the given root node.

    The root-node swap is wrapped in begin/endResetModel so attached
    views refresh, and model triggers are re-enabled.

    :param root_node: Graph root node.
    :type root_node: DefaultNode
    :return: Method success.
    :rtype: bool
    """
    LOGGER.debug("> Initializing model with '{0}' root node.".format(root_node))
    self.beginResetModel()
    self.root_node = root_node
    self.enable_model_triggers(True)
    self.endResetModel()
    return True
Initializes the Model using given root node. :param root_node: Graph root node. :type root_node: DefaultNode :return: Method success :rtype: bool
def mysql_batch_and_fetch(mysql_config, *sql_queries): import MySQLdb as mydb import sys import gc if len(sql_queries) == 1: if isinstance(sql_queries[0], str): sql_queries = sql_queries[0].split(";") if isinstance(sql_queries[0], (list, tuple)): sql_queries = sql...
Excute a series of SQL statements before the final Select query Parameters ---------- mysql_config : dict The user credentials as defined in MySQLdb.connect, e.g. mysql_conig = {'user': 'myname', 'passwd': 'supersecret', 'host': '<ip adress or domain>', 'db': '<myschema>'} sql_...
def _get_group_difference(self, sp_groups):
    """Return the two-way difference between stormpath and local groups.

    Returns a ``(missing_from_db, missing_from_sp)`` pair of sets of
    group names.
    """
    local_names = set(Group.objects.all().values_list('name', flat=True))
    remote_names = set(sp_groups)
    return (remote_names - local_names, local_names - remote_names)
Helper method for gettings the groups that are present in the local db but not on stormpath and the other way around.
def _unquote_or_none(s: Optional[str]) -> Optional[bytes]: if s is None: return s return url_unescape(s, encoding=None, plus=False)
None-safe wrapper around url_unescape to handle unmatched optional groups correctly. Note that args are passed as bytes so the handler can decide what encoding to use.
def filter_by_func(self, func:Callable)->'ItemList':
    "Only keep elements for which `func` returns `True`."
    kept = [item for item in self.items if func(item)]
    self.items = array(kept)
    return self
Only keep elements for which `func` returns `True`.
def buffered_generator(source_gen, buffer_size=2, use_multiprocessing=False): r if buffer_size < 2: raise RuntimeError("Minimal buffer_ size is 2!") if use_multiprocessing: print('WARNING seems to freeze if passed in a generator') if False: pool = multiprocessing.Pool(pro...
r""" Generator that runs a slow source generator in a separate process. My generate function still seems faster on test cases. However, this function is more flexible in its compatability. Args: source_gen (iterable): slow generator buffer_size (int): the maximal number of items to pre...
def _find_vm(name, data, quiet=False): for hv_ in data: if not isinstance(data[hv_], dict): continue if name in data[hv_].get('vm_info', {}): ret = {hv_: {name: data[hv_]['vm_info'][name]}} if not quiet: __jid_event__.fire_event({'data': ret, 'outp...
Scan the query data for the named VM
def get_title(self): def _title(context_model): context = context_model.context() if context is None: return "new context*" title = os.path.basename(context.load_path) if context.load_path \ else "new context" if context_model.is_mo...
Returns a string suitable for titling a window containing this table.
def find_path(self, basename, install_dir=None):
    """Look in a few places for a file with the given name.

    Returns the first existing match from ``self.find_path_dirs``;
    otherwise joins the name onto `install_dir` (or
    ``self.preferred_install_dir`` when none is given), whether or not
    that fallback path exists.
    """
    for directory in self.find_path_dirs:
        candidate = os.path.join(directory, basename)
        if os.path.exists(candidate):
            return candidate
    fallback_dir = install_dir or self.preferred_install_dir
    return os.path.join(fallback_dir, basename)
Look in a few places for a file with the given name. If a custom version of the file is found in the directory being managed by this workspace, return it. Otherwise look in the custom and default input directories in the root directory, and then finally in the root directory itself. ...
def Subclasses(cls, sort_by=None, reverse=False): l = list() for attr, value in get_all_attributes(cls): try: if issubclass(value, Constant): l.append((attr, value)) except: pass if sort_by is None: sort_by =...
Get all nested Constant class and it's name pair. :param sort_by: the attribute name used for sorting. :param reverse: if True, return in descend order. :returns: [(attr, value),...] pairs. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre .....
def _check_input_names(symbol, names, typename, throw): args = symbol.list_arguments() for name in names: if name in args: continue candidates = [arg for arg in args if not arg.endswith('_weight') and not arg.endswith('_bias') and ...
Check that all input names are in symbol's arguments.
def getRankMaps(self):
    """Return one rank-map dict per stored preference.

    Each dict maps a candidate's integer representation to its 1-based
    position in that preference's ranking.
    """
    return [preference.getRankMap() for preference in self.preferences]
Returns a list of dictionaries, one for each preference, that associates the integer representation of each candidate with its position in the ranking, starting from 1 and returns a list of the number of times each preference is given.
def is_expired(self):
    """Return True when the connection's heartbeat has expired.

    Side effect: sends a fresh ping when the last one is older than
    HB_PING_TIME.  Expiry is judged on how long ago the last pong
    arrived, allowing the ping round-trip grace period.
    """
    if time.time() - self.last_ping > HB_PING_TIME:
        self.ping()
    deadline = HB_PING_TIME + HB_PONG_TIME
    return (time.time() - self.last_pong) > deadline
Indicates if connection has expired.
def cast_pars_dict(pars_dict): o = {} for pname, pdict in pars_dict.items(): o[pname] = {} for k, v in pdict.items(): if k == 'free': o[pname][k] = bool(int(v)) elif k == 'name': o[pname][k] = v else: o[pname][k]...
Cast the bool and float elements of a parameters dict to the appropriate python types.
def filter_record(self, record):
    """Return `record` truncated to at most ``self.max_length`` items.

    Records already under the limit are returned unchanged (the same
    object, not a copy).
    """
    if len(record) < self.max_length:
        return record
    return record[:self.max_length]
Filter record, truncating any over some maximum length
def mutateSequence(seq, distance): subProb=distance inProb=0.05*distance deProb=0.05*distance contProb=0.9 l = [] bases = [ 'A', 'C', 'T', 'G' ] i=0 while i < len(seq): if random.random() < subProb: l.append(random.choice(bases)) else: l.append(seq...
Mutates the DNA sequence for use in testing.
def read_stdout(self): output = "" if self._stdout_file: try: with open(self._stdout_file, "rb") as file: output = file.read().decode("utf-8", errors="replace") except OSError as e: log.warning("Could not read {}: {}".format(sel...
Reads the standard output of the QEMU process. Only use when the process has been stopped or has crashed.
def searchInAleph(base, phrase, considerSimilar, field): downer = Downloader() if field.lower() not in VALID_ALEPH_FIELDS: raise InvalidAlephFieldException("Unknown field '" + field + "'!") param_url = Template(SEARCH_URL_TEMPLATE).substitute( PHRASE=quote_plus(phrase), BASE=base, ...
Send request to the aleph search engine. Request itself is pretty useless, but it can be later used as parameter for :func:`getDocumentIDs`, which can fetch records from Aleph. Args: base (str): which database you want to use phrase (str): what do you want to search considerSimilar...
def respond_client(self, answer, socket):
    """Pickle `answer`, send it to the client, and tear the socket down.

    The socket is removed from the read list and closed after the full
    payload has been sent.
    """
    payload = pickle.dumps(answer, -1)
    socket.sendall(payload)
    self.read_list.remove(socket)
    socket.close()
Send an answer to the client.
def _read_message(self):
    """Read one length-prefixed CastMessage from the socket.

    Wire format: a 4-byte big-endian unsigned length followed by that
    many bytes of serialized protobuf.
    """
    header = self._read_bytes_from_socket(4)
    (body_length,) = unpack(">I", header)
    body = self._read_bytes_from_socket(body_length)
    message = cast_channel_pb2.CastMessage()
    message.ParseFromString(body)
    return message
Reads a message from the socket and converts it to a message.
def split_css_classes(css_classes):
    """Turn a space-separated string into a list of CSS classes.

    Empty entries produced by repeated spaces are dropped.
    """
    tokens = text_value(css_classes).split(" ")
    return [token for token in tokens if token]
Turn string into a list of CSS classes
def set_property_filter(filter_proto, name, op, value):
    """Set a property-filter constraint on a datastore.Filter proto.

    Args:
        filter_proto: datastore.Filter proto message (cleared first).
        name: property name.
        op: datastore.PropertyFilter.Operation.
        value: property value.

    Returns:
        The same (mutated) datastore.Filter, enabling call chaining.
    """
    filter_proto.Clear()
    prop_filter = filter_proto.property_filter
    prop_filter.property.name = name
    prop_filter.op = op
    set_value(prop_filter.value, value)
    return filter_proto
Set property filter contraint in the given datastore.Filter proto message. Args: filter_proto: datastore.Filter proto message name: property name op: datastore.PropertyFilter.Operation value: property value Returns: the same datastore.Filter. Usage: >>> set_property_filter(filter_proto,...
def _get_object_pydoc_page_name(obj):
    """Return `obj`'s fully qualified name, module included.

    The built-in module prefix is stripped.  Returns None when no
    qualified name can be derived.
    """
    page_name = fullqualname.fullqualname(obj)
    if page_name is None:
        return None
    return _remove_builtin_prefix(page_name)
Returns fully qualified name, including module name, except for the built-in module.
def filter_by_transcript_expression( self, transcript_expression_dict, min_expression_value=0.0): return self.filter_any_above_threshold( multi_key_fn=lambda variant: variant.transcript_ids, value_dict=transcript_expression_dict, threshold=...
Filters variants down to those which have overlap a transcript whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- transcript_expression_dict : dict Dictionary mapping Ensembl transcript IDs to...
def options(self, **options):
    """Add output options for the underlying data source.

    Each keyword is forwarded to the JVM writer after string
    conversion.  Returns self so calls can be chained.
    """
    for key, value in options.items():
        self._jwrite = self._jwrite.option(key, to_str(value))
    return self
Adds output options for the underlying data source. You can set the following option(s) for writing files: * ``timeZone``: sets the string that indicates a timezone to be used to format timestamps in the JSON/CSV datasources or partition values. If it isn't set, it u...
def install_egg(self, egg_name): if not os.path.exists(self.egg_directory): os.makedirs(self.egg_directory) self.requirement_set.add_requirement( InstallRequirement.from_line(egg_name, None)) try: self.requirement_set.prepare_files(self.finder) sel...
Install an egg into the egg directory
def set_children(self, child_ids): if not isinstance(child_ids, list): raise errors.InvalidArgument() if self.get_children_metadata().is_read_only(): raise errors.NoAccess() idstr_list = [] for object_id in child_ids: if not self._is_valid_id(object_id...
Sets the children. arg: child_ids (osid.id.Id[]): the children ``Ids`` raise: InvalidArgument - ``child_ids`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def list_compatible_canvas_layers(self): italic_font = QFont() italic_font.setItalic(True) list_widget = self.lstCanvasHazLayers list_widget.clear() for layer in self.parent.get_compatible_canvas_layers('hazard'): item = QListWidgetItem(layer['name'], list_widget) ...
Fill the list widget with compatible layers. :returns: Metadata of found layers. :rtype: list of dicts
def release_lock(dax, key, lock_mode=LockMode.wait):
    """Manually release a pg advisory lock.

    :dax: a DataAccess instance
    :key: either a big int or a 2-tuple of integers
    :lock_mode: a member of the LockMode enum
    """
    unlock_fxn = _lock_fxn("unlock", lock_mode, False)
    args = key if isinstance(key, (list, tuple)) else [key]
    rows = dax.callproc(unlock_fxn, args)
    return dax.get_scalar(rows[0])
Manually release a pg advisory lock. :dax: a DataAccess instance :key: either a big int or a 2-tuple of integers :lock_mode: a member of the LockMode enum