code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def add_user(
    self,
    username,
    first_name,
    last_name,
    email,
    role,
    password="",
    hashed_password="",
):
    """Create and persist a new active user with a single role.

    If ``hashed_password`` is given it is stored as-is; otherwise
    ``password`` is hashed first. Returns the new user on success,
    or ``False`` after rolling back the session on any error.
    """
    try:
        user = self.user_model()
        user.username = username
        user.first_name = first_name
        user.last_name = last_name
        user.email = email
        user.active = True
        user.roles.append(role)
        # Prefer a pre-hashed password when supplied; never store plaintext.
        if hashed_password:
            user.password = hashed_password
        else:
            user.password = generate_password_hash(password)
        self.get_session.add(user)
        self.get_session.commit()
        log.info(c.LOGMSG_INF_SEC_ADD_USER.format(username))
        return user
    except Exception as e:
        log.error(c.LOGMSG_ERR_SEC_ADD_USER.format(str(e)))
        self.get_session.rollback()
        return False
Generic function to create user
def get_template_vars(self, slides):
    """Compute the template variables dict from the slides' HTML source.

    Numbers each non-empty slide (incrementing ``self.num_slides``),
    registers a table-of-contents entry per slide, and returns the full
    context dict used to render the presentation template.
    """
    # Title of the first slide doubles as the document title; fall back
    # when the deck is empty or the first slide has no 'title' key.
    try:
        head_title = slides[0]['title']
    except (IndexError, TypeError):
        head_title = "Untitled Presentation"
    for slide_index, slide_vars in enumerate(slides):
        if not slide_vars:
            # Empty slide placeholders are skipped and not numbered.
            continue
        self.num_slides += 1
        slide_number = slide_vars['number'] = self.num_slides
        # Only slides within the configured TOC depth get a real entry;
        # others get a "-" placeholder at level 1 so numbering stays dense.
        if slide_vars['level'] and slide_vars['level'] <= TOC_MAX_LEVEL:
            self.add_toc_entry(slide_vars['title'], slide_vars['level'],
                               slide_number)
        else:
            self.add_toc_entry(u"-", 1, slide_number)
    return {'head_title': head_title,
            'num_slides': str(self.num_slides),
            'slides': slides,
            'toc': self.toc,
            'embed': self.embed,
            'css': self.get_css(),
            'js': self.get_js(),
            'user_css': self.user_css,
            'user_js': self.user_js,
            'math_output': self.math_output}
Computes template vars from slides html source code.
def delete_lbaas_port(self, lb_id):
    """Send a VM-down event and delete DB records for a load balancer.

    :param lb_id: vip id for LBaaS v1 or lbaas id for v2 (dashes are
        stripped to match the stored instance id format).
    """
    normalized_id = lb_id.replace('-', '')
    for vm in self.get_vms_for_this_req(instance_id=normalized_id):
        LOG.info("deleting lbaas vm %s " % vm.name)
        self.delete_vm_function(vm.port_id, vm)
send vm down event and delete db. :param lb_id: vip id for v1 and lbaas_id for v2
def send_registered_email(self, user, user_email, request_email_confirmation):
    """Send the 'user has registered' notification email.

    Does nothing unless both email support and registered-notifications
    are enabled. When ``request_email_confirmation`` is truthy, an email
    confirmation link is generated and embedded in the message.
    """
    um = self.user_manager
    # Respect both feature switches before doing any work.
    if not (um.USER_ENABLE_EMAIL and um.USER_SEND_REGISTERED_EMAIL):
        return

    # A UserEmail record takes precedence over the user's own address.
    target = user_email if user_email else user

    confirm_email_link = None
    if request_email_confirmation:
        token = um.generate_token(target.id)
        confirm_email_link = url_for('user.confirm_email', token=token,
                                     _external=True)

    self._render_and_send_email(
        target.email,
        user,
        um.USER_REGISTERED_EMAIL_TEMPLATE,
        confirm_email_link=confirm_email_link,
    )
Send the 'user has registered' notification email.
def _prep_cnv_file(in_file, work_dir, somatic_info):
    """Prepare a Battenberg CNV file for ingest by PhyloWGS.

    PhyloWGS's preparation script does not handle 'chr'-prefixed (hg19
    style) chromosomes, so column 2 is rewritten to GRCh37 (no 'chr')
    naming. The converted file is written next to ``work_dir`` and only
    regenerated when stale relative to ``in_file``.
    """
    out_file = os.path.join(work_dir, "%s-prep%s" % utils.splitext_plus(os.path.basename(in_file)))
    if not utils.file_uptodate(out_file, in_file):
        # file_transaction gives an atomic temp file promoted on success.
        with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
            with open(in_file) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    # Header line passes through unchanged.
                    out_handle.write(in_handle.readline())
                    for line in in_handle:
                        parts = line.split("\t")
                        # Column 2 is the chromosome name — normalize it.
                        parts[1] = _phylowgs_compatible_chroms(parts[1])
                        out_handle.write("\t".join(parts))
    return out_file
Prepare Battenberg CNV file for ingest by PhyloWGS. The PhyloWGS preparation script does not handle 'chr' prefixed chromosomes (hg19 style) correctly. This converts them over to GRCh37 (no 'chr') style to match preparation work in _prep_vrn_file.
def get_sorted_attachments(self):
    """Return the attachments sorted by the stored attachments order.

    Attachments whose UID appears in ``get_attachments_order()`` sort by
    their position there; attachments with unknown UIDs sort last (in
    their original relative order, since the sort is stable).

    Fix: the original used ``sorted(..., cmp=...)`` and the ``cmp``
    builtin, both removed in Python 3 — rewritten with a key function.
    """
    inf = float("inf")
    order = self.get_attachments_order()
    attachments = self.get_attachments()

    def sort_key(att):
        uid = att.get('UID')
        # 1-based position mirrors the original `order.index(_n) + 1`
        # expression; unknown UIDs get +inf so they sort to the end.
        return order.index(uid) + 1 if uid in order else inf

    return sorted(attachments, key=sort_key)
Returns the attachment info dictionaries, sorted by the stored attachments order; attachments whose UID is not in the order sort last.
def is_native_xmon_op(op: ops.Operation) -> bool:
    """Check whether an operation's gate is native to the xmon device.

    Args:
        op: Input operation.

    Returns:
        True if the operation is a GateOperation whose gate is a native
        xmon gate, False otherwise.
    """
    if not isinstance(op, ops.GateOperation):
        return False
    return is_native_xmon_gate(op.gate)
Check if the gate corresponding to an operation is a native xmon gate. Args: op: Input operation. Returns: True if the operation is native to the xmon, false otherwise.
def convert_unicode_2_utf8(input):
    """Return a copy of `input` with every unicode string encoded to UTF-8.

    Dicts and lists are converted recursively; ``str`` values (bytes on
    Python 2, text on Python 3) pass through unchanged. On Python 3,
    where ``unicode`` does not exist, non-str values are returned as-is.

    Fix: the original contained two bare ``eval()`` calls (arguments
    stripped), which raise TypeError at runtime, and relied on the
    Python-2-only ``dict.iteritems``.
    """
    if isinstance(input, dict):
        try:
            items = input.iteritems()  # Python 2
        except AttributeError:
            items = input.items()  # Python 3
        return dict((convert_unicode_2_utf8(key), convert_unicode_2_utf8(value))
                    for key, value in items)
    elif isinstance(input, list):
        return [convert_unicode_2_utf8(element) for element in input]
    elif isinstance(input, str):
        return input
    else:
        try:
            # `unicode` only exists on Python 2; NameError means Python 3,
            # where text is already `str` and needs no re-encoding.
            if isinstance(input, unicode):  # noqa: F821
                return input.encode('utf-8')
        except NameError:
            pass
        return input
Return a copy of `input` with every str component encoded from unicode to utf-8.
def RaiseIfLastError(result, func = None, arguments = ()):
    """Error checking for Win32 API calls with no error-specific return value.

    Regardless of the return value, calls GetLastError() and raises
    WindowsError if the code is not ERROR_SUCCESS. For this to work the
    caller MUST call SetLastError(ERROR_SUCCESS) before the API call;
    otherwise an exception may be raised even on success, since most API
    calls don't clear the error status code.

    `func` and `arguments` match the ctypes errcheck protocol signature
    and are intentionally unused.
    """
    code = GetLastError()
    if code != ERROR_SUCCESS:
        raise ctypes.WinError(code)
    return result
Error checking for Win32 API calls with no error-specific return value. Regardless of the return value, the function calls GetLastError(). If the code is not C{ERROR_SUCCESS} then a C{WindowsError} exception is raised. For this to work, the user MUST call SetLastError(ERROR_SUCCESS) prior to calling the API. Otherwise an exception may be raised even on success, since most API calls don't clear the error status code.
def install():
    """Install weboob system-wide by cloning the repo and running setup.py.

    Clones into a fresh directory under /tmp, installs, removes the
    sources, verifies the installed version, then runs ``update()``.
    Calls ``exit()`` on any failure.

    Fix: Python 2 print statements replaced with single-argument
    ``print()`` calls, valid on both Python 2 and 3.
    """
    # Find an unused temporary directory name.
    tmp_weboob_dir = '/tmp/weboob'
    while os.path.exists(tmp_weboob_dir):
        tmp_weboob_dir += '1'
    print('Fetching sources in temporary dir {}'.format(tmp_weboob_dir))
    result = cmd_exec('git clone {} {}'.format(WEBOOB_REPO, tmp_weboob_dir))
    if result['error']:
        print(result['stderr'])
        print('Weboob installation failed: could not clone repository')
        exit()
    print('Sources fetched, will now process to installation')
    result = cmd_exec('cd {} && ./setup.py install'.format(tmp_weboob_dir))
    # Clean up the checkout whether or not setup succeeded.
    shutil.rmtree(tmp_weboob_dir)
    if result['error']:
        print(result['stderr'])
        print('Weboob installation failed: setup failed')
        exit()
    print(result['stdout'])
    weboob_version = get_weboob_version()
    if not weboob_version:
        print('Weboob installation failed: version not detected')
        exit()
    print('Weboob (version: {}) installation succeeded'.format(weboob_version))
    update()
Install weboob system-wide
def to_dict(self):
    """Create a JSON-serializable representation of the noise model.

    :return: A dict with the KrausModel dicts under "gates" and the
        per-qubit assignment probability matrices (as nested lists,
        keyed by the stringified qubit id) under "assignment_probs".
    :rtype: Dict[str, Any]
    """
    gate_dicts = [kraus_model.to_dict() for kraus_model in self.gates]
    probs = {}
    for qid, matrix in self.assignment_probs.items():
        probs[str(qid)] = matrix.tolist()
    return {"gates": gate_dicts, "assignment_probs": probs}
Create a JSON serializable representation of the noise model. For example:: { "gates": [ # list of embedded dictionary representations of KrausModels here [...] ] "assignment_probs": { "0": [[.8, .1], [.2, .9]], "1": [[.9, .4], [.1, .6]], } } :return: A dictionary representation of self. :rtype: Dict[str,Any]
def get(s, delimiter='', format="diacritical"):
    """Return the pinyin of *s* (must be unicode), one syllable per input
    character, joined with *delimiter* and rendered in *format*.
    """
    syllables = _pinyin_generator(u(s), format=format)
    return delimiter.join(syllables)
Return pinyin of string, the string must be unicode
def withNamedValues(cls, **values):
    """Create a subclass of *cls* with a discrete named-values constraint.

    Merges *values* into the parent's named values (the set union drops
    fully duplicate enumerations along the way) and restricts the subtype
    to exactly the newly supplied values.
    """
    # Union with the parent's enumerations; duplicates collapse in the set.
    enums = set(cls.namedValues.items())
    enums.update(values.items())
    class X(cls):
        namedValues = namedval.NamedValues(*enums)
        # Only the explicitly passed values are permitted by the constraint.
        subtypeSpec = cls.subtypeSpec + constraint.SingleValueConstraint(
            *values.values())
    # Keep the original class name so reprs/debugging stay readable.
    X.__name__ = cls.__name__
    return X
Create a subclass with a discrete named-values constraint. Reduce fully duplicate enumerations along the way.
def receive_data_chunk(self, raw_data, start):
    """Write an upload chunk, yielding control between chunks.

    Overridden to circumvent worker timeouts on large uploads: the
    zero-second sleep is a cooperative yield to other eventlet
    greenthreads. `start` (chunk offset) is unused because writes are
    strictly sequential.
    """
    self.file.write(raw_data)
    eventlet.sleep(0)  # cooperative yield so the worker stays responsive
Over-ridden method to circumvent the worker timeouts on large uploads.
def get_app_settings_from_arguments(args):
    """Parse ``argparse``-style arguments into application settings.

    Resolves ``args.config_uri`` to an absolute path and loads the app
    settings section named ``args.config_name``. Assumes the parser was
    created with ``create_parser``.
    """
    absolute_config_path = os.path.abspath(args.config_uri)
    return get_appsettings(absolute_config_path, name=args.config_name)
Parse ``argparse`` style arguments into app settings. Given an ``argparse`` set of arguments as ``args`` parse the arguments to return the application settings. This assumes the parser was created using ``create_parser``.
def get_context_from_gdoc(self):
    """Wrap getting context from Google Sheets in a simple cache.

    Refetches when there is no cached data or the TTL has elapsed.
    Returns {} if required attributes are missing (e.g. the project has
    no SPREADSHEET_KEY) — the AttributeError is swallowed by design.
    """
    try:
        start = int(time.time())
        # Refresh when empty or expired.
        if not self.data or start > self.expires:
            self.data = self._get_context_from_gdoc(self.project.SPREADSHEET_KEY)
            end = int(time.time())
            # Per-project TTL override, falling back to the module default.
            ttl = getattr(self.project, 'SPREADSHEET_CACHE_TTL',
                          SPREADSHEET_CACHE_TTL)
            self.expires = end + ttl
        return self.data
    except AttributeError:
        # Missing project config: behave as if there is no sheet context.
        return {}
Wrap getting context from Google sheets in a simple caching mechanism.
def addFollowOnFn(self, fn, *args, **kwargs):
    """Add a function as a follow-on job.

    :param fn: Function to run as a follow-on job with ``*args`` and
        ``**kwargs`` as its arguments. See toil.job.FunctionWrappingJob
        for reserved keyword arguments used to specify resource
        requirements.
    :return: The new follow-on job that wraps fn.
    :rtype: toil.job.FunctionWrappingJob
    """
    # If any resource-requirement kwargs are promises, a promise-aware
    # wrapping job must be used instead of the plain one.
    if PromisedRequirement.convertPromises(kwargs):
        return self.addFollowOn(PromisedRequirementFunctionWrappingJob.create(fn, *args, **kwargs))
    else:
        return self.addFollowOn(FunctionWrappingJob(fn, *args, **kwargs))
Adds a function as a follow-on job. :param fn: Function to be run as a follow-on job with ``*args`` and ``**kwargs`` as \ arguments to this function. See toil.job.FunctionWrappingJob for reserved \ keyword arguments used to specify resource requirements. :return: The new follow-on job that wraps fn. :rtype: toil.job.FunctionWrappingJob
def set_prob_type(cls, problem_type, classification_type, eval_type):
    """Set the problem, classification, and evaluation type on the class.

    Validates each value against the module-level allowed lists
    (``problem_type_list`` etc.) via assert, stores them on *cls*, and
    prints a summary. ``classification_type`` is only validated for
    classification problems but is always stored.

    Fix: Python 2 print statements replaced with ``print()`` calls,
    valid on both Python 2 and 3.
    """
    assert problem_type in problem_type_list, 'Need to set Problem Type'
    if problem_type == 'classification':
        assert classification_type in classification_type_list,\
            'Need to set Classification Type'
    assert eval_type in eval_type_list, 'Need to set Evaluation Type'
    cls.problem_type = problem_type
    cls.classification_type = classification_type
    cls.eval_type = eval_type
    if cls.problem_type == 'classification':
        print('Setting Problem:{}, Type:{}, Eval:{}'.format(
            cls.problem_type, cls.classification_type, cls.eval_type))
    elif cls.problem_type == 'regression':
        print('Setting Problem:{}, Eval:{}'.format(
            cls.problem_type, cls.eval_type))
    return
Set problem type
def _simplify_block(self, ail_block, stack_pointer_tracker=None):
    """Simplify a single AIL block.

    :param ailment.Block ail_block: The AIL block to simplify.
    :param stack_pointer_tracker: The RegisterDeltaTracker analysis
        instance, or None.
    :return: A simplified AIL block.
    """
    simplifier = self.project.analyses.AILBlockSimplifier(
        ail_block,
        stack_pointer_tracker=stack_pointer_tracker,
    )
    return simplifier.result_block
Simplify a single AIL block. :param ailment.Block ail_block: The AIL block to simplify. :param stack_pointer_tracker: The RegisterDeltaTracker analysis instance. :return: A simplified AIL block.
def get_unicode_str(obj):
    """Coerce *obj* to a unicode string.

    Bytes are decoded as UTF-8 (undecodable bytes ignored); text passes
    through; anything else is stringified via ``six.text_type``.
    """
    # binary_type and text_type are disjoint, so branch order is free.
    if isinstance(obj, six.binary_type):
        return obj.decode("utf-8", errors="ignore")
    if isinstance(obj, six.text_type):
        return obj
    return six.text_type(obj)
Makes sure obj is a unicode string.
def _setweights(self):
    """Apply dropout to the wrapped module's raw weights.

    For each managed weight name, reads the pristine copy stored as
    ``<name>_raw``, applies torch's built-in dropout (respecting
    ``self.training``), and installs the result under the original name.
    """
    for name_w in self.weights:
        raw_w = getattr(self.module, name_w + '_raw')
        w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training)
        # Remove any existing attribute first so setattr installs a plain
        # tensor rather than fighting nn.Module parameter registration.
        if hasattr(self.module, name_w):
            delattr(self.module, name_w)
        setattr(self.module, name_w, w)
Uses pytorch's built-in dropout function to apply dropout to the parameters of the wrapped module. Args: None Returns: None
def run(self):
    """Perform the actual VASP run.

    When ``auto_gamma`` is set and the KPOINTS file requests a 1x1x1
    Gamma-only mesh, switches to the gamma-optimized VASP binary if one
    is available. Stdout/stderr are redirected to the configured files.

    Returns:
        (subprocess.Popen) Used for monitoring.
    """
    cmd = list(self.vasp_cmd)
    if self.auto_gamma:
        vi = VaspInput.from_directory(".")
        kpts = vi["KPOINTS"]
        # Gamma-point-only calculations can use the faster gamma build.
        if kpts.style == Kpoints.supported_modes.Gamma \
                and tuple(kpts.kpts[0]) == (1, 1, 1):
            if self.gamma_vasp_cmd is not None and which(
                    self.gamma_vasp_cmd[-1]):
                cmd = self.gamma_vasp_cmd
            elif which(cmd[-1] + ".gamma"):
                # Fall back to a ".gamma"-suffixed binary on PATH.
                cmd[-1] += ".gamma"
    logger.info("Running {}".format(" ".join(cmd)))
    # stderr is line-buffered (buffering=1) so errors appear promptly.
    with open(self.output_file, 'w') as f_std, \
            open(self.stderr_file, "w", buffering=1) as f_err:
        p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)
    return p
Perform the actual VASP run. Returns: (subprocess.Popen) Used for monitoring.
def total_items(self, request):
    """Return the total number of items in the basket.

    Sums the ``quantity`` of every item in the queryset and responds
    with ``{"quantity": total}`` and HTTP 200.
    """
    total = sum(item.quantity for item in self.get_queryset(request))
    return Response(data={"quantity": total}, status=status.HTTP_200_OK)
Get total number of items in the basket
def get_finder(sources=None, pip_command=None, pip_options=None):
    """Get a package finder for looking up candidates to install.

    :param sources: A list of pipfile-formatted sources; defaults to the
        public PyPI simple index.
    :param pip_command: A pip command instance; created if not given.
    :param pip_options: Pip options; derived from sources if not given.
    :return: A package finder
    :rtype: :class:`~pip._internal.index.PackageFinder`
    """
    if not pip_command:
        pip_command = get_pip_command()
    if not sources:
        sources = [
            {"url": "https://pypi.org/simple", "name": "pypi", "verify_ssl": True}
        ]
    if not pip_options:
        pip_options = get_pip_options(sources=sources, pip_command=pip_command)
    session = pip_command._build_session(pip_options)
    # Ensure the HTTP session is closed when the interpreter exits.
    atexit.register(session.close)
    finder = pip_shims.shims.PackageFinder(
        find_links=[],
        index_urls=[s.get("url") for s in sources],
        trusted_hosts=[],
        allow_all_prereleases=pip_options.pre,
        session=session,
    )
    return finder
Get a package finder for looking up candidates to install :param sources: A list of pipfile-formatted sources, defaults to None :param sources: list[dict], optional :param pip_command: A pip command instance, defaults to None :type pip_command: :class:`~pip._internal.cli.base_command.Command` :param pip_options: A pip options, defaults to None :type pip_options: :class:`~pip._internal.cli.cmdoptions` :return: A package finder :rtype: :class:`~pip._internal.index.PackageFinder`
def ActionEnum(ctx):
    """Action enumeration.

    Maps recorded-game action opcodes to symbolic names; unknown opcodes
    fall through to the ``Pass`` default.
    """
    return Enum(
        ctx,
        interact=0,
        stop=1,
        ai_interact=2,
        move=3,
        add_attribute=5,
        give_attribute=6,
        ai_move=10,
        resign=11,
        spec=15,
        waypoint=16,
        stance=18,
        guard=19,
        follow=20,
        patrol=21,
        formation=23,
        save=27,
        ai_waypoint=31,
        chapter=32,
        ai_command=53,
        ai_queue=100,
        research=101,
        build=102,
        game=103,
        wall=105,
        delete=106,
        attackground=107,
        tribute=108,
        repair=110,
        release=111,
        multiqueue=112,
        togglegate=114,
        flare=115,
        order=117,
        queue=119,
        gatherpoint=120,
        sell=122,
        buy=123,
        droprelic=126,
        townbell=127,
        backtowork=128,
        postgame=255,
        default=Pass
    )
Action Enumeration.
def allow_unconfirmed_email(view_function):
    """View decorator: require login but allow an unconfirmed email.

    Works in tandem with the
    ``USER_ALLOW_LOGIN_WITHOUT_CONFIRMED_EMAIL=True`` setting. Calls
    ``unauthenticated_view()`` when the user is not logged in, and the
    decorated view otherwise.
    """
    @wraps(view_function)
    def decorator(*args, **kwargs):
        # Signal to the auth check (via flask.g) that an unconfirmed
        # email is acceptable for this request.
        g._flask_user_allow_unconfirmed_email = True
        try:
            user_manager = current_app.user_manager
            allowed = _is_logged_in_with_confirmed_email(user_manager)
            if not allowed:
                return user_manager.unauthenticated_view()
            return view_function(*args, **kwargs)
        finally:
            # Always reset the flag so other views in the same request
            # context keep strict confirmation requirements.
            g._flask_user_allow_unconfirmed_email = False
    return decorator
This decorator ensures that the user is logged in, but allows users with or without a confirmed email address to access this particular view. It works in tandem with the ``USER_ALLOW_LOGIN_WITHOUT_CONFIRMED_EMAIL=True`` setting. .. caution:: | Use ``USER_ALLOW_LOGIN_WITHOUT_CONFIRMED_EMAIL=True`` and ``@allow_unconfirmed_email`` with caution, as they relax security requirements. | Make sure that decorated views **never call other views directly**. Always use ``redirect()`` to ensure proper view protection. Example:: @route('/show_promotion') @allow_unconfirmed_email def show_promotion(): # Logged in, with or without ... # confirmed email address It can also precede the ``@roles_required`` and ``@roles_accepted`` view decorators:: @route('/show_promotion') @allow_unconfirmed_email @roles_required('Visitor') def show_promotion(): # Logged in, with or without ... # confirmed email address | Calls unauthorized_view() when the user is not logged in. | Calls the decorated view otherwise.
def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY):
    """Change some fields of a dataset.

    Only the properties listed in ``fields`` are sent; a field that is
    ``None`` in ``dataset`` is deleted server-side. If ``dataset.etag``
    is set, the PATCH is conditional (If-Match) so it only succeeds when
    the server copy is unmodified.

    Args:
        dataset (google.cloud.bigquery.dataset.Dataset): The dataset to update.
        fields (Sequence[str]): The properties of ``dataset`` to change.
        retry (google.api_core.retry.Retry, optional): How to retry the RPC.

    Returns:
        google.cloud.bigquery.dataset.Dataset: The modified instance.
    """
    partial = dataset._build_resource(fields)
    headers = {"If-Match": dataset.etag} if dataset.etag is not None else None
    api_response = self._call_api(
        retry,
        method="PATCH",
        path=dataset.path,
        data=partial,
        headers=headers,
    )
    return Dataset.from_api_repr(api_response)
Change some fields of a dataset. Use ``fields`` to specify which fields to update. At least one field must be provided. If a field is listed in ``fields`` and is ``None`` in ``dataset``, it will be deleted. If ``dataset.etag`` is not ``None``, the update will only succeed if the dataset on the server has the same ETag. Thus reading a dataset with ``get_dataset``, changing its fields, and then passing it to ``update_dataset`` will ensure that the changes will only be saved if no modifications to the dataset occurred since the read. Args: dataset (google.cloud.bigquery.dataset.Dataset): The dataset to update. fields (Sequence[str]): The properties of ``dataset`` to change (e.g. "friendly_name"). retry (google.api_core.retry.Retry, optional): How to retry the RPC. Returns: google.cloud.bigquery.dataset.Dataset: The modified ``Dataset`` instance.
def get_predicate_indices(tags: List[str]) -> List[int]:
    """Return the word indices of a predicate in BIO tags.

    A position counts as part of the predicate when its tag contains
    the character 'V' (e.g. 'B-V', 'I-V').
    """
    indices = []
    for position, tag in enumerate(tags):
        if 'V' in tag:
            indices.append(position)
    return indices
Return the word indices of a predicate in BIO tags.
def remove(self, key, cas=0, quiet=None, persist_to=0, replicate_to=0):
    """Remove the key-value entry for a given key in Couchbase.

    :param key: The key to remove (same conventions as :meth:`upsert`).
    :param int cas: If non-zero, only remove when the server CAS matches;
        otherwise :exc:`.KeyExistsError` is raised.
    :param boolean quiet: Same semantics as `quiet` in :meth:`get` —
        suppresses :exc:`.NotFoundError` for missing keys.
    :param int persist_to: Wait for removal to reach the storage of at
        least this many nodes.
    :param int replicate_to: Wait for removal to reach the cache of at
        least this many replica nodes (excluding the master).
    :return: A :class:`~.Result` object.

    Thin wrapper delegating directly to the base implementation.
    """
    return _Base.remove(self, key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
Remove the key-value entry for a given key in Couchbase. :param key: A string which is the key to remove. The format and type of the key follows the same conventions as in :meth:`upsert` :type key: string, dict, or tuple/list :param int cas: The CAS to use for the removal operation. If specified, the key will only be removed from the server if it has the same CAS as specified. This is useful to remove a key only if its value has not been changed from the version currently visible to the client. If the CAS on the server does not match the one specified, an exception is thrown. :param boolean quiet: Follows the same semantics as `quiet` in :meth:`get` :param int persist_to: If set, wait for the item to be removed from the storage of at least these many nodes :param int replicate_to: If set, wait for the item to be removed from the cache of at least these many nodes (excluding the master) :raise: :exc:`.NotFoundError` if the key does not exist. :raise: :exc:`.KeyExistsError` if a CAS was specified, but the CAS on the server had changed :return: A :class:`~.Result` object. Simple remove:: ok = cb.remove("key").success Don't complain if key does not exist:: ok = cb.remove("key", quiet=True) Only remove if CAS matches our version:: rv = cb.get("key") cb.remove("key", cas=rv.cas) Remove multiple keys:: oks = cb.remove_multi(["key1", "key2", "key3"]) Remove multiple keys with CAS:: oks = cb.remove({ "key1" : cas1, "key2" : cas2, "key3" : cas3 }) .. seealso:: :meth:`remove_multi`, :meth:`endure` for more information on the ``persist_to`` and ``replicate_to`` options.
def GMailer(recipients, username, password, subject='Log message from lggr.py'):
    """Generator coroutine that emails received log messages via GMail.

    Opens an authenticated TLS session to smtp.gmail.com, then for every
    string sent into the generator, emails it to each recipient from
    ``<username>@gmail.com``. The SMTP session is closed when the
    generator is closed (GeneratorExit).
    """
    import smtplib
    srvr = smtplib.SMTP('smtp.gmail.com', 587)
    srvr.ehlo()
    srvr.starttls()
    srvr.ehlo()
    srvr.login(username, password)
    # Accept a single recipient or a list/tuple of them.
    if not (isinstance(recipients, list) or isinstance(recipients, tuple)):
        recipients = [recipients]
    gmail_sender = '{0}@gmail.com'.format(username)
    # Template with {0} = recipient, {1} = log message (filled per send).
    msg = 'To: {0}\nFrom: '+gmail_sender+'\nSubject: '+subject+'\n'
    msg = msg + '\n{1}\n\n'
    try:
        while True:
            logstr = (yield)  # suspend until a log message is sent in
            for rcp in recipients:
                message = msg.format(rcp, logstr)
                srvr.sendmail(gmail_sender, rcp, message)
    except GeneratorExit:
        # Generator closed: shut down the SMTP session cleanly.
        srvr.quit()
Sends messages as emails to the given list of recipients, from a GMail account.
def disable_napp(mgr):
    """Disable a NApp, logging an error if it is not currently enabled."""
    if not mgr.is_enabled():
        LOG.error(" NApp isn't enabled.")
        return
    LOG.info(' Disabling...')
    mgr.disable()
    LOG.info(' Disabled.')
Disable a NApp.
def service_running(service_name, **kwargs):
    """Determine whether a system service is running.

    :param service_name: the name of the service
    :param **kwargs: additional key=value args appended to the upstart
        ``status`` command line for per-instance units (e.g.
        ``service ceph-osd status id=2``). Ignored for systemd and
        SysV init services.
    :return: True if the service is running, False otherwise.
    """
    if init_is_systemd():
        return service('is-active', service_name)
    else:
        # Upstart: a conf file exists for the service.
        if os.path.exists(_UPSTART_CONF.format(service_name)):
            try:
                cmd = ['status', service_name]
                for key, value in six.iteritems(kwargs):
                    parameter = '%s=%s' % (key, value)
                    cmd.append(parameter)
                output = subprocess.check_output(
                    cmd, stderr=subprocess.STDOUT).decode('UTF-8')
            except subprocess.CalledProcessError:
                # Non-zero exit means the job is unknown or stopped.
                return False
            else:
                # Different upstart versions phrase "running" differently.
                if ("start/running" in output or
                        "is running" in output or
                        "up and running" in output):
                    return True
        # SysV init script fallback.
        elif os.path.exists(_INIT_D_CONF.format(service_name)):
            return service('status', service_name)
        return False
Determine whether a system service is running. :param service_name: the name of the service :param **kwargs: additional args to pass to the service command. This is used to pass additional key=value arguments to the service command line for managing specific instance units (e.g. service ceph-osd status id=2). The kwargs are ignored in systemd services.
def find_ignored_languages(source):
    """Yield languages ignored via rstcheck comments in *source*.

    Lines matching the rstcheck comment pattern must use
    "key=value" syntax; an ``ignore-language`` key yields each
    comma-separated language, stripped of whitespace.

    Raises Error (with the 1-based line number) on malformed syntax.
    """
    for index, line in enumerate(source.splitlines()):
        match = RSTCHECK_COMMENT_RE.match(line)
        if not match:
            continue
        key_and_value = line[match.end():].strip().split('=')
        if len(key_and_value) != 2:
            raise Error('Expected "key=value" syntax',
                        line_number=index + 1)
        key, value = key_and_value
        if key == 'ignore-language':
            for language in value.split(','):
                yield language.strip()
Yield ignored languages. Languages are ignored via comment. For example, to ignore C++, JSON, and Python: >>> list(find_ignored_languages(''' ... Example ... ======= ... ... .. rstcheck: ignore-language=cpp,json ... ... .. rstcheck: ignore-language=python ... ''')) ['cpp', 'json', 'python']
def to_web_include(
        project: 'projects.Project',
        file_path: str
) -> WEB_INCLUDE:
    """Convert *file_path* into a WEB_INCLUDE for the results page.

    Only ``.css`` and ``.js`` files are convertible; anything else
    returns None. The URL is the path relative to the project source
    directory, normalized to forward slashes.

    :param project: Project in which the file_path resides
    :param file_path: Absolute path to the source file
    :return: The WEB_INCLUDE instance, or None
    """
    if not file_path.endswith(('.css', '.js')):
        return None

    slug = file_path[len(project.source_directory):]
    url = '/{}'.format(slug).replace('\\', '/').replace('//', '/')
    return WEB_INCLUDE(name=':project:{}'.format(url), src=url)
Converts the given file_path into a WEB_INCLUDE instance that represents the deployed version of this file to be loaded into the results project page :param project: Project in which the file_path resides :param file_path: Absolute path to the source file for which the WEB_INCLUDE instance will be created :return: The WEB_INCLUDE instance that represents the given source file
def register_on_snapshot_deleted(self, callback):
    """Register *callback* to consume snapshot-deleted events.

    The callback receives an ISnapshotDeletedEvent object.

    :return: the callback_id from the event source registration.
    """
    event_type = library.VBoxEventType.on_snapshot_deleted
    return self.event_source.register_callback(callback, event_type)
Set the callback function to consume on snapshot deleted events. Callback receives a ISnapshotDeletedEvent object. Returns the callback_id
def to_binary(value, encoding='utf-8'):
    """Convert *value* to a binary (bytes) string.

    :param value: Value to be converted; falsy values become ``b''``.
    :param encoding: Desired encoding for text input (default utf-8).
    """
    if not value:
        return b''
    # text_type and binary_type are disjoint, so branch order is free.
    if isinstance(value, six.text_type):
        return value.encode(encoding)
    if isinstance(value, six.binary_type):
        return value
    # Anything else: stringify first, then encode.
    return to_text(value).encode(encoding)
Convert value to binary string, default encoding is utf-8 :param value: Value to be converted :param encoding: Desired encoding
def check_aggregate(df, variable, components=None, exclude_on_fail=False,
                    multiplier=1, **kwargs):
    """Check that timeseries values match the aggregation of sub-categories.

    Parameters
    ----------
    df: IamDataFrame instance
    variable, components, exclude_on_fail, multiplier:
        see IamDataFrame.check_aggregate() for details
    kwargs: passed to `df.filter()`

    Returns the check result, or None when the filter matches no data.
    """
    fdf = df.filter(**kwargs)
    if len(fdf.data) > 0:
        vdf = fdf.check_aggregate(variable=variable, components=components,
                                  exclude_on_fail=exclude_on_fail,
                                  multiplier=multiplier)
        # Propagate exclusions found on the filtered copy back to `df`.
        df.meta['exclude'] |= fdf.meta['exclude']
        return vdf
Check whether the timeseries values match the aggregation of sub-categories Parameters ---------- df: IamDataFrame instance args: see IamDataFrame.check_aggregate() for details kwargs: passed to `df.filter()`
def spin_sz(self):
    """Return the z-component of the spin of the secondary mass.

    Delegates to conversions.secondary_spin, which presumably selects
    spin1z or spin2z according to which of mass1/mass2 is the smaller
    (secondary) object — confirm against the conversions module.
    """
    return conversions.secondary_spin(self.mass1, self.mass2, self.spin1z, self.spin2z)
Returns the z-component of the spin of the secondary mass.
def validate(method):
    """Decorate a recipe ``run`` method so inputs and outputs are validated.

    The wrapped method's instance must provide ``validate_input`` and
    ``validate_result``; the input is checked before the call and the
    result after it, then the result is returned unchanged.
    """
    @wraps(method)
    def wrapper(self, rinput):
        self.validate_input(rinput)
        outcome = method(self, rinput)
        self.validate_result(outcome)
        return outcome
    return wrapper
Decorate run method, inputs and outputs are validated
def process_commission(self, commission):
    """Process a commission event.

    Records the charge with the position tracker and books the cost as
    a negative cash flow.

    Parameters
    ----------
    commission : zp.Event
        The commission being paid; must carry 'asset' and 'cost'.
    """
    asset, cost = commission['asset'], commission['cost']
    self.position_tracker.handle_commission(asset, cost)
    self._cash_flow(-cost)
Process the commission. Parameters ---------- commission : zp.Event The commission being paid.
def error(self, msgid, error):
    """Handle an error message: fail the pending request *msgid* with
    *error*, then forget it.
    """
    pending = self.requests[msgid]
    pending.errback(error)
    del self.requests[msgid]
Handle a error message.
def fuse_wheels(to_wheel, from_wheel, out_wheel):
    """Fuse `from_wheel` into `to_wheel`, write result to `out_wheel`.

    Parameters
    ----------
    to_wheel : str
        filename of wheel to fuse into
    from_wheel : str
        filename of wheel to fuse from
    out_wheel : str
        filename of new wheel from fusion of `to_wheel` and `from_wheel`
    """
    # Absolute paths first: we chdir into a temporary directory below.
    to_wheel, from_wheel, out_wheel = [abspath(w) for w in (to_wheel, from_wheel, out_wheel)]
    with InTemporaryDirectory():
        zip2dir(to_wheel, 'to_wheel')
        zip2dir(from_wheel, 'from_wheel')
        fuse_trees('to_wheel', 'from_wheel')
        # The RECORD file must be regenerated after the tree changed.
        rewrite_record('to_wheel')
        dir2zip('to_wheel', out_wheel)
Fuse `from_wheel` into `to_wheel`, write to `out_wheel` Parameters ---------- to_wheel : str filename of wheel to fuse into from_wheel : str filename of wheel to fuse from out_wheel : str filename of new wheel from fusion of `to_wheel` and `from_wheel`
def get_server_url(self):
    """Return the configured server url.

    Credentials are embedded (``http://user:pass@host:port``) only when
    both username and password are configured.

    :returns: server url
    """
    config = self.driver_wrapper.config
    host = config.get('Server', 'host')
    port = config.get('Server', 'port')
    username = config.get_optional('Server', 'username')
    password = config.get_optional('Server', 'password')
    if username and password:
        auth = '{}:{}@'.format(username, password)
    else:
        auth = ''
    return 'http://{}{}:{}'.format(auth, host, port)
Return the configured server url :returns: server url
def duplicates(inlist):
    """Return the items in the FIRST dimension of *inlist* that occur
    again later in the list.

    An item appearing k times contributes k-1 entries, in order of its
    first k-1 occurrences — the same contract as the original O(n^2)
    scan, but computed in O(n) via a Counter for hashable items.

    Usage:   duplicates(inlist)
    """
    from collections import Counter  # local import keeps the fix self-contained
    try:
        remaining = Counter(inlist)
    except TypeError:
        # Unhashable items (e.g. nested lists): fall back to the
        # original quadratic membership scan.
        return [inlist[i] for i in range(len(inlist)) if inlist[i] in inlist[i + 1:]]
    dups = []
    for item in inlist:
        remaining[item] -= 1
        # A positive remainder means this occurrence has a later twin.
        if remaining[item]:
            dups.append(item)
    return dups
Returns duplicate items in the FIRST dimension of the passed list. Usage: duplicates (inlist)
def loads(string):
    """Load the filters dictionary from *string*.

    Each parsed key is resolved to a component via dr.get_component
    (falling back to the raw key when resolution fails), and its value
    list is stored in the global FILTERS registry as a set.
    """
    d = _loads(string)
    for k, v in d.items():
        FILTERS[dr.get_component(k) or k] = set(v)
Loads the filters dictionary given a string.
def _export_module_attachments(meta_graph):
    """Export ModuleAttachments from the current tf.Graph into `meta_graph`.

    No-op (the saved collection is left absent, not empty) when no
    attachments were added.
    """
    added_attachments = tf_v1.get_collection(_ATTACHMENT_COLLECTION_INTERNAL)
    if not added_attachments:
        return
    # Deduplicate by key; OrderedDict keeps the last value per key while
    # preserving insertion order of first appearance.
    unique_attachments = collections.OrderedDict(
        (attachment.key, attachment) for attachment in added_attachments)
    meta_graph.collection_def[ATTACHMENT_COLLECTION_SAVED].bytes_list.value[:] = [
        attachment.SerializeToString() for attachment in unique_attachments.values()]
Exports ModuleAttachments from the current tf.Graph into `meta_graph`.
def js_adaptor(buffer):
    """Convert JavaScript literals in *buffer* to Python-parseable tokens.

    ``true``/``false``/``none`` become ``True``/``False``/``None`` and
    bare ``NaN`` becomes the quoted string ``"NaN"``.

    Fix: the original substring substitutions (``re.sub('true', ...)``)
    also rewrote occurrences inside longer identifiers (e.g. 'untrue'
    -> 'unTrue'); word boundaries restrict matches to whole tokens.

    Arguments:
        buffer: string to be converted
    Returns:
        string after conversion
    """
    buffer = re.sub(r'\btrue\b', 'True', buffer)
    buffer = re.sub(r'\bfalse\b', 'False', buffer)
    buffer = re.sub(r'\bnone\b', 'None', buffer)
    buffer = re.sub(r'\bNaN\b', '"NaN"', buffer)
    return buffer
convert javascript objects like true, none, NaN etc. to quoted word. Arguments: buffer: string to be converted Returns: string after conversion
def is_article(self, response, url):
    """Test whether *response* is an article.

    Runs every heuristic enabled for the site (from config.cfg /
    sitelist.json), substitutes each heuristic's boolean outcome into
    the site's condition expression, and evaluates the result.

    :param obj response: The response of the site.
    :param str url: The base_url (used to look up site-specific config).
    :return bool: True if the heuristics match the site as an article.
    """
    site = self.__sites_object[url]
    heuristics = self.__get_enabled_heuristics(url)
    self.log.info("Checking site: %s", response.url)
    statement = self.__get_condition(url)
    self.log.debug("Condition (original): %s", statement)
    for heuristic, condition in heuristics.items():
        # Heuristic name doubles as the method name to invoke.
        heuristic_func = getattr(self, heuristic)
        result = heuristic_func(response, site)
        check = self.__evaluate_result(result, condition)
        # Replace the heuristic's name in the condition with its
        # boolean outcome (whole-word match only).
        statement = re.sub(r"\b%s\b" % heuristic, str(check), statement)
        self.log.debug("Checking heuristic (%s)"
                       " result (%s) on condition (%s): %s",
                       heuristic, result, condition, check)
    self.log.debug("Condition (evaluated): %s", statement)
    # NOTE(review): eval() of a config-derived expression — safe only if
    # sitelist.json is trusted; consider ast.literal_eval or a parser.
    is_article = eval(statement)
    self.log.debug("Article accepted: %s", is_article)
    return is_article
Tests if the given response is an article by calling and checking the heuristics set in config.cfg and sitelist.json :param obj response: The response of the site. :param str url: The base_url (needed to get the site-specific config from the JSON-file) :return bool: true if the heuristics match the site as an article
def select(*signals: Signal, **kwargs) -> List[Signal]:
    """Wait for multiple concurrent signals; return those that are on.

    Waits until at least one of *signals* turns on. If ``timeout`` is a
    number, waiting stops after that delay (a Timeout may propagate from
    the underlying wait).

    :param timeout: None, or a number of seconds after which waiting
        stops.
    """
    # Private interrupt type so cleanup can't be confused with other
    # interrupts aimed at the helper processes.
    class CleanUp(Interrupt):
        pass

    timeout = kwargs.get("timeout", None)
    if not isinstance(timeout, (float, int, type(None))):
        raise ValueError("The timeout keyword parameter can be either None or a number.")

    def wait_one(signal: Signal, common: Signal) -> None:
        # Helper process: relay one signal onto the shared one; a
        # CleanUp interrupt just ends the helper silently.
        try:
            signal.wait()
            common.turn_on()
        except CleanUp:
            pass

    # Shared signal turned on by whichever watched signal fires first.
    common = Signal(name=local.name + "-selector").turn_off()
    if _logger is not None:
        _log(INFO, "select", "select", "select", signals=[sig.name for sig in signals])
    procs = []
    for signal in signals:
        procs.append(add(wait_one, signal, common))
    try:
        common.wait(timeout)
    finally:
        # Always tear down the helper processes, even on timeout/error.
        for proc in procs:
            proc.interrupt(CleanUp())
    return [signal for signal in signals if signal.is_on]
Allows the current process to wait for multiple concurrent signals. Waits until one of the signals turns on, at which point this signal is returned. :param timeout: If this parameter is not ``None``, it is taken as a delay at the end of which the process times out, and stops waiting on the set of :py:class:`Signal`s. In such a situation, a :py:class:`Timeout` exception is raised on the process.
def _update(self, dataFile, handle): self._cache.remove((dataFile, handle)) self._add(dataFile, handle)
Update the priority of the file handle. The element is first removed and then added to the left of the deque.
def create_divisao_dc(self):
    """Get an instance of the divisao_dc services facade, bound to this
    client's endpoint and credentials.
    """
    return DivisaoDc(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap)
Get an instance of divisao_dc services facade.
def global_state_code(self):
    """Return the global variables needed by the generated ``func_code``,
    as source code: imports plus (when present) the compiled regular
    expression table.
    """
    # Ensure regex usage has been discovered before we decide what to emit.
    self._generate_func_code()

    if not self._compile_regexps:
        # No patterns: only the exception import is needed.
        return '\n'.join(
            [
                'from fastjsonschema import JsonSchemaException',
                '',
                '',
            ]
        )
    regexs = ['"{}": re.compile(r"{}")'.format(key, value.pattern) for key, value in self._compile_regexps.items()]
    return '\n'.join(
        [
            'import re',
            'from fastjsonschema import JsonSchemaException',
            '',
            '',
            'REGEX_PATTERNS = {',
            '    ' + ',\n    '.join(regexs),
            '}',
            '',
        ]
    )
Returns global variables for generating function from ``func_code`` as code. Includes compiled regular expressions and imports.
def first(self, values, axis=0):
    """Return values at the first occurrence of each associated key.

    Parameters
    ----------
    values : array_like, [keys, ...]
        values to pick the first value of per group
    axis : int, optional
        alternative reduction axis for values

    Returns
    -------
    unique : ndarray, [groups]
        unique keys
    reduced : ndarray, [groups, ...]
        value array, reduced over groups
    """
    values = np.asarray(values)
    # Position of each group's first member in the original ordering.
    first_positions = self.index.sorter[self.index.start]
    return self.unique, np.take(values, first_positions, axis)
return values at first occurance of its associated key Parameters ---------- values : array_like, [keys, ...] values to pick the first value of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
def sunset_utc(self, date, latitude, longitude, observer_elevation=0):
    """Calculate sunset time in the UTC timezone.

    :param date: Date to calculate for (:class:`datetime.date`).
    :param latitude: Latitude — northern latitudes should be positive.
    :param longitude: Longitude — eastern longitudes should be positive.
    :param observer_elevation: Elevation in metres to calculate sunset for.
    :return: The UTC date and time at which sunset occurs.
    :rtype: :class:`~datetime.datetime`
    :raises AstralError: if the sun never reaches the horizon on this
        day at this location.
    """
    try:
        # 90° zenith + 0.833° accounts for refraction and solar radius.
        return self._calc_time(90 + 0.833, SUN_SETTING, date, latitude, longitude, observer_elevation)
    except ValueError as exc:
        # The solver signals "no sunset" via a math domain error;
        # re-raise anything else untouched.
        if exc.args[0] == "math domain error":
            raise AstralError(
                ("Sun never reaches the horizon on this day, "
                 "at this location.")
            )
        else:
            raise
Calculate sunset time in the UTC timezone. :param date: Date to calculate for. :type date: :class:`datetime.date` :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :param observer_elevation: Elevation in metres to calculate sunset for :type observer_elevation: int :return: The UTC date and time at which sunset occurs. :rtype: :class:`~datetime.datetime`
def restart(self, force=False, wait_for_available=True, operation_timeout=None):
    """Restart the HMC represented by this Console object.

    Parameters:
      force (bool): Process the restart even when (GUI) users are
        connected.
      wait_for_available (bool): If True, block until the HMC answers
        again (checked via the client's wait_for_available); if False,
        return once the restart request is accepted.
      operation_timeout (number): Timeout in seconds for the
        availability wait; 0 disables the timeout, None uses the
        session default.

    Raises: zhmcclient HTTPError/ParseError/AuthError/ConnectionError,
    and OperationTimeout when the availability wait times out.
    """
    body = {'force': force}
    self.manager.session.post(self.uri + '/operations/restart', body=body)
    if wait_for_available:
        # Give the HMC a head start to actually go down before polling,
        # otherwise the first poll may hit the old (dying) instance.
        time.sleep(10)
        self.manager.client.wait_for_available(
            operation_timeout=operation_timeout)
Restart the HMC represented by this Console object. Once the HMC is online again, this Console object, as well as any other resource objects accessed through this HMC, can continue to be used. An automatic re-logon will be performed under the covers, because the HMC restart invalidates the currently used HMC session. Authorization requirements: * Task permission for the "Shutdown/Restart" task. * "Remote Restart" must be enabled on the HMC. Parameters: force (bool): Boolean controlling whether the restart operation is processed when users are connected (`True`) or not (`False`). Users in this sense are local or remote GUI users. HMC WS API clients do not count as users for this purpose. wait_for_available (bool): Boolean controlling whether this method should wait for the HMC to become available again after the restart, as follows: * If `True`, this method will wait until the HMC has restarted and is available again. The :meth:`~zhmcclient.Client.query_api_version` method will be used to check for availability of the HMC. * If `False`, this method will return immediately once the HMC has accepted the request to be restarted. operation_timeout (:term:`number`): Timeout in seconds, for waiting for HMC availability after the restart. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_available=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for the HMC to become available again after the restart.
def show_agent(self, agent, **_params):
    """Fetch information of a certain agent.

    Extra keyword arguments are forwarded as query parameters.
    """
    return self.get(self.agent_path % (agent), params=_params)
Fetches information of a certain agent.
def __iterate_value(self, value):
    """Return *value* converted for JSON serialization.

    Objects with a ``__dict__`` and mappings recurse through
    __find_object_children; sequences/sets go through __construct_list;
    everything else is passed through safe_values.
    """
    if hasattr(value, '__dict__') or isinstance(value, dict):
        return self.__find_object_children(value)
    elif isinstance(value, (list, tuple, set)):
        return self.__construct_list(value)
    return self.safe_values(value)
Return value for JSON serialization
def add_corpus(self,
               customization_id,
               corpus_name,
               corpus_file,
               allow_overwrite=None,
               **kwargs):
    """Add a corpus text file to a custom language model.

    :param str customization_id: GUID of the custom language model.
    :param str corpus_name: Name for the new corpus.
    :param file corpus_file: Plain text file with the training data.
    :param bool allow_overwrite: If `True`, overwrite an existing corpus
        with the same name.
    :param dict headers: Optional extra request headers (via kwargs).
    :return: A `DetailedResponse` with result, headers and HTTP status code.
    :rtype: DetailedResponse
    :raises ValueError: if any required argument is None.
    """
    for arg_name, arg_value in (('customization_id', customization_id),
                                ('corpus_name', corpus_name),
                                ('corpus_file', corpus_file)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))

    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers.update(get_sdk_headers('speech_to_text', 'V1', 'add_corpus'))

    url = '/v1/customizations/{0}/corpora/{1}'.format(
        *self._encode_path_vars(customization_id, corpus_name))
    return self.request(
        method='POST',
        url=url,
        headers=headers,
        params={'allow_overwrite': allow_overwrite},
        files={'corpus_file': (None, corpus_file, 'text/plain')},
        accept_json=True)
Add a corpus. Adds a single corpus text file of new training data to a custom language model. Use multiple requests to submit multiple corpus text files. You must use credentials for the instance of the service that owns a model to add a corpus to it. Adding a corpus does not affect the custom language model until you train the model for the new data by using the **Train a custom language model** method. Submit a plain text file that contains sample sentences from the domain of interest to enable the service to extract words in context. The more sentences you add that represent the context in which speakers use words from the domain, the better the service's recognition accuracy. The call returns an HTTP 201 response code if the corpus is valid. The service then asynchronously processes the contents of the corpus and automatically extracts new words that it finds. This can take on the order of a minute or two to complete depending on the total number of words and the number of new words in the corpus, as well as the current load on the service. You cannot submit requests to add additional resources to the custom model or to train the model until the service's analysis of the corpus for the current request completes. Use the **List a corpus** method to check the status of the analysis. The service auto-populates the model's words resource with words from the corpus that are not found in its base vocabulary. These are referred to as out-of-vocabulary (OOV) words. You can use the **List custom words** method to examine the words resource. You can use other words method to eliminate typos and modify how words are pronounced as needed. To add a corpus file that has the same name as an existing corpus, set the `allow_overwrite` parameter to `true`; otherwise, the request fails. Overwriting an existing corpus causes the service to process the corpus text file and extract OOV words anew. 
Before doing so, it removes any OOV words associated with the existing corpus from the model's words resource unless they were also added by another corpus or grammar, or they have been modified in some way with the **Add custom words** or **Add a custom word** method. The service limits the overall amount of data that you can add to a custom model to a maximum of 10 million total words from all sources combined. Also, you can add no more than 30 thousand custom (OOV) words to a model. This includes words that the service extracts from corpora and grammars, and words that you add directly. **See also:** * [Working with corpora](https://cloud.ibm.com/docs/services/speech-to-text/language-resource.html#workingCorpora) * [Add corpora to the custom language model](https://cloud.ibm.com/docs/services/speech-to-text/language-create.html#addCorpora). :param str customization_id: The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with credentials for the instance of the service that owns the custom model. :param str corpus_name: The name of the new corpus for the custom language model. Use a localized name that matches the language of the custom model and reflects the contents of the corpus. * Include a maximum of 128 characters in the name. * Do not include spaces, slashes, or backslashes in the name. * Do not use the name of an existing corpus or grammar that is already defined for the custom model. * Do not use the name `user`, which is reserved by the service to denote custom words that are added or modified by the user. :param file corpus_file: A plain text file that contains the training data for the corpus. Encode the file in UTF-8 if it contains non-ASCII characters; the service assumes UTF-8 encoding if it encounters non-ASCII characters. Make sure that you know the character encoding of the file. You must use that encoding when working with the words in the custom language model. 
For more information, see [Character encoding](https://cloud.ibm.com/docs/services/speech-to-text/language-resource.html#charEncoding). With the `curl` command, use the `--data-binary` option to upload the file for the request. :param bool allow_overwrite: If `true`, the specified corpus overwrites an existing corpus with the same name. If `false`, the request fails if a corpus with the same name already exists. The parameter has no effect if a corpus with the same name does not already exist. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def get_cache_buster(src_path, method='importtime'):
    """Return a cache-busting token for *src_path*.

    :param src_path: filesystem path of the asset.
    :param method: one of the keys of ``_BUST_METHODS``
        ('importtime', 'mtime', 'md5').
    :raises KeyError: if *method* is not a supported busting method.
    """
    if method not in _BUST_METHODS:
        raise KeyError('Unsupported busting method value: %s' % method)
    return _BUST_METHODS[method](src_path)
Return a string that can be used as a parameter for cache-busting URLs for this asset. :param src_path: Filesystem path to the file we're generating a cache-busting value for. :param method: Method for cache-busting. Supported values: importtime, mtime, md5 The default is 'importtime', because it requires the least processing. Note that the mtime and md5 cache busting methods' results are cached on the src_path. Example:: >>> SRC_PATH = os.path.join(os.path.dirname(__file__), 'html.py') >>> get_cache_buster(SRC_PATH) is _IMPORT_TIME True >>> get_cache_buster(SRC_PATH, method='mtime') == _cache_key_by_mtime(SRC_PATH) True >>> get_cache_buster(SRC_PATH, method='md5') == _cache_key_by_md5(SRC_PATH) True
def validate(self, value):
    """Validate *value* with the optional custom validator.

    The validator may return either a bool or a ``(bool, errormsg)``
    tuple.  On success ``self.error`` is cleared; on failure it is set
    to the corresponding error message.

    :param value: the value to check.
    :return: True if the value is valid (or no validator is set).
    :raises TypeError: if the validator returns an unsupported type.
    """
    if self.validator is None:
        self.error = None
        return True
    try:
        valid = self.validator(value)
    except Exception as e:
        # Previously a crashing validator dropped into pdb (debug
        # leftover) and then hit a NameError on `valid`; treat it as a
        # validation failure instead.
        self.error = "Validator raised an exception: %s" % e
        return False
    if isinstance(valid, tuple) and len(valid) == 2:
        valid, errormsg = valid
    elif isinstance(valid, bool):
        errormsg = "Invalid value"
    else:
        raise TypeError("Custom validator must return a boolean or a (bool, errormsg) tuple.")
    self.error = None if valid else errormsg
    return valid
Validate the parameter
def event_exists(self, client, check):
    """Return True if the Sensu API reports an event for *client*/*check*."""
    path = 'events/{}/{}'.format(client, check)
    response = self.api_request('get', path)
    return response.status_code == 200
Query Sensu API for event.
def bwrite(stream, obj):
    """Bencode *obj* and write it to a stream or file path.

    :param stream: a writable file-like object, or a path that is opened
        in binary write mode and closed automatically.
    :param obj: the object to encode.
    """
    if hasattr(stream, "write"):
        stream.write(bencode(obj))
    else:
        with open(stream, "wb") as handle:
            handle.write(bencode(obj))
Encode a given object to a file or stream.
def create(self, to, from_, parameters=values.unset):
    """Create a new ExecutionInstance.

    :param unicode to: Contact phone number to start the Flow Execution.
    :param unicode from_: Twilio phone number used to send messages or
        initiate calls during the Flow Execution.
    :param dict parameters: JSON data added to the flow's context.
    :returns: the newly created ExecutionInstance.
    :rtype: twilio.rest.studio.v1.flow.execution.ExecutionInstance
    """
    form = values.of({
        'To': to,
        'From': from_,
        'Parameters': serialize.object(parameters),
    })
    payload = self._version.create('POST', self._uri, data=form)
    return ExecutionInstance(
        self._version, payload, flow_sid=self._solution['flow_sid'])
Create a new ExecutionInstance :param unicode to: The Contact phone number to start a Studio Flow Execution. :param unicode from_: The Twilio phone number to send messages or initiate calls from during the Flow Execution. :param dict parameters: JSON data that will be added to your flow's context and can accessed as variables inside your flow. :returns: Newly created ExecutionInstance :rtype: twilio.rest.studio.v1.flow.execution.ExecutionInstance
def predictions_iter(self):
    """Yield prediction sensitivity vectors, one per forecast name.

    Yields
    ------
    matrix
        prediction sensitivity vector for each entry of
        ``self.forecast_names``.
    """
    for forecast_name in self.forecast_names:
        yield self.predictions.get(col_names=forecast_name)
property decorated prediction iterator Returns ------- iterator : iterator iterator on prediction sensitivity vectors (matrix)
def determine_version(self, request, api_version=None):
    """Resolve the API version for a request.

    Candidates come from the URL path (matched against known
    ``self.versions``), the ``X-API-VERSION`` header and the
    ``api_version`` query parameter; all present sources must agree.

    :raises ValueError: if the sources request conflicting versions.
    :return: the agreed version, or None if no source specified one.
    """
    if api_version is False:
        api_version = None

    # A version embedded in the path (e.g. "/v2/...") wins over the
    # api_version argument.
    for candidate in self.versions:
        if candidate and "v{0}".format(candidate) in request.path:
            api_version = candidate
            break

    requested = set()
    if api_version is not None:
        requested.add(api_version)
    header_version = request.get_header("X-API-VERSION")
    if header_version:
        requested.add(header_version)
    param_version = request.get_param('api_version')
    if param_version is not None:
        requested.add(param_version)

    if len(requested) > 1:
        raise ValueError('You are requesting conflicting versions')
    return next(iter(requested or (None, )))
Determines the appropriate version given the set api_version, the request header, and URL query params
def enable(self, trigger_ids=None):
    """Enable triggers.

    :param trigger_ids: List of trigger definition ids to enable
        (defaults to an empty list).
    """
    # BUG FIX: the default was a shared mutable list (trigger_ids=[]),
    # the classic mutable-default-argument pitfall; use None instead.
    joined_ids = ','.join(trigger_ids or [])
    url = self._service_url(['triggers', 'enabled'],
                            params={'triggerIds': joined_ids,
                                    'enabled': 'true'})
    self._put(url, data=None, parse_json=False)
Enable triggers. :param trigger_ids: List of trigger definition ids to enable
def register_vcs_handler(vcs, method):
    """Decorator factory marking a function as the handler for a VCS method.

    The decorated function is stored in ``HANDLERS[vcs][method]`` and
    returned unchanged.
    """
    def decorate(f):
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
Decorator to mark a method as the handler for a particular VCS.
def calc_checksum(sentence):
    """Calculate the NMEA 0183 checksum for *sentence*.

    The checksum is the XOR of every character between the leading "$"
    symbol (if present) and the "*" checksum separator.

    Args:
        sentence (str): NMEA 0183 formatted sentence.

    Returns:
        int: XOR of the relevant character ordinals.
    """
    if sentence.startswith('$'):
        sentence = sentence[1:]
    body = sentence.split('*', 1)[0]
    checksum = 0
    for char in body:
        checksum ^= ord(char)
    return checksum
Calculate a NMEA 0183 checksum for the given sentence. NMEA checksums are a simple XOR of all the characters in the sentence between the leading "$" symbol, and the "*" checksum separator. Args: sentence (str): NMEA 0183 formatted sentence
def convert(source, ext=COMPLETE, fmt=HTML, dname=None):
    """Convert a string of MultiMarkdown text to the requested format.

    Transclusion is performed only when the COMPATIBILITY extension is
    not set and *dname* is given (a file path is reduced to its
    containing directory).

    Keyword arguments:
    source -- string containing MultiMarkdown text
    ext -- extension bitfield passed to the conversion
    fmt -- flag selecting the output format
    dname -- path used for transclusion; None bypasses it
    """
    if dname and not ext & COMPATIBILITY:
        if os.path.isfile(dname):
            dname = os.path.abspath(os.path.dirname(dname))
        source, _ = _expand_source(source, dname, fmt)
    _MMD_LIB.markdown_to_string.argtypes = [
        ctypes.c_char_p, ctypes.c_ulong, ctypes.c_int]
    _MMD_LIB.markdown_to_string.restype = ctypes.c_char_p
    encoded = source.encode('utf-8')
    result = _MMD_LIB.markdown_to_string(encoded, ext, fmt)
    return result.decode('utf-8')
Converts a string of MultiMarkdown text to the requested format. Transclusion is performed if the COMPATIBILITY extension is not set, and dname is set to a valid directory Keyword arguments: source -- string containing MultiMarkdown text ext -- extension bitfield to pass to conversion process fmt -- flag indicating output format to use dname -- Path to use for transclusion - if None, transclusion functionality is bypassed
def load(self, name):
    """Load and return a foreign library.

    :param name: short library name, resolved with
        ``ctypes.util.find_library`` (e.g. "c" for libc).
    :return: the loaded CDLL object.
    """
    library_path = ctypes.util.find_library(name)
    return ctypes.cdll.LoadLibrary(library_path)
Loads and returns foreign library.
def _check_load_parameters(self, **kwargs):
    """Verify that kwargs satisfy this resource's required load params.

    :params: kwargs
    :raises MissingRequiredReadParameter: listing the sorted missing keys.
    """
    required = self._meta_data['required_load_parameters']
    missing = _missing_required_parameters(required, **kwargs)
    if missing:
        missing.sort()
        raise MissingRequiredReadParameter(
            'Missing required params: %s' % missing)
Params given to load should at least satisfy required params. :params: kwargs :raises: MissingRequiredReadParameter
def region_by_identifier(self, identifier):
    """Return the region of interest for the supplied identifier.

    :param identifier: positive integer naming the segment of interest
    :raises ValueError: for negative, non-integer or zero identifiers
        (zero represents the background)
    :returns: `jicbioimage.core.region.Region`
    """
    if identifier < 0 or not np.equal(np.mod(identifier, 1), 0):
        raise ValueError("Identifier must be a positive integer.")
    if identifier == 0:
        raise ValueError("0 represents the background.")
    return Region.select_from_array(self, identifier)
Return region of interest corresponding to the supplied identifier. :param identifier: integer corresponding to the segment of interest :returns: `jicbioimage.core.region.Region`
def open(self):
    """Obtain the lvm handle via ``lvm_init`` if one is not already held.

    Usually only needed when doing operations through the ctypes
    function wrappers in conversion.py.

    *Raises:*

    * HandleError: if ``lvm_init`` returns a null handle.
    """
    if not self.handle:
        # Fall back to an empty path when no system_dir is configured.
        try:
            path = self.system_dir
        except AttributeError:
            path = ''
        # NOTE(review): assigns the name-mangled attribute presumably
        # backing the ``handle`` property read above — confirm against
        # the class definition.
        self.__handle = lvm_init(path)
        if not bool(self.__handle):
            raise HandleError("Failed to initialize LVM handle.")
Obtains the lvm handle. Usually you would never need to use this method unless you are trying to do operations using the ctypes function wrappers in conversion.py *Raises:* * HandleError
def filter_by_analysis_period(self, analysis_period):
    """Filter this Data Collection by a Ladybug analysis period.

    Args:
        analysis_period: A Ladybug analysis period.

    Return:
        A new Data Collection containing only the filtered data.
    """
    self._check_analysis_period(analysis_period)
    filtered = self.filter_by_moys(analysis_period.moys)
    filtered.header._analysis_period = analysis_period
    return filtered
Filter a Data Collection based on an analysis period. Args: analysis period: A Ladybug analysis period Return: A new Data Collection with filtered data
def page_should_not_contain_text(self, text, loglevel='INFO'):
    """Verify that the current page does not contain *text*.

    On failure the page source is logged at *loglevel*; pass `NONE`
    to disable logging.
    """
    if not self._is_text_present(text):
        self._info("Current page does not contains text '%s'." % text)
        return
    self.log_source(loglevel)
    raise AssertionError("Page should not have contained text '%s'" % text)
Verifies that the current page does not contain `text`.

If this keyword fails, it automatically logs the page source
using the log level specified with the optional `loglevel` argument.
Giving `NONE` as level disables logging.
def upsample(self, factor):
    """Upsample the PIL image by an integer factor (inverse of rebin).

    Call this before writing text or drawing on the image; drawing
    coordinates are converted automatically afterwards.

    :param factor: integer scale factor applied to both dimensions.
    :raises RuntimeError: if *factor* is not an integer.
    """
    self.checkforpilimage()
    # BUG FIX: modernized from Python 2 syntax (`raise Cls, msg` and
    # `print` statements) that is a SyntaxError on Python 3.
    if not isinstance(factor, int):
        raise RuntimeError("Upsample factor must be an integer !")
    if self.verbose:
        print("Upsampling by a factor of %i" % factor)
    new_size = (self.pilimage.size[0] * factor,
                self.pilimage.size[1] * factor)
    self.pilimage = self.pilimage.resize(new_size)
    self.upsamplefactor = factor
    # Invalidate any cached drawing object; it must be recreated at the
    # new size.
    self.draw = None
The inverse operation of rebin, applied on the PIL image. Do this before writing text or drawing on the image ! The coordinates will be automatically converted for you
def set_proto_message_event(
        pb_message_event, span_data_message_event):
    """Copy message-event fields from an opencensus MessageEvent onto a
    protobuf message event.

    :type pb_message_event:
        :class:`~opencensus.proto.trace.Span.TimeEvent.MessageEvent`
    :param pb_message_event: protobuf message event to populate
    :type span_data_message_event:
        :class:`~opencensus.trace.time_event.MessageEvent`
    :param span_data_message_event: opencensus message event source
    """
    source = span_data_message_event
    pb_message_event.type = source.type
    pb_message_event.id = source.id
    pb_message_event.uncompressed_size = source.uncompressed_size_bytes
    pb_message_event.compressed_size = source.compressed_size_bytes
Sets properties on the protobuf message event. :type pb_message_event: :class: `~opencensus.proto.trace.Span.TimeEvent.MessageEvent` :param pb_message_event: protobuf message event :type span_data_message_event: :class: `~opencensus.trace.time_event.MessageEvent` :param span_data_message_event: opencensus message event
def to_dqflags(self, bits=None, minlen=1, dtype=float, round=False):
    """Convert this `StateVector` into a `~gwpy.segments.DataQualityDict`.

    Each bit's `StateTimeSeries` becomes a `DataQualityFlag`, keyed by
    bit name.

    Parameters
    ----------
    bits : `list`, optional
        bit indices or bit names to select, defaults to all bits
    minlen : `int`, optional, default: 1
        minimum number of consecutive `True` samples per segment
    dtype : type, optional
        passed through to `to_dqflag`
    round : `bool`, optional
        passed through to `to_dqflag`

    Returns
    -------
    `~gwpy.segments.DataQualityDict`
    """
    from ..segments import DataQualityDict
    flags = DataQualityDict()
    for bit_name, series in self.get_bit_series(bits=bits).items():
        flags[bit_name] = series.to_dqflag(
            name=bit_name, minlen=minlen, round=round, dtype=dtype,
            description=self.bits.description[bit_name])
    return flags
Convert this `StateVector` into a `~gwpy.segments.DataQualityDict` The `StateTimeSeries` for each bit is converted into a `~gwpy.segments.DataQualityFlag` with the bits combined into a dict. Parameters ---------- minlen : `int`, optional, default: 1 minimum number of consecutive `True` values to identify as a `Segment`. This is useful to ignore single bit flips, for example. bits : `list`, optional a list of bit indices or bit names to select, defaults to `~StateVector.bits` Returns ------- DataQualityFlag list : `list` a list of `~gwpy.segments.flag.DataQualityFlag` reprensentations for each bit in this `StateVector` See Also -------- :meth:`StateTimeSeries.to_dqflag` for details on the segment representation method for `StateVector` bits
def get_categorical_feature_names(example):
    """Return sorted names of byte-list (categorical) features.

    Args:
        example: An example.

    Returns:
        Sorted list of names of features whose value kind is
        'bytes_list'.
    """
    features = get_example_features(example)
    names = [name for name, feature in features.items()
             if feature.WhichOneof('kind') == 'bytes_list']
    return sorted(names)
Returns a list of feature names for byte type features. Args: example: An example. Returns: A list of categorical feature names (e.g. ['education', 'marital_status'] )
def _check_and_handle_includes(self, from_file):
    """Process the optional [INCLUDE] section of *from_file*.

    Each path listed under ``paths`` is resolved (relative paths are
    taken from *from_file*'s directory) and parsed in turn.  The option
    is removed after reading so it is only handled once.

    :raises RecursionInConfigFile: if an included file was already parsed.
    """
    logger.debug("Check/handle includes from %s", from_file)
    try:
        paths = self._parser.get("INCLUDE", "paths")
    except (config_parser.NoSectionError, config_parser.NoOptionError) as exc:
        logger.debug("_check_and_handle_includes: EXCEPTION: %s", exc)
        return
    paths_lines = [p.strip() for p in paths.split("\n")]
    logger.debug("paths = %s (wanted just once; CLEARING)", paths_lines)
    self._parser.remove_option("INCLUDE", "paths")
    for f in paths_lines:
        abspath = (f if os.path.isabs(f) else os.path.abspath(
            os.path.join(os.path.dirname(from_file), f)))
        use_path = os.path.normpath(abspath)
        if use_path in self._parsed_files:
            # BUG FIX: the exception was built with logging-style args
            # ("%s", a, b) that are never interpolated; format the
            # message explicitly.
            raise RecursionInConfigFile(
                "In %s: %s already read" % (from_file, use_path))
        self._parsed_files.append(use_path)
        self._handle_rc_file(use_path)
Look for an optional INCLUDE section in the given file path. If the parser set `paths`, it is cleared so that they do not keep showing up when additional files are parsed.
def _print_results(file, status):
    """Print a colorized download-result line.

    Args:
        file (str): The filename.
        status (str): The file download status ('Success', 'Skipped',
            anything else is shown in red).
    """
    status_colors = {'Success': c.Fore.GREEN, 'Skipped': c.Fore.YELLOW}
    status_color = status_colors.get(status, c.Fore.RED)
    file_color = c.Fore.GREEN
    line = '{}{!s:<13}{}{!s:<35}{}{!s:<8}{}{}'.format(
        c.Fore.CYAN,
        'Downloading:',
        file_color,
        file,
        c.Fore.CYAN,
        'Status:',
        status_color,
        status,
    )
    print(line)
Print the download results. Args: file (str): The filename. status (str): The file download status.
def rebalance_replicas(
        self,
        max_movement_count=None,
        max_movement_size=None,
):
    """Balance replicas across replication-groups.

    :param max_movement_count: maximum number of partitions to move.
    :param max_movement_size: maximum total size of partitions to move.
    :returns: 2-tuple of (number of partitions moved, total size of
        the partitions moved).
    """
    total_count = 0
    total_size = 0
    for partition in six.itervalues(self.cluster_topology.partitions):
        # Shrink the remaining budgets as partitions get moved.
        remaining_count = (None if not max_movement_count
                           else max_movement_count - total_count)
        remaining_size = (None if not max_movement_size
                          else max_movement_size - total_size)
        moved_count, moved_size = self._rebalance_partition_replicas(
            partition, remaining_count, remaining_size)
        total_count += moved_count
        total_size += moved_size
    return total_count, total_size
Balance replicas across replication-groups. :param max_movement_count: The maximum number of partitions to move. :param max_movement_size: The maximum total size of the partitions to move. :returns: A 2-tuple whose first element is the number of partitions moved and whose second element is the total size of the partitions moved.
def _send_script(self, device_info, control_info, script, progress_callback):
    """Send *script* as a sequence of 20-byte RPC chunks.

    Nothing special happens here: the script is streamed chunk by
    chunk, invoking ``progress_callback(sent, total)`` after each RPC
    when a callback was supplied.
    """
    total = len(script)
    for offset in range(0, total, 20):
        chunk = script[offset:offset + 20]
        self._send_rpc(device_info, control_info, 8, 0x2101, chunk,
                       0.001, 1.0)
        if progress_callback is not None:
            progress_callback(offset + len(chunk), total)
Send a script by repeatedly sending it as a bunch of RPCs. This function doesn't do anything special, it just sends a bunch of RPCs with each chunk of the script until it's finished.
def filter_dependencies(self):
    """Bail when any of the check's dependency events currently exists.

    Dependencies may be listed as 'check' (same client as this event)
    or as 'client/check'.
    """
    dependencies = self.event['check'].get('dependencies', None)
    # None (or any non-list value) means nothing to do.
    if not isinstance(dependencies, list):
        return
    for dependency in dependencies:
        if not str(dependency):
            continue
        parts = tuple(dependency.split('/'))
        if len(parts) == 2:
            client, check = parts
        else:
            client = self.event['client']['name']
            check = parts[0]
        if self.event_exists(client, check):
            self.bail('check dependency event exists')
Determine whether a check has dependencies.
def get_parameters(self, params, graph=None):
    """Evaluate model parameter tensors after restoring the saved model.

    :param params: dict mapping names to tensors (or lists of tensors).
    :param graph: optional tf.Graph to use; defaults to ``self.tf_graph``.
    :return: dict of evaluated values; list entries are flattened into
        keys of the form "<name>-<index>" (1-based).
    """
    g = graph if graph is not None else self.tf_graph
    with g.as_default():
        # The session is stored on self so other helpers can reuse it.
        with tf.Session() as self.tf_session:
            # Restore trained variables before evaluating anything.
            self.tf_saver.restore(self.tf_session, self.model_path)
            out = {}
            for par in params:
                if type(params[par]) == list:
                    for i, p in enumerate(params[par]):
                        out[par + '-' + str(i+1)] = p.eval()
                else:
                    out[par] = params[par].eval()
            return out
Get the parameters of the model. :param params: dictionary of keys (str names) and values (tensors). :return: evaluated tensors in params
def txn_getAssociation(self, server_url, handle=None):
    """Return the most recently issued valid association for this
    server URL (and handle, if given), or None.

    Expired associations (``expiresIn == 0``) are removed from the
    store as a side effect.

    str -> NoneType or Association
    """
    if handle is not None:
        self.db_get_assoc(server_url, handle)
    else:
        self.db_get_assocs(server_url)

    rows = self.cur.fetchall()
    if not rows:
        return None

    associations = []
    for values in rows:
        values = list(values)
        values[1] = self.blobDecode(values[1])
        assoc = Association(*values)
        if assoc.expiresIn == 0:
            self.txn_removeAssociation(server_url, assoc.handle)
        else:
            associations.append((assoc.issued, assoc))

    if not associations:
        return None
    # BUG FIX: a plain tuple sort falls back to comparing Association
    # objects when two entries share an `issued` timestamp, which
    # raises TypeError on Python 3.  Sort on the timestamp only.
    associations.sort(key=lambda pair: pair[0])
    return associations[-1][1]
Get the most recent association that has been set for this server URL and handle. str -> NoneType or Association
def pysal_Moran(self, **kwargs):
    """Compute Moran's I global spatial autocorrelation for this GeoRaster.

    Usage: geo.pysal_Moran(permutations=1000, rook=True)

    Keyword arguments are passed to both raster_weights() and
    pysal.Moran; the result is stored on ``self.Moran``.
    See help(gr.raster_weights), help(pysal.Moran) for options.
    """
    if self.weights is None:
        self.raster_weights(**kwargs)
    flat = self.raster.flatten()
    # Keep only unmasked cells (elementwise masked-array test).
    flat = flat[flat.mask == False]
    self.Moran = pysal.Moran(flat, self.weights, **kwargs)
Compute Moran's I measure of global spatial autocorrelation for GeoRaster Usage: geo.pysal_Moran(permutations = 1000, rook=True) arguments passed to raster_weights() and pysal.Moran See help(gr.raster_weights), help(pysal.Moran) for options
async def _connect_sentinel(self, address, timeout, pools):
    """Try to connect to one Sentinel.

    On success the new pool is appended to *pools* and returned; on
    failure the exception is returned (not raised) so the caller can
    inspect every attempt.
    """
    try:
        with async_timeout(timeout, loop=self._loop):
            pool = await create_pool(
                address, minsize=1, maxsize=2,
                parser=self._parser_class,
                loop=self._loop)
            pools.append(pool)
            return pool
    except asyncio.TimeoutError as err:
        sentinel_logger.debug(
            "Failed to connect to Sentinel(%r) within %ss timeout",
            address, timeout)
        return err
    except Exception as err:
        sentinel_logger.debug(
            "Error connecting to Sentinel(%r): %r", address, err)
        return err
Try to connect to specified Sentinel returning either connections pool or exception.
def calculate_size(self, modules_per_line, number_of_lines, dpi=300):
    """Calculate the size of the barcode in pixels.

    :parameters:
        modules_per_line : Integer
            Number of modules in one line.
        number_of_lines : Integer
            Number of lines of the barcode.
        dpi : Integer
            DPI used for the mm -> px conversion.

    :returns: Width and height of the barcode in pixels.
    :rtype: Tuple
    """
    width_mm = 2 * self.quiet_zone + modules_per_line * self.module_width
    height_mm = 2.0 + self.module_height * number_of_lines
    if self.font_size and self.text:
        # Leave room for the human-readable text under the bars.
        height_mm += pt2mm(self.font_size) / 2 + self.text_distance
    return int(mm2px(width_mm, dpi)), int(mm2px(height_mm, dpi))
Calculates the size of the barcode in pixel. :parameters: modules_per_line : Integer Number of modules in one line. number_of_lines : Integer Number of lines of the barcode. dpi : Integer DPI to calculate. :returns: Width and height of the barcode in pixel. :rtype: Tuple
def menu_text(self, request=None):
    """Return the link text to use when this page appears in menus.

    Uses the field named by ``PAGE_FIELD_FOR_MENU_ITEM_TEXT`` when it
    names something other than 'menu_text' and exists on this page;
    otherwise falls back to the page title.
    """
    field_name = settings.PAGE_FIELD_FOR_MENU_ITEM_TEXT
    if field_name == 'menu_text' or not hasattr(self, field_name):
        return self.title
    return getattr(self, field_name)
Return a string to use as link text when this page appears in menus.
def discover(package, cls_match_func):
    """Return the set of classes in *package* accepted by *cls_match_func*.

    Args:
        package - A Python package, walked recursively.
        cls_match_func - Predicate taking a class and returning true if
            the class is to be included in the output.
    """
    matched = set()
    prefix = package.__name__ + '.'
    for _, module_name, _ in pkgutil.walk_packages(package.__path__,
                                                   prefix=prefix):
        module = __import__(module_name, fromlist=[str('__trash')], level=0)
        for _, candidate in inspect.getmembers(module, inspect.isclass):
            # Skip classes merely imported into the module.
            if (candidate.__module__ == module.__name__
                    and cls_match_func(candidate)):
                matched.add(candidate)
    return matched
Returns a set of classes in the directory matched by cls_match_func Args: path - A Python package cls_match_func - Function taking a class and returning true if the class is to be included in the output.
def addif(self, iname):
    """Attach network interface *iname* to this bridge via brctl."""
    _runshell([brctlexe, 'addif', self.name, iname],
              "Could not add interface %s to %s." % (iname, self.name))
Add an interface to the bridge
def repeats(seq, size):
    """Count subsequences of length *size* that occur more than once.

    :param seq: Input sequence.
    :type seq: coral.DNA, coral.RNA, or str
    :param size: Size of the repeat to count.
    :type size: int
    :returns: (subsequence, count) pairs for every repeat found.
    :rtype: list of tuples
    """
    seq = str(seq)
    n_mers = [seq[i:i + size] for i in range(len(seq) - size + 1)]
    counted = Counter(n_mers)
    # BUG FIX: Counter.iteritems() is Python 2 only (AttributeError on
    # Python 3); use items().
    return [(mer, count) for mer, count in counted.items() if count > 1]
Count times that a sequence of a certain size is repeated. :param seq: Input sequence. :type seq: coral.DNA or coral.RNA :param size: Size of the repeat to count. :type size: int :returns: Occurrences of repeats and how many :rtype: tuple of the matched sequence and how many times it occurs
def make(target="all", dir=".", **kwargs): if not fs.isfile(fs.path(dir, "Makefile")): raise NoMakefileError("No makefile in '{}'".format(fs.abspath(dir))) fs.cd(dir) if "timeout" not in kwargs: kwargs["timeout"] = 300 ret, out, err = system.run(["make", target], **kwargs) fs.cdpop() if ret > 0: if re.search(_BAD_TARGET_RE, err): raise NoTargetError("No rule for target '{}'" .format(target)) else: raise MakeError("Target '{}' failed".format(target)) raise MakeError("Failed") return ret, out, err
Run make. Arguments: target (str, optional): Name of the target to build. Defaults to "all". dir (str, optional): Path to directory containing Makefile. **kwargs (optional): Any additional arguments to be passed to system.run(). Returns: (int, str, str): The first element is the return code of the make command. The second and third elements are the stdout and stderr of the process. Raises: NoMakefileError: In case a Makefile is not found in the target directory. NoTargetError: In case the Makefile does not support the requested target. MakeError: In case the target rule fails.
def certify_bool(value, required=True):
    """Certifier for boolean values.

    :param value: The value to be certified.
    :param bool required: Whether the value can be `None`. Defaults to True.
    :raises CertifierTypeError: The type is invalid.
    """
    if certify_required(value=value, required=required):
        return
    if isinstance(value, bool):
        return
    raise CertifierTypeError(
        message="expected bool, but value is of type {cls!r}".format(
            cls=value.__class__.__name__),
        value=value,
        required=required,
    )
Certifier for boolean values. :param value: The value to be certified. :param bool required: Whether the value can be `None`. Defaults to True. :raises CertifierTypeError: The type is invalid
def batch_iter(iterator, batch_size, return_func=None, padding=None):
    """Break an iterable into batches of size *batch_size*.

    *padding* must NOT be a valid member of the iterator; it is used
    internally to flag (and drop) the filler at the tail of the final,
    shorter batch.

    Parameters
    ----------
    iterator : iterable
    batch_size : int
    return_func : callable or None
        Applied to each batch generator (e.g. `list` or `set`); when
        None, generators are yielded.
    padding : anything that is not a valid element of the iterator

    Returns
    -------
    Iterator over batches (lists/generators depending on return_func).
    """
    chunks = zip_longest(*[iter(iterator)] * batch_size, fillvalue=padding)
    for chunk in chunks:
        batch = (item for item in chunk if item is not padding)
        yield batch if return_func is None else return_func(batch)
Break an iterable into batches of size batch_size Note that `padding` should be set to something (anything) which is NOT a valid member of the iterator. For example, None works for [0,1,2,...10], but not for ['a', None, 'c', 'd']. Parameters ---------- iterator : iterable A python object which is iterable. batch_size : int The size of batches you wish to produce from the iterator. return_func : executable or None Pass a function that takes a generator and returns an iterable (e.g. `list` or `set`). If None, a generator will be returned. padding : anything This is used internally to ensure that the remainder of the list is included. This MUST NOT be a valid element of the iterator. Returns ------- An iterator over lists or generators, depending on `return_lists`.
def parse_peddy_sexcheck(handle: TextIO):
    """Parse Peddy sexcheck CSV output.

    Returns a dict keyed by sample id with the predicted sex, the
    het ratio (as float) and whether an error was flagged.
    """
    parsed = {}
    for row in csv.DictReader(handle):
        parsed[row['sample_id']] = {
            'predicted_sex': row['predicted_sex'],
            'het_ratio': float(row['het_ratio']),
            'error': row['error'] == 'True',
        }
    return parsed
Parse Peddy sexcheck output.
def get_string_camel_patterns(cls, name, min_length=0):
    """Find all camel-casing permutations for abbreviations of *name*.

    :param name: str, the name to expand into abbreviations and their
        casing permutations
    :param min_length: int, minimum length wanted for abbreviations
    :return: list(list(str)), casing permutations per abbreviation
    """
    patterns = []
    # Longest abbreviations first.
    abbreviations = sorted(
        set(cls._get_abbreviations(name, output_length=min_length)),
        key=len, reverse=True)
    for abbr in abbreviations:
        permutations = sorted(
            set(cls._get_casing_permutations(abbr)),
            key=lambda v: (v.upper(), v[0].islower(), len(v)))
        valid = [p for p in permutations
                 if cls.is_valid_camel(p) or len(p) <= 2]
        if valid:
            patterns.append(valid)
    return patterns
Finds all permutations of possible camel casing of the given name :param name: str, the name we need to get all possible permutations and abbreviations for :param min_length: int, minimum length we want for abbreviations :return: list(list(str)), list casing permutations of list of abbreviations
def ensure_dim(core, dim, dim_):
    """Ensure that dim is correct, re-keying *core* when needed.

    :param core: dict mapping exponent-key tuples to coefficients.
    :param dim: requested dimension; None falls back to *dim_*.
    :param dim_: dimension implied by the data.
    :return: (possibly re-keyed core, resulting dimension as int)
    """
    if dim is None:
        dim = dim_
    if not dim:
        return core, 1
    if dim_ == dim:
        return core, int(dim)

    if dim > dim_:
        def convert(key):
            return key[:dim_]
    else:
        def convert(key):
            return key + (0,) * (dim - dim_)

    remapped = {}
    for key, val in core.items():
        new_key = convert(key)
        if new_key in remapped:
            remapped[new_key] += val
        else:
            remapped[new_key] = val
    return remapped, int(dim)
Ensure that dim is correct.
def extern_call(self, context_handle, func, args_ptr, args_len):
    """Given a callable received over the FFI boundary, call it.

    The context handle, runnable and arguments are unpacked from their
    FFI representations before dispatching to ``self.call``.
    """
    context = self._ffi.from_handle(context_handle)
    runnable = context.from_value(func[0])
    raw_args = self._ffi.unpack(args_ptr, args_len)
    args = tuple(context.from_value(arg[0]) for arg in raw_args)
    return self.call(context, runnable, args)
Given a callable, call it.