code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _get_assignments_in(self, filterlist, symbol = ""): if symbol != "": lsymbol = symbol for assign in self._assignments: target = assign.split("%")[0].lower() if target == lsymbol: return True else: result = [] for assign in self._assignments: target = assign.split("%")[0].lower() if target in filterlist: result.append(assign) return result
Returns a list of code elements whose names are in the specified filter list. :arg filterlist: the list of symbols to check against the assignments. :arg symbol: when specified, return true if that symbol has its value changed via an assignment.
def _psi_n(x, n, b): return 2**(b-1) / gamma(b) * (-1)**n * \ np.exp(gammaln(n+b) - gammaln(n+1) + np.log(2*n+b) - 0.5 * np.log(2*np.pi*x**3) - (2*n+b)**2 / (8.*x))
Compute the n-th term in the infinite sum of the Jacobi density.
def get_docs_sources_from_ES(self):
    """Fetch document sources via the Elasticsearch MGET API.

    Only entries flagged ``get_from_ES`` in ``self.doc_to_update`` are
    requested; returns an iterator over the fetched docs (empty when there
    is nothing to fetch).
    """
    requested = [
        doc for doc, _, _, get_from_ES in self.doc_to_update if get_from_ES
    ]
    if not requested:
        return iter([])
    response = self.docman.elastic.mget(body={"docs": requested}, realtime=True)
    return iter(response["docs"])
Get document sources using MGET elasticsearch API
def createPedChr24UsingPlink(options): plinkCommand = ["plink", "--noweb", "--bfile", options.bfile, "--chr", "24", "--recodeA", "--keep", options.out + ".list_problem_sex_ids", "--out", options.out + ".chr24_recodeA"] runCommand(plinkCommand)
Run plink to create a ped format. :param options: the options. :type options: argparse.Namespace Uses Plink to create a ``ped`` file of markers on the chromosome ``24``. It uses the ``recodeA`` options to use additive coding. It also subsets the data to keep only samples with sex problems.
def extract_context(tex_file, extracted_image_data):
    """Extract surrounding text for every reference to each image label.

    For each entry in *extracted_image_data*, finds every ``\\fig{label}`` /
    ``\\ref{label}`` occurrence in the .tex source and collects the context
    before and after it (window size set by
    CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT). Mutates each ``data`` dict in
    place by setting ``data['contexts']``.

    :param tex_file: path to the .tex file; directories or missing paths
        yield an empty list.
    :param extracted_image_data: list of dicts, each with a ``'label'`` key.
    """
    # Bail out early for non-files (directory or missing path).
    if os.path.isdir(tex_file) or not os.path.exists(tex_file):
        return []
    lines = "".join(get_lines_from_file(tex_file))
    for data in extracted_image_data:
        context_list = []
        # Spans of every \fig{...} / \ref{...} occurrence of this label.
        indicies = [match.span() for match in re.finditer(
            r"(\\(?:fig|ref)\{%s\})" % (re.escape(data['label']),), lines)]
        for startindex, endindex in indicies:
            # Clamp the backward window at the start of the document.
            i = startindex - CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
            if i < 0:
                text_before = lines[:startindex]
            else:
                text_before = lines[i:startindex]
            context_before = get_context(text_before, backwards=True)
            # Forward window; slicing past the end is safe in Python.
            i = endindex + CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
            text_after = lines[endindex:i]
            context_after = get_context(text_after)
            context_list.append(
                context_before + ' \\ref{' + data['label'] + '} ' + context_after
            )
        data['contexts'] = context_list
Extract context. Given a .tex file and a label name, this function will extract the text before and after for all the references made to this label in the text. The number of characters to extract before and after is configurable. :param tex_file (list): path to .tex file :param extracted_image_data ([(string, string, list), ...]): a list of tuples of images matched to labels and captions from this document. :return extracted_image_data ([(string, string, list, list), (string, string, list, list),...)]: the same list, but now containing extracted contexts
def journal_event(events): reasons = set(chain.from_iterable(e.reasons for e in events)) attributes = set(chain.from_iterable(e.file_attributes for e in events)) return JrnlEvent(events[0].file_reference_number, events[0].parent_file_reference_number, events[0].file_name, events[0].timestamp, list(reasons), list(attributes))
Group multiple events into a single one.
def get_keys(self, alias_name, key_format):
    """Retrieve the contents of the PKCS12 file in the format specified.

    This PKCS12 formatted file contains both the certificate as well as the
    key file data.

    Args:
        alias_name: Key pair associated with the RabbitMQ
        key_format: Valid key formats are Base64 and PKCS12.

    Returns:
        dict: RabbitMQ certificate
    """
    uri = "{}/keys/{}?format={}".format(self.URI, alias_name, key_format)
    return self._client.get(uri)
Retrieves the contents of PKCS12 file in the format specified. This PKCS12 formatted file contains both the certificate as well as the key file data. Valid key formats are Base64 and PKCS12. Args: alias_name: Key pair associated with the RabbitMQ key_format: Valid key formats are Base64 and PKCS12. Returns: dict: RabbitMQ certificate
def _get_service_port(self, instance):
    """Get the ntp server port for a check instance.

    Reads 'host'/'port' from the instance config with fallbacks, and
    verifies the pair resolves via DNS before returning the port.
    """
    host = instance.get('host', DEFAULT_HOST)
    port = instance.get('port', DEFAULT_PORT)
    try:
        # Resolution check only; the result is discarded.
        socket.getaddrinfo(host, port)
    except socket.gaierror:
        # NOTE(review): falls back to DEFAULT_PORT_NUM, a different constant
        # from the DEFAULT_PORT used above — confirm the asymmetry is intended.
        port = DEFAULT_PORT_NUM
    return port
Get the ntp server port
def new(cls, shapes, start_x, start_y, x_scale, y_scale):
    """Return a new |FreeformBuilder| object.

    The initial pen location is specified (in local coordinates) by
    (*start_x*, *start_y*); both are rounded to the nearest integer.
    """
    pen_x = int(round(start_x))
    pen_y = int(round(start_y))
    return cls(shapes, pen_x, pen_y, x_scale, y_scale)
Return a new |FreeformBuilder| object. The initial pen location is specified (in local coordinates) by (*start_x*, *start_y*).
def total_rated_level(octave_frequencies): sums = 0.0 for band in OCTAVE_BANDS.keys(): if band not in octave_frequencies: continue if octave_frequencies[band] is None: continue if octave_frequencies[band] == 0: continue sums += pow(10.0, ((float(octave_frequencies[band]) + OCTAVE_BANDS[band][1]) / 10.0)) level = 10.0 * math.log10(sums) return level
Calculates the A-rated total sound pressure level based on octave band frequencies
def inline_callbacks(original, debug=False): f = eliot_friendly_generator_function(original) if debug: f.debug = True return inlineCallbacks(f)
Decorate a function like ``inlineCallbacks`` would but in a more Eliot-friendly way. Use it just like ``inlineCallbacks`` but where you want Eliot action contexts to Do The Right Thing inside the decorated function.
def trainObjects(objects, exp, numRepeatsPerObject, experimentIdOffset=0): objectsToLearn = objects.provideObjectsToLearn() objectTraversals = {} for objectId in objectsToLearn: objectTraversals[objectId + experimentIdOffset] = objects.randomTraversal( objectsToLearn[objectId], numRepeatsPerObject) exp.learnObjects(objectTraversals)
Train the network on all the objects by randomly traversing points on each object. We offset the id of each object to avoid confusion with any sequences that might have been learned.
def _parse_data(self, data, charset): builder = TreeBuilder(numbermode=self._numbermode) if isinstance(data,basestring): xml.sax.parseString(data, builder) else: xml.sax.parse(data, builder) return builder.root[self._root_element_name()]
Parse the xml data into dictionary.
def process_rgb_bytes(bytes_in, width, height, quality=DEFAULT_JPEG_QUALITY): if len(bytes_in) != width * height * 3: raise ValueError("bytes_in length is not coherent with given width and height") bytes_out_p = ffi.new("char**") bytes_out_p_gc = ffi.gc(bytes_out_p, lib.guetzli_free_bytes) length = lib.guetzli_process_rgb_bytes( bytes_in, width, height, bytes_out_p_gc, quality ) bytes_out = ffi.cast("char*", bytes_out_p_gc[0]) return ffi.unpack(bytes_out, length)
Generates an optimized JPEG from RGB bytes. :param bytes bytes_in: the input image's bytes :param int width: the width of the input image :param int height: the height of the input image :param int quality: the output JPEG quality (default 95) :returns: Optimized JPEG bytes :rtype: bytes :raises ValueError: the given width and height is not coherent with the ``bytes_in`` length. .. code:: python import pyguetzli # 2x2px RGB image # | red | green | image_pixels = b"\\xFF\\x00\\x00\\x00\\xFF\\x00" image_pixels += b"\\x00\\x00\\xFF\\xFF\\xFF\\xFF" # | blue | white | optimized_jpeg = pyguetzli.process_rgb_bytes(image_pixels, 2, 2)
def num_tasks(self, work_spec_name): return self.num_finished(work_spec_name) + \ self.num_failed(work_spec_name) + \ self.registry.len(WORK_UNITS_ + work_spec_name)
Get the total number of work units for some work spec.
def nPr(n, r):
    """Calculate the number of permutations nPr = n! / (n - r)!.

    Args:
        n (int): total number of items.
        r (int): items to permute.

    Returns:
        int: nPr, computed exactly with integer arithmetic. (Float
        division, as used previously, loses precision once the result
        exceeds 2**53.)
    """
    f = math.factorial
    # Floor division is exact here: n! is always divisible by (n - r)!.
    return f(n) // f(n - r)
Calculates nPr. Args: n (int): total number of items. r (int): items to permute Returns: nPr.
def parents(self, resources):
    """Walk the parent chain of this resource, returning ancestors
    nearest-first.

    The 'index' document is the site root and has no parents; otherwise
    the chain stops at the first parent id missing from ``resources``.
    """
    if self.docname == 'index':
        return []
    chain = []
    current = resources.get(self.parent)
    while current is not None:
        chain.append(current)
        current = resources.get(current.parent)
    return chain
Split the path in name and get parents
def revoke_session(): form = RevokeForm(request.form) if not form.validate_on_submit(): abort(403) sid_s = form.data['sid_s'] if SessionActivity.query.filter_by( user_id=current_user.get_id(), sid_s=sid_s).count() == 1: delete_session(sid_s=sid_s) db.session.commit() if not SessionActivity.is_current(sid_s=sid_s): flash('Session {0} successfully removed.'.format(sid_s), 'success') else: flash('Unable to remove the session {0}.'.format(sid_s), 'error') return redirect(url_for('invenio_accounts.security'))
Revoke a session.
def copytree(source_directory, destination_directory, ignore=None):
    """Recursively copy the contents of ``source_directory`` into
    ``destination_directory``.

    Unlike ``shutil.copytree``, the destination may already exist; only the
    *contents* of the source root are copied, not the root itself. The
    ``ignore`` callable follows the ``shutil.ignore_patterns`` convention.
    Code adapted from http://stackoverflow.com/a/12686557
    """
    if not os.path.isdir(source_directory):
        # Leaf of the recursion: a plain file copy.
        shutil.copyfile(source_directory, destination_directory)
        return
    if not os.path.isdir(destination_directory):
        os.makedirs(destination_directory)
    entries = os.listdir(source_directory)
    ignored = ignore(source_directory, entries) if ignore is not None else set()
    for entry in entries:
        if entry in ignored:
            continue
        copytree(
            os.path.join(source_directory, entry),
            os.path.join(destination_directory, entry),
            ignore,
        )
Recursively copy the contents of a source directory into a destination directory. Both directories must exist. This function does not copy the root directory ``source_directory`` into ``destination_directory``. Since ``shutil.copytree(src, dst)`` requires ``dst`` not to exist, we cannot use for our purposes. Code adapted from http://stackoverflow.com/a/12686557 :param string source_directory: the source directory, already existing :param string destination_directory: the destination directory, already existing
def clear_measurements(self):
    """Remove all measurements from ``self.measurements`` and reset the
    measurement counter; all previously issued IDs become invalid."""
    # Clear in place so any external references to the dict stay valid.
    self.measurements.clear()
    self.meas_counter = -1
Remove all measurements from self.measurements. Reset the measurement counter. All IDs are invalidated.
def add_url (self, url, line=0, column=0, page=0, name=u"", base=None): if base: base_ref = urlutil.url_norm(base)[0] else: base_ref = None url_data = get_url_from(url, self.recursion_level+1, self.aggregate, parent_url=self.url, base_ref=base_ref, line=line, column=column, page=page, name=name, parent_content_type=self.content_type) self.aggregate.urlqueue.put(url_data)
Add new URL to queue.
def check_arrays_survival(X, y, **kwargs): event, time = check_y_survival(y) kwargs.setdefault("dtype", numpy.float64) X = check_array(X, ensure_min_samples=2, **kwargs) check_consistent_length(X, event, time) return X, event, time
Check that all arrays have consistent first dimensions. Parameters ---------- X : array-like Data matrix containing feature vectors. y : structured array with two fields A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. kwargs : dict Additional arguments passed to :func:`sklearn.utils.check_array`. Returns ------- X : array, shape=[n_samples, n_features] Feature vectors. event : array, shape=[n_samples,], dtype=bool Binary event indicator. time : array, shape=[n_samples,], dtype=float Time of event or censoring.
def get_dataset(self, dataset_ref, retry=DEFAULT_RETRY): if isinstance(dataset_ref, str): dataset_ref = DatasetReference.from_string( dataset_ref, default_project=self.project ) api_response = self._call_api(retry, method="GET", path=dataset_ref.path) return Dataset.from_api_repr(api_response)
Fetch the dataset referenced by ``dataset_ref`` Args: dataset_ref (Union[ \ :class:`~google.cloud.bigquery.dataset.DatasetReference`, \ str, \ ]): A reference to the dataset to fetch from the BigQuery API. If a string is passed in, this method attempts to create a dataset reference from a string using :func:`~google.cloud.bigquery.dataset.DatasetReference.from_string`. retry (:class:`google.api_core.retry.Retry`): (Optional) How to retry the RPC. Returns: google.cloud.bigquery.dataset.Dataset: A ``Dataset`` instance.
def _set_scripts(self, host_metadata, scripts): scripts_key = 'deploy-scripts' if 'ovirt-scritps' in host_metadata: scripts_key = 'ovirt-scripts' host_metadata[scripts_key] = scripts return host_metadata
Temporary method to set the host scripts TODO: remove once the "ovirt-scripts" option gets deprecated Args: host_metadata(dict): host metadata to set scripts in Returns: dict: the updated metadata
def get_quotes(self): mask = "mask[order[id,items[id,package[id,keyName]]]]" quotes = self.client['Account'].getActiveQuotes(mask=mask) return quotes
Retrieve a list of active quotes. :returns: a list of SoftLayer_Billing_Order_Quote
def render_latex_text(input_text, nest_in_doc=False, preamb_extra=None, appname='utool', verbose=None): import utool as ut if verbose is None: verbose = ut.VERBOSE dpath = ut.ensure_app_resource_dir(appname, 'latex_tmp') fname = 'temp_render_latex' pdf_fpath = ut.compile_latex_text( input_text, dpath=dpath, fname=fname, preamb_extra=preamb_extra, verbose=verbose) ut.startfile(pdf_fpath) return pdf_fpath
compiles latex and shows the result
def sendto(self, data, addr, flags=0):
    """Send ``data`` to ``addr`` through the LLC layer.

    The socket should not be connected to a remote socket, since the
    destination socket is specified by ``addr``. Returns a boolean value
    that indicates success or failure; failure to send is generally an
    indication that the socket was closed.
    """
    # Delegate to the logical link control layer, passing our transport
    # connection object along with the payload.
    return self.llc.sendto(self._tco, data, addr, flags)
Send data to the socket. The socket should not be connected to a remote socket, since the destination socket is specified by addr. Returns a boolean value that indicates success or failure. Failure to send is generally an indication that the socket was closed.
def get_grade_entries_for_gradebook_column_on_date(self, gradebook_column_id, from_, to): grade_entry_list = [] for grade_entry in self.get_grade_entries_for_gradebook_column(gradebook_column_id): if overlap(from_, to, grade_entry.start_date, grade_entry.end_date): grade_entry_list.append(grade_entry) return objects.GradeEntryList(grade_entry_list, runtime=self._runtime)
Gets a ``GradeEntryList`` for the given gradebook column and effective during the entire given date range inclusive but not confined to the date range. arg: gradebook_column_id (osid.id.Id): a gradebook column ``Id`` arg: from (osid.calendaring.DateTime): start of date range arg: to (osid.calendaring.DateTime): end of date range return: (osid.grading.GradeEntryList) - the returned ``GradeEntry`` list raise: InvalidArgument - ``from`` is greater than ``to`` raise: NullArgument - ``gradebook_column_id, from, or to`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def set_jinja2_silent_none(config): config.commit() jinja2_env = config.get_jinja2_environment() jinja2_env.finalize = _silent_none
if variable is None print '' instead of 'None'
def quadrant(xcoord, ycoord):
    """Find the quadrant a pair of coordinates are located in.

    Quadrants are numbered 1-4 counter-clockwise from the positive-x,
    positive-y quadrant; a zero coordinate is treated as non-negative.

    :type xcoord: integer
    :param xcoord: The x coordinate to find the quadrant for
    :type ycoord: integer
    :param ycoord: The y coordinate to find the quadrant for
    """
    if xcoord < 0:
        return 3 if ycoord < 0 else 2
    return 4 if ycoord < 0 else 1
Find the quadrant a pair of coordinates are located in :type xcoord: integer :param xcoord: The x coordinate to find the quadrant for :type ycoord: integer :param ycoord: The y coordinate to find the quadrant for
def _mysql_aes_key(key): final_key = bytearray(16) for i, c in enumerate(key): final_key[i % 16] ^= key[i] if PY3 else ord(key[i]) return bytes(final_key)
Format key.
def Parse(self, rdf_data): if not isinstance(rdf_data, (list, set)): raise ProcessingError("Bad host data format: %s" % type(rdf_data)) if self.baseline: comparison = self.baseliner.Parse(rdf_data) else: comparison = rdf_data found = self.handler.Parse(comparison) results = self.hint.Render(found) return self.matcher.Detect(comparison, results)
Process rdf data through filters. Test if results match expectations. Processing of rdf data is staged by a filter handler, which manages the processing of host data. The output of the filters are compared against expected results. Args: rdf_data: An list containing 0 or more rdf values. Returns: An anomaly if data didn't match expectations. Raises: ProcessingError: If rdf_data is not a handled type.
def auto_name_prefix(self): native_system = std_platform.system() native_machine = self.CPU_ALIASES.get(std_platform.machine(), std_platform.machine()) if native_system == self.system and native_machine == self.machine: return '' platform = { 'linux': 'linux32', 'android-api-16': 'android-arm', 'android-aarch64': 'android-arm64', }.get(self.gecko_platform, self.gecko_platform) return platform + '-'
Generate platform prefix for cross-platform downloads.
def force_list(value, min=None, max=None): if not isinstance(value, (list, tuple)): value = [value] return is_list(value, min, max)
Check that a value is a list, coercing strings into a list with one member. Useful where users forget the trailing comma that turns a single value into a list. You can optionally specify the minimum and maximum number of members. A minimum greater than one will fail if the user only supplies a string. >>> vtor = Validator() >>> vtor.check('force_list', ()) [] >>> vtor.check('force_list', []) [] >>> vtor.check('force_list', 'hello') ['hello']
def _suffix(self): _output_formats={'GCG':'.msf', 'GDE':'.gde', 'PHYLIP':'.phy', 'PIR':'.pir', 'NEXUS':'.nxs'} if self.Parameters['-output'].isOn(): return _output_formats[self.Parameters['-output'].Value] else: return '.aln'
Return appropriate suffix for alignment file
def critical(self, msg, *args, **kwargs) -> Task:
    """Log msg with severity 'CRITICAL'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

        await logger.critical("Houston, we have a major disaster", exc_info=1)
    """
    severity = logging.CRITICAL
    return self._make_log_task(severity, msg, args, **kwargs)
Log msg with severity 'CRITICAL'. To pass exception information, use the keyword argument exc_info with a true value, e.g. await logger.critical("Houston, we have a major disaster", exc_info=1)
def read(*paths):
    """Read and return the text content of a file.

    ``paths`` are joined into a path relative to this module's directory.
    """
    full_path = os.path.join(os.path.dirname(__file__), *paths)
    with open(full_path) as handle:
        return handle.read()
read and return txt content of file
def _load_records(self, record_type_idstrs): for record_type_idstr in record_type_idstrs: try: self._init_record(record_type_idstr) except (ImportError, KeyError): pass
Loads query records
def get_sigma(x, min_limit=-np.inf, max_limit=np.inf):
    """Compute the standard deviations around the points for a 1D GMM.

    For each point, the distance to its nearest left and right neighbor
    (the limits count as neighbors) is taken; the larger distance is used
    as that point's sigma, falling back to the smaller one when the larger
    is infinite, and to 1.0 when both are.

    Arguments
    ---------
    x : 1D array
        Set of points to create the GMM.
    min_limit : Optional[float], default : -inf
        Minimum limit for the distribution.
    max_limit : Optional[float], default : inf
        Maximum limit for the distribution.

    Returns
    -------
    1D array
        Array of standard deviations.
    """
    candidates = np.append(x, [min_limit, max_limit])
    sigma = np.ones(x.shape)
    for i in range(x.size):
        xi = x[i]
        # Distance-to-neighbor lists; non-neighbors are masked with inf so
        # argmin picks the closest point strictly to the left / right.
        left = candidates[np.argmin(
            [(xi - c) if c < xi else np.inf for c in candidates])]
        right = candidates[np.argmin(
            [(c - xi) if c > xi else np.inf for c in candidates])]
        sigma[i] = max(xi - left, right - xi)
        if sigma[i] == np.inf:
            sigma[i] = min(xi - left, right - xi)
        if sigma[i] == -np.inf:
            sigma[i] = 1.0
    return sigma
Compute the standard deviations around the points for a 1D GMM. We take the distance from the nearest left and right neighbors for each point, then use the max as the estimate of standard deviation for the gaussian mixture around that point. Arguments --------- x : 1D array Set of points to create the GMM min_limit : Optional[float], default : -inf Minimum limit for the distribution max_limit : Optional[float], default : inf maximum limit for the distribution Returns ------- 1D array Array of standard deviations
def discretize(self, method, *args, **kwargs):
    """Discretize the continuous distribution into discrete probability
    masses using the given discretizer.

    Parameters
    ----------
    method : A Discretizer Class from pgmpy.discretize
    *args, **kwargs:
        The parameters to be given to the Discretizer Class.

    Returns
    -------
    The discretized values produced by the discretizer's
    ``get_discrete_values`` method.
    """
    discretizer = method(self, *args, **kwargs)
    return discretizer.get_discrete_values()
Discretizes the continuous distribution into discrete probability masses using various methods. Parameters ---------- method : A Discretizer Class from pgmpy.discretize *args, **kwargs: The parameters to be given to the Discretizer Class. Returns ------- An n-D array or a DiscreteFactor object according to the discretiztion method used. Examples -------- >>> import numpy as np >>> from scipy.special import beta >>> from pgmpy.factors.continuous import ContinuousFactor >>> from pgmpy.factors.continuous import RoundingDiscretizer >>> def dirichlet_pdf(x, y): ... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y) >>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf) >>> dirichlet_factor.discretize(RoundingDiscretizer, low=1, high=2, cardinality=5) # TODO: finish this
def machine(self): if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FLAVOUR)
Return the flavour attribute of the BFD file being processed.
def flatFieldFromFunction(self): fitimg, mask = self._prepare() mask = ~mask s0, s1 = fitimg.shape guess = (s1 * 0.7, 0, 1, s0 / 2, s1 / 2) fn = lambda xy, f, alpha, fx, cx, cy: vignetting((xy[0] * fx, xy[1]), f, alpha, cx=cx, cy=cy) flatfield = fit2dArrayToFn(fitimg, fn, mask=mask, guess=guess, output_shape=self._orig_shape)[0] return flatfield, self.bglevel / self._n, fitimg, mask
calculate flatField from fitting vignetting function to averaged fit-image returns flatField, average background level, fitted image, valid indices mask
def get_personal_info(user):
    """Get a user's personal info attributes to pass as an initial value
    to a PersonalInformationForm.

    Returns a ``(personal_info, num_fields)`` tuple: ``personal_info`` maps
    ``phone_<i>`` / ``email_<i>`` / ``website_<i>`` keys to the user's
    related objects, and ``num_fields`` holds the per-category counts.
    """
    # Evaluate each related queryset exactly once — the original re-ran
    # user.<relation>.all() on every loop iteration (N+1 evaluation).
    phones = list(user.phones.all() or [])
    emails = list(user.emails.all() or [])
    websites = list(user.websites.all() or [])

    personal_info = {}
    categories = (("phone", phones), ("email", emails), ("website", websites))
    for prefix, items in categories:
        for i, item in enumerate(items):
            personal_info["{}_{}".format(prefix, i)] = item

    num_fields = {
        "phones": len(phones),
        "emails": len(emails),
        "websites": len(websites),
    }
    return personal_info, num_fields
Get a user's personal info attributes to pass as an initial value to a PersonalInformationForm.
def _generate_edges(self, edge_prob): self.E, self.parent = [], {} for i in range(self.m): if random() < edge_prob and i > 0: p_i = choice(i) self.E.append((p_i, i)) self.parent[i] = p_i
Generate a random tree-structured dependency graph based on a specified edge probability. Also create helper data struct mapping child -> parent.
def accept(self): if self._can_settle_message(): self._response = errors.MessageAccepted() self._settler(self._response) self.state = constants.MessageState.ReceivedSettled return True return False
Send a response disposition to the service to indicate that a received message has been accepted. If the client is running in PeekLock mode, the service will wait on this disposition. Otherwise it will be ignored. Returns `True` is message was accepted, or `False` if the message was already settled. :rtype: bool :raises: TypeError if the message is being sent rather than received.
def get_heroku_connect_models(): from django.apps import apps apps.check_models_ready() from heroku_connect.db.models import HerokuConnectModel return ( model for models in apps.all_models.values() for model in models.values() if issubclass(model, HerokuConnectModel) and not model._meta.managed )
Return all registered Heroku Connect Models. Returns: (Iterator): All registered models that are subclasses of `.HerokuConnectModel`. Abstract models are excluded, since they are not registered.
def get_token(wallet: 'Wallet', token_str: str) -> 'NEP5Token.NEP5Token': if token_str.startswith('0x'): token_str = token_str[2:] token = None for t in wallet.GetTokens().values(): if token_str in [t.symbol, t.ScriptHash.ToString()]: token = t break if not isinstance(token, NEP5Token.NEP5Token): raise ValueError("The given token argument does not represent a known NEP5 token") return token
Try to get a NEP-5 token based on the symbol or script_hash Args: wallet: wallet instance token_str: symbol or script_hash (accepts script hash with or without 0x prefix) Raises: ValueError: if token is not found Returns: NEP5Token instance if found.
def percentile(sorted_list, percent, key=lambda x: x):
    """Find the percentile of a sorted list of values.

    Arguments
    ---------
    sorted_list : list
        A sorted (ascending) list of values.
    percent : float
        A float value from 0.0 to 1.0.
    key : function, optional
        An optional function to compute a value from each element of N.
        (Previously documented but never applied — now used consistently.)

    Returns
    -------
    float
        The desired percentile of the value list, or None for an empty list.
    """
    if not sorted_list:
        return None
    if percent == 1:
        return float(key(sorted_list[-1]))
    if percent == 0:
        return float(key(sorted_list[0]))
    n = len(sorted_list)
    i = percent * n
    if ceil(i) == i:
        # Exact boundary: average the two surrounding elements.
        i = int(i)
        return (key(sorted_list[i - 1]) + key(sorted_list[i])) / 2
    return float(key(sorted_list[ceil(i) - 1]))
Find the percentile of a sorted list of values. Arguments --------- sorted_list : list A sorted (ascending) list of values. percent : float A float value from 0.0 to 1.0. key : function, optional An optional function to compute a value from each element of N. Returns ------- float The desired percentile of the value list. Examples -------- >>> sorted_list = [4,6,8,9,11] >>> percentile(sorted_list, 0.4) 7.0 >>> percentile(sorted_list, 0.44) 8.0 >>> percentile(sorted_list, 0.6) 8.5 >>> percentile(sorted_list, 0.99) 11.0 >>> percentile(sorted_list, 1) 11.0 >>> percentile(sorted_list, 0) 4.0
def get_template(self, context, **kwargs): if 'template' in kwargs['params']: self.template = kwargs['params']['template'] return super(GoscaleTemplateInclusionTag, self).get_template(context, **kwargs)
Returns the template to be used for the current context and arguments.
def rename(self, from_, to):
    """Rename a table on the schema.

    :param from_: current table name
    :param to: new table name
    """
    # Build a blueprint for the existing table, record the rename on it,
    # then execute the blueprint against the schema.
    blueprint = self._create_blueprint(from_)
    blueprint.rename(to)
    self._build(blueprint)
Rename a table on the schema.
def total_num_lines(self):
    """Return the total number of lines in the diff for which we have
    coverage info."""
    return sum(
        len(summary.measured_lines)
        for summary in self._diff_violations().values()
    )
Return the total number of lines in the diff for which we have coverage info.
def sense_ttb(self, target): info = "{device} does not support sense for Type B Target" raise nfc.clf.UnsupportedTargetError(info.format(device=self))
Sense for a Type B Target is not supported.
def get_platform_by_name(self, name, for_target=None):
    """Find the platform with the given name.

    If the name is empty or None, returns the default platform. If no
    platform with the given name is defined, raises an error.

    :param str name: name of the platform.
    :param JvmTarget for_target: optionally specified target we're looking
        up the platform for; only used in error message generation.
    :return: The jvm platform object.
    :rtype: JvmPlatformSettings
    """
    if not name:
        return self.default_platform
    if name in self.platforms_by_name:
        return self.platforms_by_name[name]
    raise self.UndefinedJvmPlatform(for_target, name, self.platforms_by_name)
Finds the platform with the given name. If the name is empty or None, returns the default platform. If not platform with the given name is defined, raises an error. :param str name: name of the platform. :param JvmTarget for_target: optionally specified target we're looking up the platform for. Only used in error message generation. :return: The jvm platform object. :rtype: JvmPlatformSettings
def visit_nonlocal(self, node, parent): return nodes.Nonlocal( node.names, getattr(node, "lineno", None), getattr(node, "col_offset", None), parent, )
visit a Nonlocal node and return a new instance of it
def profile(schemaname='sensordata', profiletype='pjs'): db_log("Profiling ", schemaname) schema = schemastore[schemaname]['schema'] db_log("Schema: ", schema, lvl=debug) testclass = None if profiletype == 'warmongo': db_log("Running Warmongo benchmark") testclass = warmongo.model_factory(schema) elif profiletype == 'pjs': db_log("Running PJS benchmark") try: import python_jsonschema_objects as pjs except ImportError: db_log("PJS benchmark selected but not available. Install " "python_jsonschema_objects (PJS)") return db_log() builder = pjs.ObjectBuilder(schema) ns = builder.build_classes() pprint(ns) testclass = ns[schemaname] db_log("ns: ", ns, lvl=warn) if testclass is not None: db_log("Instantiating elements...") for i in range(100): testclass() else: db_log("No Profiletype available!") db_log("Profiling done")
Profiles object model handling with a very simple benchmarking test
def _get_api_dependencies_of(name, version='', force=False): m, d = _get_metadap_dap(name, version=version) if not force and not _is_supported_here(d): raise DapiLocalError( '{0} is not supported on this platform (use --force to suppress this check).'. format(name)) return d.get('dependencies', [])
Returns list of first level dependencies of the given dap from Dapi
def array(self) -> numpy.ndarray: array = numpy.full(self.shape, fillvalue, dtype=float) for idx, (descr, subarray) in enumerate(self.arrays.items()): sequence = self.sequences[descr] array[self.get_slices(idx, sequence.shape)] = subarray return array
The series data of all logged |IOSequence| objects contained in one single |numpy.ndarray|. The documentation on |NetCDFVariableDeep.shape| explains how |NetCDFVariableDeep.array| is structured. The first example confirms that, for the default configuration, the first axis definces the location, while the second one defines time: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> from hydpy.core.netcdftools import NetCDFVariableDeep >>> ncvar = NetCDFVariableDeep('input_nied', isolate=False, timeaxis=1) >>> for element in elements: ... nied1 = element.model.sequences.inputs.nied ... ncvar.log(nied1, nied1.series) >>> ncvar.array array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]]) For higher dimensional sequences, |NetCDFVariableDeep.array| can contain missing values. Such missing values show up for some fiels of the second example element, which defines only two hydrological response units instead of three: >>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=False, timeaxis=1) >>> for element in elements: ... nkor1 = element.model.sequences.fluxes.nkor ... ncvar.log(nkor1, nkor1.series) >>> ncvar.array[1] array([[ 16., 17., nan], [ 18., 19., nan], [ 20., 21., nan], [ 22., 23., nan]]) When using the first axis for time (`timeaxis=0`) the same data can be accessed with slightly different indexing: >>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=False, timeaxis=0) >>> for element in elements: ... nkor1 = element.model.sequences.fluxes.nkor ... ncvar.log(nkor1, nkor1.series) >>> ncvar.array[:, 1] array([[ 16., 17., nan], [ 18., 19., nan], [ 20., 21., nan], [ 22., 23., nan]])
def dbmax50years(self, value=None):
    """Corresponds to IDD Field `dbmax50years`.

    50-year return period values for maximum extreme dry-bulb temperature.

    Args:
        value (float): value for IDD Field `dbmax50years`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None marks a missing value and bypasses validation.
        self._dbmax50years = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `dbmax50years`'.format(value))
    self._dbmax50years = converted
Corresponds to IDD Field `dbmax50years` 50-year return period values for maximum extreme dry-bulb temperature Args: value (float): value for IDD Field `dbmax50years` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def _escape_string(text, _map={}):
    """Escape the given bytestring for safe use as an LLVM array constant.

    Bytes in _VALID_CHARS pass through; all others become ``\\XX`` hex
    escapes. The mutable default ``_map`` is used deliberately as a
    build-once, cross-call escape table cache.
    """
    if isinstance(text, str):
        text = text.encode('ascii')
    assert isinstance(text, (bytes, bytearray))
    if not _map:
        # First call: populate the 256-entry escape table.
        for ch in range(256):
            if ch in _VALID_CHARS:
                _map[ch] = chr(ch)
            else:
                _map[ch] = '\\%02x' % ch
            if six.PY2:
                # Py2 bytes iterate as 1-char strings, so mirror the entry
                # under the character key as well.
                _map[chr(ch)] = _map[ch]
    buf = [_map[ch] for ch in text]
    return ''.join(buf)
Escape the given bytestring for safe use as a LLVM array constant.
def load_lst(self): with open(self._lst_file, 'r') as fd: lines = fd.readlines() idx, uname, fname = list(), list(), list() for line in lines: values = line.split(',') values = [x.strip() for x in values] idx.append(int(values[0])) uname.append(values[1]) fname.append(values[2]) self._idx = idx self._fname = fname self._uname = uname
Load the lst file into internal data structures
async def monitor_start(self, monitor): cfg = self.cfg if (not platform.has_multiprocessing_socket or cfg.concurrency == 'thread'): cfg.set('workers', 0) servers = await self.binds(monitor) if not servers: raise ImproperlyConfigured('Could not open a socket. ' 'No address to bind to') addresses = [] for server in servers.values(): addresses.extend(server.addresses) self.cfg.addresses = addresses
Create the socket listening to the ``bind`` address. If the platform does not support multiprocessing sockets set the number of workers to 0.
def insert(self, idx, w, comment=''): if idx >= self.count(): self.add(w, comment) return if idx < 0: return w = copy.copy(w) if comment: w.comment = comment w.seq = idx self.wpoints.insert(idx, w) self.last_change = time.time() self.reindex()
insert a waypoint
def validate_checksum( filename, md5sum ): filename = match_filename( filename ) md5_hash = file_md5( filename=filename ) if md5_hash != md5sum: raise ValueError('md5 checksums are inconsistent: {}'.format( filename ))
Compares the md5 checksum of a file with an expected value. If the calculated and expected checksum values are not equal, ValueError is raised. If the filename `foo` is not found, will try to read a gzipped file named `foo.gz`. In this case, the checksum is calculated for the unzipped file. Args: filename (str): Path for the file to be checksummed. md5sum (str): The expected hex checksum. Returns: None
def _killall(self, force=False):
    """Kill all remaining processes, forcefully if requested.

    Sends SIGTERM (or SIGKILL when ``force`` is true) to every tracked
    process that has not yet recorded a return code.
    """
    for_termination = []
    for n, p in iteritems(self._processes):
        # A process entry without 'returncode' is still running.
        if 'returncode' not in p:
            for_termination.append(n)
    for n in for_termination:
        p = self._processes[n]
        signame = 'SIGKILL' if force else 'SIGTERM'
        self._system_print("sending %s to %s (pid %s)\n"
                           % (signame, n, p['pid']))
        if force:
            self._env.kill(p['pid'])
        else:
            self._env.terminate(p['pid'])
Kill all remaining processes, forcefully if requested.
def CharacterData(self, data):
    """Expat character data event handler.

    Whitespace-only chunks are ignored; other chunks are encoded to bytes
    and appended to ``self.data`` (character data may arrive across
    several events).
    """
    if not data.strip():
        return
    encoded = data.encode()
    if self.data:
        self.data += encoded
    else:
        self.data = encoded
Expat character data event handler
def time_to_number(self, time):
    """Convert a ``datetime.time`` to a float fraction of a day.

    Raises TypeError for any other type. Midnight maps to 0.0,
    noon to 0.5. (Operation order matches the original formula so the
    float result is bit-identical.)
    """
    if not isinstance(time, datetime.time):
        raise TypeError(time)
    fraction = time.second / 60.0
    fraction = (fraction + time.minute) / 60.0
    fraction = (fraction + time.hour) / 24.0
    return fraction
Converts a time instance to a corresponding float value.
def _retry_get(self, uri):
    """GET ``uri``, retrying while the API returns an empty body.

    Raises ServiceResponseFailure after DEFAULT_RETRY empty responses.
    """
    attempt = 0
    while attempt < DEFAULT_RETRY:
        resp, body = self.api.method_get(uri)
        if body:
            return resp, body
        attempt += 1
    raise exc.ServiceResponseFailure("The Cloud DNS service failed to "
                                     "respond to the request.")
Handles GET calls to the Cloud DNS API in order to retry on empty body responses.
def get_all_info(pdb_id):
    """Fetch the full PDB entry for ``pdb_id`` as a cleaned-up dict.

    Wraps ``get_info``: converts the result to a plain dict, drills into
    molDescription/structureId and normalizes keys via ``remove_at_sign``.
    """
    raw = to_dict(get_info(pdb_id))
    structure = raw['molDescription']['structureId']
    return remove_at_sign(structure)
A wrapper for get_info that cleans up the output slighly Parameters ---------- pdb_id : string A 4 character string giving a pdb entry of interest Returns ------- out : dict A dictionary containing all the information stored in the entry Examples -------- >>> all_info = get_all_info('4lza') >>> print(all_info) {'polymer': {'macroMolecule': {'@name': 'Adenine phosphoribosyltransferase', ' accession': {'@id': 'B0K969'}}, '@entityNr': '1', '@type': 'protein', 'polymerDescription': {'@description': 'Adenine phosphoribosyltransferase'}, 'synonym': {'@name': 'APRT'}, '@length': '195', 'enzClass': {'@ec': '2.4.2.7'}, 'chain': [{'@id': 'A'}, {'@id': 'B'}], 'Taxonomy': {'@name': 'Thermoanaerobacter pseudethanolicus ATCC 33223', '@id': '340099'}, '@weight': '22023.9'}, 'id': '4LZA'} >>> results = get_all_info('2F5N') >>> first_polymer = results['polymer'][0] >>> first_polymer['polymerDescription'] {'@description': "5'-D(*AP*GP*GP*TP*AP*GP*AP*CP*CP*TP*GP*GP*AP*CP*GP*C)-3'"}
def url(viewname, *args, **kwargs):
    """Template helper wrapping Django's ``reverse``.

    Positional and keyword arguments are forwarded as the ``args`` and
    ``kwargs`` of the resolved URL pattern.
    """
    return reverse(viewname, args=args, kwargs=kwargs)
Helper for Django's ``reverse`` in templates.
def get_launch_config(self, scaling_group):
    """Return the launch configuration dict for ``scaling_group``.

    API server-argument keys are renamed to friendlier names via
    ``key_map``; unknown keys pass through unchanged.
    """
    key_map = {
        "OS-DCF:diskConfig": "disk_config",
        "flavorRef": "flavor",
        "imageRef": "image",
    }
    uri = "/%s/%s/launch" % (self.uri_base, utils.get_id(scaling_group))
    _, resp_body = self.api.method_get(uri)
    data = resp_body.get("launchConfiguration")
    args = data.get("args", {})
    ret = {
        "type": data.get("type"),
        "load_balancers": args.get("loadBalancers"),
    }
    for key, value in args.get("server", {}).items():
        ret[key_map.get(key, key)] = value
    return ret
Returns the launch configuration for the specified scaling group.
def run(args):
    """Process command line arguments (skipping argv[0]) and walk inputs."""
    process_arguments(get_arguments(args[1:]))
    walk.run()
    return True
Process command line arguments and walk inputs.
def get_description(self):
    """Return the description text as provided by the studio.

    The value is fetched once from the trailer page and cached on
    ``self._description``; returns the literal string "None" when the
    page has no Description meta tag and "Error" when fetching/parsing
    fails (errors are reported in-band, never raised).
    """
    if self._description:
        return self._description
    try:
        trailer_url = "http://trailers.apple.com%s" % self.baseURL
        response = urllib.request.urlopen(trailer_url)
        reader = codecs.getreader("utf-8")(response)
        trailer_html = reader.read()
        match = re.search(
            '<meta *name="Description" *content="(.*?)" *[/]*>',
            trailer_html)
        self._description = match.group(1) if match else "None"
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrow it to Exception while keeping the
        # in-band "Error" sentinel callers expect.
        self._description = "Error"
    return self._description
Returns description text as provided by the studio
def _evaluate_tempyREPR(self, child, repr_cls):
    """Score a TempyREPR class for suitability in the current scope.

    Higher scores mean ``repr_cls`` is a better representation for
    rendering ``child`` at this position in the tree.
    """
    score = 0
    # Favor representations named after this container...
    if repr_cls.__name__ == self.__class__.__name__:
        score += 1
    # ...or after the tree root.
    elif repr_cls.__name__ == self.root.__class__.__name__:
        score += 1
    # Let every TempyPlace ancestor of repr_cls contribute via its
    # `_reprscore*` hook methods.
    for parent_cls in _filter_classes(repr_cls.__mro__[1:], TempyPlace):
        for scorer in (
            method for method in dir(parent_cls)
            if method.startswith("_reprscore")
        ):
            # NOTE(review): the hook is fetched from the class and
            # invoked unbound-style, with the class itself passed as
            # the first argument; the default no-op scores 0.
            score += getattr(parent_cls, scorer, lambda *args: 0)(
                parent_cls, self, child
            )
    return score
Assign a score to a TempyRepr class. The score depends on the current scope and position of the object in which the TempyREPR is found.
def _eb_env_tags(envs, session_factory, retry):
    """Augment ElasticBeanstalk environment dicts with their tags.

    Each environment dict gains a ``Tags`` key (mutated in place).
    NOTE(review): environments whose ARN no longer resolves map to
    ``None`` in the returned list — confirm callers tolerate None
    entries.
    """
    client = local_session(session_factory).client('elasticbeanstalk')

    def process_tags(eb_env):
        # Fetch tags for one environment; None if it disappeared
        # between the list call and the tag lookup.
        try:
            eb_env['Tags'] = retry(
                client.list_tags_for_resource,
                ResourceArn=eb_env['EnvironmentArn'])['ResourceTags']
        except client.exceptions.ResourceNotFoundException:
            return
        return eb_env

    return list(map(process_tags, envs))
Augment ElasticBeanstalk Environments with their tags.
def _TryPrintAsAnyMessage(self, message):
    """Serialize ``message`` if it is a google.protobuf.Any field.

    Returns True when the packed payload type could be resolved and
    printed, False otherwise.
    """
    packed = _BuildMessageFromTypeName(message.TypeName(),
                                       self.descriptor_pool)
    if not packed:
        # Unknown type URL: let the caller fall back to raw printing.
        return False
    packed.MergeFromString(message.value)
    self.out.write('%s[%s]' % (self.indent * ' ', message.type_url))
    self._PrintMessageFieldValue(packed)
    self.out.write(' ' if self.as_one_line else '\n')
    return True
Serializes if message is a google.protobuf.Any field.
def run(command, show=True, *args, **kwargs):
    """Run a shell command on the remote server.

    Echoes the command locally when ``show`` is true; extra arguments
    are forwarded to the underlying fabric ``run``.
    """
    if show:
        print_command(command)
    with hide("running"):
        result = _run(command, *args, **kwargs)
    return result
Runs a shell command on the remote server.
def push_to_server(self, data):
    """Create an Authorize.NET profile for ``self.customer``.

    Raises (via ``raise_if_error``) when the gateway rejects the
    request; on success stores the returned profile identifiers.
    """
    output = add_profile(self.customer.pk, data, data)
    response = output['response']
    response.raise_if_error()
    self.profile_id = output['profile_id']
    self.payment_profile_ids = output['payment_profile_ids']
Create customer profile for given ``customer`` on Authorize.NET
def _adjust_scrollbar(self, f): hb = self.horizontalScrollBar() hb.setValue(int(f * hb.value() + ((f - 1) * hb.pageStep()/2))) vb = self.verticalScrollBar() vb.setValue(int(f * vb.value() + ((f - 1) * vb.pageStep()/2)))
Adjust the scrollbar position to take into account the zooming of the figure.
def _difference(self, original_keys, updated_keys, name, item_index):
    """Compute (added, removed) key sets and prune ``item_index``.

    Removed keys are deleted from ``item_index``; keys present in both
    sets but flagged ``_changed`` are reported as both removed and
    added so subscribers treat them as replaced.
    """
    # Work on sets so the difference operations below are cheap.
    original_keys = set(original_keys)
    updated_keys = set(updated_keys)
    added_keys = updated_keys.difference(original_keys)
    removed_keys = set()
    if name is None:
        # Unqualified request: anything absent from the update was removed.
        removed_keys = original_keys.difference(updated_keys)
    elif name not in updated_keys and name in original_keys:
        # A specific key was requested and it disappeared.
        removed_keys = set([name])
    # Drop removed entries from the index before reporting.
    for key in removed_keys:
        if key in item_index:
            del(item_index[key])
    # Surviving keys (in both sets): a `_changed` flag means the item
    # was replaced — clear the flag and report it as removed + added.
    for key in updated_keys.difference(added_keys.union(removed_keys)):
        if item_index[key].get('_changed'):
            item_index[key]['_changed'] = False
            removed_keys.add(key)
            added_keys.add(key)
    return added_keys, removed_keys
Calculate difference between the original and updated sets of keys. Removed items will be removed from item_index, new items should have been added by the discovery process. (?help or ?sensor-list) This method is for use in inspect_requests and inspect_sensors only. Returns ------- (added, removed) added : set of str Names of the keys that were added removed : set of str Names of the keys that were removed
def unwrap(self):
    """Return myself as a plain list, unwrapping wrapper members.

    A member is unwrapped when it has an ``unwrap`` method and does not
    carry a ``no_unwrap`` marker; other members are kept as-is.
    """
    result = []
    for item in self:
        wants_unwrap = hasattr(item, 'unwrap') and not hasattr(item, 'no_unwrap')
        result.append(item.unwrap() if wants_unwrap else item)
    return result
Return a deep copy of myself as a list, and unwrap any wrapper objects in me.
def _dictify(field, value): if value is None: return None elif field.type_: return value.to_dict() return field.dict_value(value)
Make `value` suitable for a dictionary. * If `value` is an Entity, call to_dict() on it. * If value is a timestamp, turn it into a string value. * If none of the above are satisfied, return the input value
def set_metadata_cache(cache):
    """Install ``cache`` as the metadata cache, closing any open predecessor."""
    global _METADATA_CACHE
    previous = _METADATA_CACHE
    if previous and previous.is_open:
        previous.close()
    _METADATA_CACHE = cache
Sets the metadata cache object to use.
def find(self, filter=None, page=1, per_page=10, fields=None, context=None):
    """Find records matching ``filter`` via a paginated GET.

    ``filter`` is a domain expression; ``fields`` may include dotted
    relationship paths; ``context`` overrides the client context.
    """
    params = {
        'filter': dumps(filter or []),
        'page': page,
        'per_page': per_page,
        'field': fields,
        'context': dumps(context or self.client.context),
    }
    rv = self.client.session.get(self.path, params=params)
    response_received.send(rv)
    return rv
Find records that match the filter. Pro Tip: The fields could have nested fields names if the field is a relationship type. For example if you were looking up an order and also want to get the shipping address country then fields would be: `['shipment_address', 'shipment_address.country']` but country in this case is the ID of the country which is not very useful if you don't already have a map. You can fetch the country code by adding `'shipment_address.country.code'` to the fields. :param filter: A domain expression (Refer docs for domain syntax) :param page: The page to fetch to get paginated results :param per_page: The number of records to fetch per page :param fields: A list of field names to fetch. :param context: Any overrides to the context.
def next(self):
    """Return the next batch; raises StopIteration when test data is exhausted."""
    if self.is_train:
        data, labels = self.sample_train_batch()
        return mx.io.DataBatch(data=[data], label=[labels])
    consumed = self.test_count * self.batch_size
    if consumed >= len(self.test_image_files):
        # Reset for the next epoch before signalling the end.
        self.test_count = 0
        raise StopIteration
    data, labels = self.get_test_batch()
    self.test_count += 1
    return mx.io.DataBatch(data=[data], label=[labels])
Return a batch.
def get_frontend_node(self):
    """Return the frontend node of the cluster.

    Prefers the first node of the class named by ``ssh_to``; otherwise
    falls back to the first node of the first non-empty class in
    alphabetical order. Raises NodeNotFound when nothing qualifies.
    """
    if self.ssh_to:
        if self.ssh_to not in self.nodes:
            raise NodeNotFound(
                "Invalid ssh_to `%s`. Please check your "
                "configuration file." % self.ssh_to)
        preferred = self.nodes[self.ssh_to]
        if preferred:
            return preferred[0]
        log.warning(
            "preferred `ssh_to` `%s` is empty: unable to "
            "get the choosen frontend node from that class.",
            self.ssh_to)
    for cls in sorted(self.nodes.keys()):
        members = self.nodes[cls]
        if members:
            return members[0]
    raise NodeNotFound("Unable to find a valid frontend: "
                       "cluster has no nodes!")
Returns the first node of the class specified in the configuration file as `ssh_to`, or the first node of the first class in alphabetic order. :return: :py:class:`Node` :raise: :py:class:`elasticluster.exceptions.NodeNotFound` if no valid frontend node is found
def get_cameras_properties(self):
    """Fetch the 'cameras' resource event and cache its properties.

    No-op when no event comes back; otherwise records the refresh time
    and stores the event's ``properties``.
    """
    event = self.publish_and_get_event("cameras")
    if not event:
        return
    self._last_refresh = int(time.time())
    self._camera_properties = event.get('properties')
Return camera properties.
def set_timeout(self, timeout):
    """Set the timeout limit.

    :Parameters:
        #. timeout (number): The maximum delay or time allowed to
           successfully set the lock. Must be a non-negative number
           that is not smaller than the configured wait interval.

    :Raises:
        Exception: if timeout is not a number, is negative, or is
        smaller than the wait interval (message unchanged to preserve
        the caller contract).
    """
    # Explicit validation replaces the original `assert` (which is
    # stripped under `python -O`) and the bare `except:` (which also
    # trapped KeyboardInterrupt/SystemExit).
    try:
        timeout = float(timeout)
    except (TypeError, ValueError):
        raise Exception('timeout must be a positive number bigger than wait')
    if timeout < 0 or timeout < self.__wait:
        raise Exception('timeout must be a positive number bigger than wait')
    self.__timeout = timeout
set the timeout limit. :Parameters: #. timeout (number): The maximum delay or time allowed to successfully set the lock. When timeout is exhausted before successfully setting the lock, the lock ends up not acquired.
def update_tabs_text(self):
    """Refresh each tab's title from its associated filename."""
    try:
        for idx, filename in enumerate(self.filenames):
            client = self.clients[idx]
            title = self.disambiguate_fname(filename) if filename else None
            self.rename_client_tab(client, title)
    except IndexError:
        # filenames/clients can briefly disagree in length; stop quietly.
        pass
Update the text from the tabs.
def branches(self):
    """List of all branches this commit is a part of (computed once, cached)."""
    if self._branches is None:
        output = shell.run(
            'git branch --contains {}'.format(self.sha1),
            capture=True,
            never_pretend=True
        ).stdout.strip()
        # Strip the current-branch marker and surrounding whitespace.
        self._branches = [line.strip('* \t\n') for line in output.splitlines()]
    return self._branches
List of all branches this commit is a part of.
def get_list(self, list_id):
    """Get info of the specified list.

    :param list_id: list ID number
    :return: a ``List`` model built from the API response
    """
    raw = self._client.get_list(list_id=list_id)
    return List(tweepy_list_to_json(raw))
Get info of specified list :param list_id: list ID number :return: :class:`~responsebot.models.List` object
def _inter_df_op_handler(self, func, other, **kwargs): axis = kwargs.get("axis", 0) axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0 if isinstance(other, type(self)): return self._inter_manager_operations( other, "outer", lambda x, y: func(x, y, **kwargs) ) else: return self._scalar_operations( axis, other, lambda df: func(df, other, **kwargs) )
Helper method for inter-manager and scalar operations. Args: func: The function to use on the Manager/scalar. other: The other Manager/scalar. Returns: New DataManager with new data and index.
def setCol(self, x, l):
    """Fill the x-th column (0-based) with the first ``size`` values of ``l``."""
    size = self.__size
    i = 0
    while i < size:
        self.setCell(x, i, l[i])
        i += 1
set the x-th column, starting at 0
def read_args_tool(toolkey, example_parameters, tool_add_args=None):
    """Read command-line args for a single tool.

    ``tool_add_args`` may customize the parser; otherwise the generic
    ``add_args`` is applied.
    """
    import scanpy as sc
    parser = default_tool_argparser(help(toolkey), example_parameters)
    if tool_add_args is None:
        parser = add_args(parser)
    else:
        parser = tool_add_args(parser)
    parsed = vars(parser.parse_args())
    return settings.process_args(parsed)
Read args for single tool.
def image_exists(self, id=None, tag=None):
    """Check whether the specified image exists, by id first, then by tag."""
    if id and self.image_by_id(id):
        return True
    if tag and self.image_by_tag(tag):
        return True
    return False
Check if specified image exists
def space_search(args):
    """Search for workspaces matching certain criteria.

    Currently supports filtering by a bucket-name regex; returns
    case-insensitively sorted "namespace/name<TAB>bucket" strings.
    """
    r = fapi.list_workspaces()
    fapi._check_response_code(r, 200)
    workspaces = r.json()
    extra_terms = []
    if args.bucket:
        pattern = args.bucket
        workspaces = [w for w in workspaces
                      if re.search(pattern, w['workspace']['bucketName'])]
        extra_terms.append('bucket')
    pretty_spaces = []
    for space in workspaces:
        meta = space['workspace']
        line = meta['namespace'] + '/' + meta['name']
        line += '\t' + meta['bucketName']
        pretty_spaces.append(line)
    return sorted(pretty_spaces, key=lambda s: s.lower())
Search for workspaces matching certain criteria
def _trace(frame, event, arg):
    """Trace function calls and returns; reinstalls itself as the tracer."""
    if event in ('call', 'c_call', 'return', 'c_return'):
        _trace_line(frame, event, arg)
        if event in ('return', 'c_return'):
            print(" return:", arg)
    return _trace
Trace function calls.
def match_contains(self, el, contains):
    """Match the element if every ``contains`` group has at least one hit.

    The element text is fetched lazily, only when there is a group to
    test; an empty ``contains`` always matches.
    """
    matched = True
    content = None
    for contain_list in contains:
        if content is None:
            content = self.get_text(el, no_iframe=self.is_html)
        if not any(text in content for text in contain_list.text):
            matched = False
    return matched
Match element if it contains text.
def remove(self):
    """Detach this node from its parent's children; no-op when parentless."""
    parent = self.parent
    if parent is None:
        return
    for i, sibling in enumerate(parent.children):
        if sibling is self:
            parent.remove_child(i)
            self.parent = None
            break
Remove this node from the list of children of its current parent, if the current parent is not ``None``, otherwise do nothing. .. versionadded:: 1.7.0
def hook_wnd_proc(self):
    """Attach to the OS window message handler.

    Wraps ``local_wnd_proc`` in a ctypes-compatible callback and
    installs it as the window procedure via ``SetWindowLong``. Storing
    the wrapper on ``self`` also keeps it alive (ctypes callbacks must
    not be garbage-collected while installed); the previous window
    procedure is saved for later chaining/restoration.
    """
    self.__local_wnd_proc_wrapped = WndProcType(self.local_wnd_proc)
    self.__old_wnd_proc = SetWindowLong(self.__local_win_handle, GWL_WNDPROC, self.__local_wnd_proc_wrapped)
Attach to OS Window message handler
def _connected_pids(self, from_parent=True):
    """Build a query for PIDs connected to ``self.pid`` via PIDRelation.

    :param from_parent: when True, select children of the current pid;
        otherwise select its parents.
    :returns: a PIDQuery yielding the connected PersistentIdentifiers.
    """
    # Alias for the *target* side of the relation so it can be selected.
    to_pid = aliased(PersistentIdentifier, name='to_pid')
    # Pick which relation columns are source vs. destination.
    if from_parent:
        to_relation = PIDRelation.child_id
        from_relation = PIDRelation.parent_id
    else:
        to_relation = PIDRelation.parent_id
        from_relation = PIDRelation.child_id
    query = PIDQuery(
        [to_pid], db.session(), _filtered_pid_class=to_pid
    ).join(
        PIDRelation,
        to_pid.id == to_relation
    )
    # A concrete PID row can be matched by id; otherwise join on the
    # (pid_value, pid_type) pair carried by the lightweight pid object.
    if isinstance(self.pid, PersistentIdentifier):
        query = query.filter(from_relation == self.pid.id)
    else:
        from_pid = aliased(PersistentIdentifier, name='from_pid')
        query = query.join(
            from_pid,
            from_pid.id == from_relation
        ).filter(
            from_pid.pid_value == self.pid.pid_value,
            from_pid.pid_type == self.pid.pid_type,
        )
    return query
Follow a relationship to find connected PIDs. :param from_parent: search children from the current pid if True, else search for its parents. :type from_parent: bool