def _getFromDate(l, date):
    try:
        date = _toDate(date)
        i = _insertDateIndex(date, l) - 1
        if i == -1:
            return l[0]
        return l[i]
    except (ValueError, TypeError):
        return l[0]
Returns the element of ``l`` at the given or best-fitting date.
def _fill_col_borders(self):
    first = True
    last = True
    if self.col_indices[0] == self.hcol_indices[0]:
        first = False
    if self.col_indices[-1] == self.hcol_indices[-1]:
        last = False
    for num, data in enumerate(self.tie_data):
        self.tie_data[num] = self._extrapolate_cols(data, first, last)
    if first and last:
        self.col_indices = np.concatenate((np.array([self.hcol_indices[0]]),
                                           self.col_indices,
                                           np.array([self.hcol_indices[-1]])))
    elif first:
        self.col_indices = np.concatenate((np.array([self.hcol_indices[0]]),
                                           self.col_indices))
    elif last:
        self.col_indices = np.concatenate((self.col_indices,
                                           np.array([self.hcol_indices[-1]])))
Add the first and last column to the data by extrapolation.
def from_gpx(gpx_track_point):
    return Point(
        lat=gpx_track_point.latitude,
        lon=gpx_track_point.longitude,
        time=gpx_track_point.time
    )
Creates a point from GPX representation.

Arguments:
    gpx_track_point (:obj:`gpxpy.GPXTrackPoint`)

Returns:
    :obj:`Point`
def create_eager_metrics_for_problem(problem, model_hparams):
    metric_fns = problem.eval_metric_fns(model_hparams)
    problem_hparams = problem.get_hparams(model_hparams)
    target_modality = problem_hparams.modality["targets"]
    weights_fn = model_hparams.weights_fn.get(
        "targets",
        modalities.get_weights_fn(target_modality))
    return create_eager_metrics_internal(metric_fns, weights_fn=weights_fn)
See create_eager_metrics.
def echo(msg, *args, **kwargs):
    file = kwargs.pop('file', None)
    nl = kwargs.pop('nl', True)
    err = kwargs.pop('err', False)
    color = kwargs.pop('color', None)
    msg = safe_unicode(msg).format(*args, **kwargs)
    click.echo(msg, file=file, nl=nl, err=err, color=color)
Wraps click.echo; handles formatting and checks encoding.
def parallel_split_combine(args, split_fn, parallel_fn, parallel_name,
                           combiner, file_key, combine_arg_keys,
                           split_outfile_i=-1):
    args = [x[0] for x in args]
    split_args, combine_map, finished_out, extras = _get_split_tasks(
        args, split_fn, file_key, split_outfile_i)
    split_output = parallel_fn(parallel_name, split_args)
    if isinstance(combiner, six.string_types):
        combine_args, final_args = _organize_output(
            split_output, combine_map, file_key, combine_arg_keys)
        parallel_fn(combiner, combine_args)
    elif callable(combiner):
        final_args = combiner(split_output, combine_map, file_key)
    return finished_out + final_args + extras
Split, run split items in parallel then combine to output file.

split_fn: Split an input file into parts for processing. Returns the name of
    the combined output file along with the individual split output names and
    arguments for the parallel function.
parallel_fn: Reference to run_parallel function that will run single core,
    multicore, or distributed as needed.
parallel_name: The name of the function, defined in
    bcbio.distributed.tasks/multitasks/ipythontasks, to run in parallel.
combiner: The name of the function, also from tasks, that combines the split
    output files into a final ready-to-run file. Can also be a callable
    function if combining is delayed.
split_outfile_i: The location of the output file in the arguments generated
    by the split function. Defaults to the last item in the list.
async def _close(self):
    try:
        if self._hb_inbox_sid is not None:
            await self._nc.unsubscribe(self._hb_inbox_sid)
            self._hb_inbox = None
            self._hb_inbox_sid = None
        if self._ack_subject_sid is not None:
            await self._nc.unsubscribe(self._ack_subject_sid)
            self._ack_subject = None
            self._ack_subject_sid = None
    except:
        pass
    for _, sub in self._sub_map.items():
        if sub._msgs_task is not None:
            sub._msgs_task.cancel()
        try:
            await self._nc.unsubscribe(sub.sid)
        except:
            continue
    self._sub_map = {}
Removes any present internal state from the client.
async def unsubscribe(self, topic):
    if self.socket_type not in {SUB, XSUB}:
        raise AssertionError(
            "A %s socket cannot unsubscribe." % self.socket_type.decode(),
        )
    self._subscriptions.remove(topic)
    tasks = [
        asyncio.ensure_future(
            peer.connection.local_unsubscribe(topic),
            loop=self.loop,
        )
        for peer in self._peers
        if peer.connection
    ]
    if tasks:
        try:
            await asyncio.wait(tasks, loop=self.loop)
        finally:
            for task in tasks:
                task.cancel()
Unsubscribe the socket from the specified topic.

:param topic: The topic to unsubscribe from.
def _deploy_and_remember(
    self,
    contract_name: str,
    arguments: List,
    deployed_contracts: 'DeployedContracts',
) -> Contract:
    receipt = self.deploy(contract_name, arguments)
    deployed_contracts['contracts'][contract_name] = _deployed_data_from_receipt(
        receipt=receipt,
        constructor_arguments=arguments,
    )
    return self.web3.eth.contract(
        abi=self.contract_manager.get_contract_abi(contract_name),
        address=deployed_contracts['contracts'][contract_name]['address'],
    )
Deploys contract_name with arguments and stores the result in deployed_contracts.
def get(self, telescope, band):
    klass = self._bpass_classes.get(telescope)
    if klass is None:
        raise NotDefinedError('bandpass data for %s not defined', telescope)
    bp = klass()
    bp.registry = self
    bp.telescope = telescope
    bp.band = band
    return bp
Get a Bandpass object for a known telescope and filter.
def _merge(self, a, b):
    for k, v in a.items():
        if isinstance(v, dict):
            item = b.setdefault(k, {})
            self._merge(v, item)
        elif isinstance(v, list):
            item = b.setdefault(k, [{}])
            if len(v) == 1 and isinstance(v[0], dict):
                self._merge(v[0], item[0])
            else:
                b[k] = v
        else:
            b[k] = v
    return b
Merges a into b.
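A minimal standalone sketch of the same deep-merge behavior (dict branch only; the list handling above is omitted):

def merge(a, b):
    for k, v in a.items():
        if isinstance(v, dict):
            # recurse into nested dicts instead of overwriting them
            merge(v, b.setdefault(k, {}))
        else:
            b[k] = v
    return b

merge({'db': {'host': 'localhost'}}, {'db': {'port': 5432}})
# -> {'db': {'port': 5432, 'host': 'localhost'}}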
def integer_squareroot(value: int) -> int:
    if not isinstance(value, int) or isinstance(value, bool):
        raise ValueError(
            "Value must be an integer: Got: {0}".format(
                type(value),
            )
        )
    if value < 0:
        raise ValueError(
            "Value cannot be negative: Got: {0}".format(
                value,
            )
        )
    with decimal.localcontext() as ctx:
        ctx.prec = 128
        return int(decimal.Decimal(value).sqrt())
Return the integer square root of ``value``. Uses Python's decimal module to compute the square root of ``value`` with a precision of 128-bits. The value 128 is chosen since the largest square root of a 256-bit integer is a 128-bit integer.
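A quick usage sketch (assuming ``import decimal`` alongside the function above):

integer_squareroot(16)           # 4
integer_squareroot(17)           # 4 - the floor of the true root
integer_squareroot(2**256 - 1)   # a 128-bit result, per the note above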
def order_transforms(transforms):
    outputs = set().union(*[t.outputs for t in transforms])
    out = []
    remaining = [t for t in transforms]
    while remaining:
        leftover = []
        for t in remaining:
            if t.inputs.isdisjoint(outputs):
                out.append(t)
                outputs -= t.outputs
            else:
                leftover.append(t)
        remaining = leftover
    return out
Orders transforms to ensure proper chaining. For example, if
`transforms = [B, A, C]`, and `A` produces outputs needed by `B`, the
transforms will be re-ordered to `[A, B, C]`; see the toy run after this
docstring.

Parameters
----------
transforms : list
    List of transform instances to order.

Returns
-------
list :
    List of transforms ordered such that forward transforms can be carried
    out without error.
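A toy run of the ordering, using hypothetical stand-in objects that carry the ``inputs``/``outputs`` sets the function expects:

from types import SimpleNamespace

A = SimpleNamespace(inputs=set(),  outputs={'x'})
B = SimpleNamespace(inputs={'x'},  outputs={'y'})
C = SimpleNamespace(inputs=set(),  outputs={'z'})

[t.outputs for t in order_transforms([B, A, C])]
# -> [{'x'}, {'z'}, {'y'}]  (A and C run first; B waits on A's output 'x')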
def add_job(self, id, func, **kwargs):
    job_def = dict(kwargs)
    job_def['id'] = id
    job_def['func'] = func
    job_def['name'] = job_def.get('name') or id
    fix_job_def(job_def)
    return self._scheduler.add_job(**job_def)
Add the given job to the job list and wake up the scheduler if it's already
running.

:param str id: explicit identifier for the job (for modifying it later)
:param func: callable (or a textual reference to one) to run at the given time
def _find_geophysical_vars(self, ds, refresh=False):
    if self._geophysical_vars.get(ds, None) and refresh is False:
        return self._geophysical_vars[ds]
    self._geophysical_vars[ds] = cfutil.get_geophysical_variables(ds)
    return self._geophysical_vars[ds]
Returns a list of geophysical variables. Modifies `self._geophysical_vars`.

:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is invalidated.
:rtype: list
:return: A list containing strings with geophysical variable names.
def get_template_loader(self, subdir='templates'):
    if self.request is None:
        raise ValueError(
            "this method can only be called after the view middleware is run. "
            "Check that `django_mako_plus.middleware` is in MIDDLEWARE.")
    dmp = apps.get_app_config('django_mako_plus')
    return dmp.engine.get_template_loader(self.app, subdir)
App-specific function to get the current app's template loader
def get_data(self):
    data = []
    ntobj = cx.namedtuple("NtGoCnt", "Depth_Level BP_D MF_D CC_D BP_L MF_L CC_L")
    cnts = self.get_cnts_levels_depths_recs(set(self.obo.values()))
    max_val = max(max(dep for dep in cnts['depth']),
                  max(lev for lev in cnts['level']))
    for i in range(max_val + 1):
        vals = [i] + [cnts[desc][i][ns] for desc in cnts for ns in self.nss]
        data.append(ntobj._make(vals))
    return data
Collect counts of GO terms at all levels and depths.
def bootstrap_auc(df, col, pred_col, n_bootstrap=1000):
    scores = np.zeros(n_bootstrap)
    old_len = len(df)
    df.dropna(subset=[col], inplace=True)
    new_len = len(df)
    if new_len < old_len:
        logger.info("Dropping NaN values in %s to go from %d to %d rows" %
                    (col, old_len, new_len))
    preds = df[pred_col].astype(int)
    for i in range(n_bootstrap):
        sampled_counts, sampled_pred = resample(df[col], preds)
        if is_single_class(sampled_pred, col=pred_col):
            continue
        scores[i] = roc_auc_score(sampled_pred, sampled_counts)
    return scores
Calculate the bootstrapped AUC for a given col trying to predict a pred_col.

Parameters
----------
df : pandas.DataFrame
col : str
    column to retrieve the values from
pred_col : str
    the column we're trying to predict
n_bootstrap : int
    the number of bootstrap samples

Returns
-------
list : AUCs for each sampling
def decompile(f):
    co = f.__code__
    args, kwonly, varargs, varkwargs = paramnames(co)
    annotations = f.__annotations__ or {}
    defaults = list(f.__defaults__ or ())
    kw_defaults = f.__kwdefaults__ or {}
    if f.__name__ == '<lambda>':
        node = ast.Lambda
        body = pycode_to_body(co, DecompilationContext(in_lambda=True))[0]
        extra_kwargs = {}
    else:
        node = ast.FunctionDef
        body = pycode_to_body(co, DecompilationContext(in_function_block=True))
        extra_kwargs = {
            'decorator_list': [],
            'returns': annotations.get('return')
        }
    return node(
        name=f.__name__,
        args=make_function_arguments(
            args=args,
            kwonly=kwonly,
            varargs=varargs,
            varkwargs=varkwargs,
            defaults=defaults,
            kw_defaults=kw_defaults,
            annotations=annotations,
        ),
        body=body,
        **extra_kwargs
    )
Decompile a function.

Parameters
----------
f : function
    The function to decompile.

Returns
-------
ast : ast.FunctionDef
    A FunctionDef node that compiles to f.
def _get_prefixes(self):
    prefixes = {
        "@": "o",
        "+": "v",
    }
    feature_prefixes = self.server.features.get('PREFIX')
    if feature_prefixes:
        modes = feature_prefixes[1:len(feature_prefixes) // 2]
        symbols = feature_prefixes[len(feature_prefixes) // 2 + 1:]
        prefixes = dict(zip(symbols, modes))
    return prefixes
Get the possible nick prefixes and associated modes for a client.
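To see what the slicing does, take the conventional IRC ISUPPORT value ``(ov)@+`` (a hypothetical but typical example):

feature_prefixes = "(ov)@+"
half = len(feature_prefixes) // 2        # 3
modes = feature_prefixes[1:half]         # 'ov'  - mode letters inside the parens
symbols = feature_prefixes[half + 1:]    # '@+'  - the matching nick prefixes
dict(zip(symbols, modes))                # {'@': 'o', '+': 'v'}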
def set_level_for_logger_and_its_handlers(log: logging.Logger, level: int) -> None:
    log.setLevel(level)
    for h in log.handlers:
        h.setLevel(level)
Set a log level for a log and all its handlers.

Args:
    log: log to modify
    level: log level to set
def workspace(show_values: bool = True, show_types: bool = True):
    r = _get_report()
    data = {}
    for key, value in r.project.shared.fetch(None).items():
        if key.startswith('__cauldron_'):
            continue
        data[key] = value
    r.append_body(render.status(data, values=show_values, types=show_types))
Adds a list of the shared variables currently stored in the project workspace.

:param show_values: When true the values for each variable will be shown in
    addition to their name.
:param show_types: When true the data types for each shared variable will be
    shown in addition to their name.
def blurring_kernel(shape=None):
    name = 'motionblur.mat'
    url = URL_CAM + name
    dct = get_data(name, subset=DATA_SUBSET, url=url)
    return convert(255 - dct['im'], shape, normalize='sum')
Blurring kernel for convolution simulations. The kernel is scaled to sum to
one.

Returns
-------
An image with the following properties:
    image type: grayscale
    size: [100, 100] (if not specified by `shape`)
    scale: [0, 1]
    type: float64
def get_element_coors(self, ig=None):
    cc = self.coors
    n_ep_max = self.n_e_ps.max()
    coors = nm.empty((self.n_el, n_ep_max, self.dim), dtype=cc.dtype)
    for ig, conn in enumerate(self.conns):
        i1, i2 = self.el_offsets[ig], self.el_offsets[ig + 1]
        coors[i1:i2, :conn.shape[1], :] = cc[conn]
    return coors
Get the coordinates of the vertices of elements in group `ig`.

Parameters
----------
ig : int, optional
    The element group. If None, the coordinates for all groups are returned,
    filled with zeros at places of missing vertices, i.e. where elements have
    fewer than the full number of vertices (`n_ep_max`).

Returns
-------
coors : array
    The coordinates in an array of shape `(n_el, n_ep_max, dim)`.
def verify_indices_all_unique(obj):
    axis_names = [
        ('index',),
        ('index', 'columns'),
        ('items', 'major_axis', 'minor_axis')
    ][obj.ndim - 1]
    for axis_name, index in zip(axis_names, obj.axes):
        if index.is_unique:
            continue
        raise ValueError(
            "Duplicate entries in {type}.{axis}: {dupes}.".format(
                type=type(obj).__name__,
                axis=axis_name,
                dupes=sorted(index[index.duplicated()]),
            )
        )
    return obj
Check that all axes of a pandas object are unique.

Parameters
----------
obj : pd.Series / pd.DataFrame / pd.Panel
    The object to validate.

Returns
-------
obj : pd.Series / pd.DataFrame / pd.Panel
    The validated object, unchanged.

Raises
------
ValueError
    If any axis has duplicate entries.
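A quick check of both paths (assumes pandas is importable):

import pandas as pd

verify_indices_all_unique(pd.Series([1, 2], index=['a', 'b']))  # returns the Series
verify_indices_all_unique(pd.Series([1, 2], index=['a', 'a']))
# ValueError: Duplicate entries in Series.index: ['a'].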
def delete_resource_scenario(scenario_id, resource_attr_id, quiet=False, **kwargs):
    _check_can_edit_scenario(scenario_id, kwargs['user_id'])
    _delete_resourcescenario(scenario_id, resource_attr_id, suppress_error=quiet)
Remove the data associated with a resource in a scenario.
def format(self):
    if self._format:
        return self._format
    elif self.pil_image:
        return self.pil_image.format
The format of the image file. An uppercase string corresponding to the :attr:`PIL.ImageFile.ImageFile.format` attribute. Valid values include ``"JPEG"`` and ``"PNG"``.
def checkin_bundle(self, db_path, replace=True, cb=None):
    from ambry.orm.exc import NotFoundError
    db = Database('sqlite:///{}'.format(db_path))
    db.open()
    if len(db.datasets) == 0:
        raise NotFoundError("Did not get a dataset in the {} bundle".format(db_path))
    ds = db.dataset(db.datasets[0].vid)
    assert ds is not None
    assert ds._database
    try:
        b = self.bundle(ds.vid)
        self.logger.info(
            "Removing old bundle before checking in new one of same number: '{}'"
            .format(ds.vid))
        self.remove(b)
    except NotFoundError:
        pass
    try:
        self.dataset(ds.vid)
    except NotFoundError:
        self.database.copy_dataset(ds, cb=cb)
    b = self.bundle(ds.vid)
    b.commit()
    self.search.index_bundle(b)
    return b
Add a bundle, as a Sqlite file, to this library
def _get_target_from_package_name(self, target, package_name, file_path):
    address_path = self.parse_file_path(file_path)
    if not address_path:
        return None
    dep_spec_path = os.path.normpath(
        os.path.join(target.address.spec_path, address_path))
    for dep in target.dependencies:
        if dep.package_name == package_name and dep.address.spec_path == dep_spec_path:
            return dep
    return None
Get a dependent target given the package name and relative file path.

This will only traverse direct dependencies of the passed target. It is not
necessary to traverse further than that because transitive dependencies will
be resolved under the direct dependencies and every direct dependency is
symlinked to the target. Returns `None` if the target does not exist.

:param NodePackage target: A subclass of NodePackage
:param string package_name: A package.json name that is required to be the
    same as the target name
:param string file_path: Relative filepath from target to the package in the
    format 'file:<address_path>'
def out(self, obj, formatter=None, out_file=None):
    if not isinstance(obj, CommandResultItem):
        raise TypeError('Expected {} got {}'.format(CommandResultItem.__name__,
                                                    type(obj)))
    import platform
    import colorama
    if platform.system() == 'Windows':
        out_file = colorama.AnsiToWin32(out_file).stream
    output = formatter(obj)
    try:
        print(output, file=out_file, end='')
    except IOError as ex:
        if ex.errno == errno.EPIPE:
            pass
        else:
            raise
    except UnicodeEncodeError:
        print(output.encode('ascii', 'ignore').decode('utf-8', 'ignore'),
              file=out_file, end='')
Produces the output using the command result. The method does not return a
result as the output is written straight to the output file.

:param obj: The command result
:type obj: knack.util.CommandResultItem
:param formatter: The formatter we should use for the command result
:type formatter: function
:param out_file: The file to write output to
:type out_file: file-like object
def weld_cast_array(array, weld_type, to_weld_type):
    if not is_numeric(weld_type) or not is_numeric(to_weld_type):
        raise TypeError('Cannot cast array of type={} to type={}'
                        .format(weld_type, to_weld_type))
    obj_id, weld_obj = create_weld_object(array)
    # The Weld template string was lost in extraction; this map-and-cast
    # form is an assumption about what it looked like.
    weld_template = 'map({array}, |e: {type}| {to}(e))'
    weld_obj.weld_code = weld_template.format(array=obj_id,
                                              type=weld_type,
                                              to=to_weld_type)
    return weld_obj
Cast array to a different type.

Parameters
----------
array : numpy.ndarray or WeldObject
    Input data.
weld_type : WeldType
    Type of each element in the input array.
to_weld_type : WeldType
    Desired type.

Returns
-------
WeldObject
    Representation of this computation.
def _get_zipped_rows(self, soup):
    table = soup.findChildren('table')[2]
    rows = table.findChildren(['tr'])[:-2]
    spacing = range(2, len(rows), 3)
    rows = [row for (i, row) in enumerate(rows) if (i not in spacing)]
    info = [row for (i, row) in enumerate(rows) if (i % 2 == 0)]
    detail = [row for (i, row) in enumerate(rows) if (i % 2 != 0)]
    return zip(info, detail)
Returns all 'tr' tag rows as a list of tuples. Each tuple is for a single story.
def _get_fw(self, msg, updates, req_fw_type=None, req_fw_ver=None):
    fw_type = None
    fw_ver = None
    if not isinstance(updates, tuple):
        updates = (updates, )
    for store in updates:
        fw_id = store.pop(msg.node_id, None)
        if fw_id is not None:
            fw_type, fw_ver = fw_id
            updates[-1][msg.node_id] = fw_id
            break
    if fw_type is None or fw_ver is None:
        _LOGGER.debug(
            'Node %s is not set for firmware update', msg.node_id)
        return None, None, None
    if req_fw_type is not None and req_fw_ver is not None:
        fw_type, fw_ver = req_fw_type, req_fw_ver
    fware = self.firmware.get((fw_type, fw_ver))
    if fware is None:
        _LOGGER.debug(
            'No firmware of type %s and version %s found', fw_type, fw_ver)
        return None, None, None
    return fw_type, fw_ver, fware
Get firmware type, version and a dict holding binary data.
def converts_values(self):
    return self.convert_value is not Formatter.convert_value or \
        self.convert_column is not Formatter.convert_column
Whether this Formatter also converts values.
def reindex_axis(self, labels, axis=0, **kwargs):
    if axis != 0:
        raise ValueError("cannot reindex series on non-zero axis!")
    msg = ("'.reindex_axis' is deprecated and will be removed in a future "
           "version. Use '.reindex' instead.")
    warnings.warn(msg, FutureWarning, stacklevel=2)
    return self.reindex(index=labels, **kwargs)
Conform Series to new index with optional filling logic.

.. deprecated:: 0.21.0
    Use ``Series.reindex`` instead.
def colorize(occurence, maxoccurence, minoccurence):
    if occurence == maxoccurence:
        color = (255, 0, 0)
    elif occurence == minoccurence:
        color = (0, 0, 255)
    else:
        color = (int(float(occurence) / maxoccurence * 255),
                 0,
                 int(float(minoccurence) / occurence * 255))
    return color
Map an occurrence count onto an RGB color: red for the maximum, blue for the
minimum, and a red/blue blend in between.
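A few sample calls of the function above:

colorize(10, 10, 1)   # (255, 0, 0)  - the most frequent item is pure red
colorize(1, 10, 1)    # (0, 0, 255)  - the least frequent is pure blue
colorize(5, 10, 1)    # (127, 0, 51) - intermediate counts blend the two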
def _GetPkgResources(package_name, filepath):
    requirement = pkg_resources.Requirement.parse(package_name)
    try:
        return pkg_resources.resource_filename(requirement, filepath)
    except pkg_resources.DistributionNotFound:
        pkg_resources.working_set = pkg_resources.WorkingSet()
        try:
            return pkg_resources.resource_filename(requirement, filepath)
        except pkg_resources.DistributionNotFound:
            logging.error("Distribution %s not found. Is it installed?",
                          package_name)
            return None
A wrapper for the `pkg_resource.resource_filename` function.
def from_inline(cls: Type[UnlockType], inline: str) -> UnlockType:
    data = Unlock.re_inline.match(inline)
    if data is None:
        raise MalformedDocumentError("Inline input")
    index = int(data.group(1))
    parameters_str = data.group(2).split(' ')
    parameters = []
    for p in parameters_str:
        param = UnlockParameter.from_parameter(p)
        if param:
            parameters.append(param)
    return cls(index, parameters)
Return an Unlock instance from inline string format.

:param inline: Inline string format
:return:
def extract_match(self, list_title_matches):
    list_title_matches_set = set(list_title_matches)
    list_title_count = []
    for match in list_title_matches_set:
        list_title_count.append((list_title_matches.count(match), match))
    if list_title_count and max(list_title_count)[0] != min(list_title_count)[0]:
        return max(list_title_count)[1]
    return None
Extract the title with the most matches from the list.

:param list_title_matches: A list, the extracted titles which match with others
:return: A string, the most frequently extracted title.
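For example (method shown above; ``self`` elided in this sketch):

extract_match(['Foo', 'Bar', 'Foo', 'Foo'])   # 'Foo' (3 occurrences vs 1)
extract_match(['Foo', 'Bar'])                 # None  (no clear winner)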
def show_domain_record(self, domain_id, record_id):
    json = self.request('/domains/%s/records/%s' % (domain_id, record_id),
                        method='GET')
    status = json.get('status')
    if status == 'OK':
        domain_record_json = json.get('record')
        domain_record = Record.from_json(domain_record_json)
        return domain_record
    else:
        message = json.get('message')
        raise DOPException('[%s]: %s' % (status, message))
This method returns the specified domain record.

Required parameters:
    domain_id: Integer or Domain Name (e.g. domain.com), specifies the domain
        for which to retrieve a record.
    record_id: Integer, specifies the record_id to retrieve.
def random(self, namespace=0):
    query = self.LIST.substitute(
        WIKI=self.uri, ENDPOINT=self.endpoint, LIST='random')
    query += "&rnlimit=1&rnnamespace=%d" % namespace
    emoji = [
        u'\U0001f32f', u'\U0001f355', u'\U0001f35c', u'\U0001f363',
        u'\U0001f369', u'\U0001f36a', u'\U0001f36d', u'\U0001f370',
    ]
    action = 'random'
    if namespace:
        action = 'random:%d' % namespace
    self.set_status(action, random.choice(emoji))
    return query
Returns query string for random page
def message(self, msg):
    for broker in self.message_brokers:
        try:
            broker(msg)
        except Exception as exc:
            utils.error(exc)
Send a message to third party applications
def set_riskfree_rate(self, rf):
    self.rf = rf
    self._update(self.prices)
Set annual risk-free rate property and calculate properly annualized monthly
and daily rates. Then performance stats are recalculated. Affects only this
instance of the PerformanceStats.

Args:
    * rf (float): Annual `risk-free rate
      <https://www.investopedia.com/terms/r/risk-freerate.asp>`_
def attach_const_node(node, name, value):
    if name not in node.special_attributes:
        _attach_local_node(node, nodes.const_factory(value), name)
create a Const node and register it in the locals of the given node with the specified name
def _reconstruct(self, path_to_root):
    item_pattern = re.compile(r'\d+\]')
    dot_pattern = re.compile(r'\.|\[')
    path_segments = dot_pattern.split(path_to_root)
    schema_endpoint = self.schema
    if path_segments[1]:
        for i in range(1, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                schema_endpoint = schema_endpoint[0]
            else:
                schema_endpoint = schema_endpoint[path_segments[i]]
    return schema_endpoint
A helper method for finding the schema endpoint from a path to root.

:param path_to_root: string with dot path to root
:return: list, dict, string, number, or boolean at path to root
def max_frequency(sig, FS):
    f, fs = plotfft(sig, FS, doplot=False)
    t = cumsum(fs)
    ind_mag = find(t > t[-1] * 0.95)[0]
    f_max = f[ind_mag]
    return f_max
Compute the maximum frequency of a signal.

Parameters
----------
sig: ndarray
    input from which max frequency is computed.
FS: int
    sampling frequency

Returns
-------
f_max: float
    frequency below which 95% of the cumulative spectral magnitude lies
    (computed via cumsum).
def get(self, key):
    value = None
    for store in self._stores:
        value = store.get(key)
        if value is not None:
            break
    # backfill: cache the value into every store checked before the one
    # that had it, so subsequent reads hit an earlier tier
    if value is not None:
        for store2 in self._stores:
            if store == store2:
                break
            store2.put(key, value)
    return value
Return the object named by key. Checks each datastore in order.
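A standalone sketch of the same read-through-and-backfill pattern, using a hypothetical dict-backed store:

class DictStore:
    def __init__(self):
        self.d = {}
    def get(self, key):
        return self.d.get(key)
    def put(self, key, value):
        self.d[key] = value

stores = [DictStore(), DictStore()]
stores[1].put('k', 'v')          # only the slower store has the value

value = None
for store in stores:             # read through the tiers in order
    value = store.get('k')
    if value is not None:
        break
for s in stores:                 # backfill the tiers in front of the hit
    if s is store:
        break
    s.put('k', value)

stores[0].get('k')               # 'v' - now cached in the first store too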
def drop_udf(
    self,
    name,
    input_types=None,
    database=None,
    force=False,
    aggregate=False,
):
    if not input_types:
        if not database:
            database = self.current_database
        result = self.list_udfs(database=database, like=name)
        if len(result) > 1:
            if force:
                for func in result:
                    self._drop_single_function(
                        func.name,
                        func.inputs,
                        database=database,
                        aggregate=aggregate,
                    )
                return
            else:
                raise Exception(
                    "More than one function "
                    + "with {0} found. ".format(name)
                    + "Please specify force=True"
                )
        elif len(result) == 1:
            func = result.pop()
            self._drop_single_function(
                func.name,
                func.inputs,
                database=database,
                aggregate=aggregate,
            )
            return
        else:
            raise Exception("No function found with name {0}".format(name))
    self._drop_single_function(
        name, input_types, database=database, aggregate=aggregate
    )
Drops a UDF. If only name is given, this will search for the relevant UDF
and drop it. To delete an overloaded UDF, give only a name and force=True.

Parameters
----------
name : string
input_types : list of strings (optional)
force : boolean, default False
    Must be set to true to drop overloaded UDFs
database : string, default None
aggregate : boolean, default False
def get_screen_settings(self, screen_id):
    if not isinstance(screen_id, baseinteger):
        raise TypeError("screen_id can only be an instance of type baseinteger")
    record_screen_settings = self._call("getScreenSettings", in_p=[screen_id])
    record_screen_settings = IRecordingScreenSettings(record_screen_settings)
    return record_screen_settings
Returns the recording settings for a particular screen.

in screen_id of type int
    Screen ID to retrieve recording screen settings for.

return record_screen_settings of type :class:`IRecordingScreenSettings`
    Recording screen settings for the requested screen.
def commit_format(self):
    formatted_analyses = []
    for analyze in self.analysis['messages']:
        formatted_analyses.append({
            'message': f"{analyze['source']}: {analyze['message']}. Code: {analyze['code']}",
            'file': analyze['location']['path'],
            'line': analyze['location']['line'],
        })
    return formatted_analyses
Formats the analysis into a simpler dictionary with the line, file and message values to be commented on a commit. Returns a list of dictionaries
def true_false_returns(func):
    @functools.wraps(func)
    def _execute(*args, **kwargs):
        try:
            func(*args, **kwargs)
            return True
        except:
            return False
    return _execute
Executes function; if it raises an error returns False, else True.

:param func: function to call
:return: True iff ok, else False
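Typical usage as a decorator (hypothetical example, assuming ``import os``):

import os

@true_false_returns
def remove_file(path):
    os.remove(path)

remove_file('/tmp/exists.txt')    # True if the unlink succeeded
remove_file('/tmp/missing.txt')   # False - the OSError is swallowed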
def m_quadratic_sum(A, B, max_it=50):
    gamma1 = solve_discrete_lyapunov(A, B, max_it)
    return gamma1
r""" Computes the quadratic sum .. math:: V = \sum_{j=0}^{\infty} A^j B A^{j'} V is computed by solving the corresponding discrete lyapunov equation using the doubling algorithm. See the documentation of `util.solve_discrete_lyapunov` for more information. Parameters ---------- A : array_like(float, ndim=2) An n x n matrix as described above. We assume in order for convergence that the eigenvalues of :math:`A` have moduli bounded by unity B : array_like(float, ndim=2) An n x n matrix as described above. We assume in order for convergence that the eigenvalues of :math:`A` have moduli bounded by unity max_it : scalar(int), optional(default=50) The maximum number of iterations Returns ======== gamma1: array_like(float, ndim=2) Represents the value :math:`V`
def dispatch_url(self, url_string):
    url, url_adapter, query_args = self.parse_url(url_string)
    try:
        endpoint, kwargs = url_adapter.match()
    except NotFound:
        raise NotSupported(url_string)
    except RequestRedirect as e:
        new_url = "{0.new_url}?{1}".format(e, url_encode(query_args))
        return self.dispatch_url(new_url)
    try:
        handler = import_string(endpoint)
        request = Request(url=url, args=query_args)
        return handler(request, **kwargs)
    except RequestRedirect as e:
        return self.dispatch_url(e.new_url)
Dispatch the URL string to the target endpoint function.

:param url_string: the origin URL string.
:returns: the return value of calling dispatched function.
def _build(self, inputs):
    shape_inputs = inputs.get_shape().as_list()
    rank = len(shape_inputs)
    max_dim = np.max(self._dims) + 1
    if rank < max_dim:
        raise ValueError("Rank of inputs must be at least {}.".format(max_dim))
    full_begin = [0] * rank
    full_size = [-1] * rank
    for dim, begin, size in zip(self._dims, self._begin, self._size):
        full_begin[dim] = begin
        full_size[dim] = size
    return tf.slice(inputs, begin=full_begin, size=full_size)
Connects the SliceByDim module into the graph.

Args:
    inputs: `Tensor` to slice. Its rank must be greater than the maximum
        dimension specified in `dims` (plus one as python is 0 indexed).

Returns:
    The sliced tensor.

Raises:
    ValueError: If `inputs` tensor has insufficient rank.
def clean_registration_ids(self, registration_ids=[]):
    valid_registration_ids = []
    for registration_id in registration_ids:
        details = self.registration_info_request(registration_id)
        if details.status_code == 200:
            valid_registration_ids.append(registration_id)
    return valid_registration_ids
Checks registration ids and excludes inactive ids.

Args:
    registration_ids (list, optional): list of ids to be cleaned

Returns:
    list: cleaned registration ids
def from_array(array):
    if array is None or not array:
        return None
    assert_type_or_raise(array, dict, parameter_name="array")
    data = {}
    data['label'] = u(array.get('label'))
    data['amount'] = int(array.get('amount'))
    instance = LabeledPrice(**data)
    instance._raw = array
    return instance
Deserialize a new LabeledPrice from a given dictionary.

:return: new LabeledPrice instance.
:rtype: LabeledPrice
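Usage sketch (assuming ``from_array`` is exposed on ``LabeledPrice``, as the docstring implies):

price = LabeledPrice.from_array({'label': 'Shipping', 'amount': 499})
(price.label, price.amount)    # ('Shipping', 499)
LabeledPrice.from_array(None)  # None - empty input short-circuits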
def on_view_not_found(
    self,
    environ: Dict[str, Any],
    start_response: Callable[[str, List[Tuple[str, str]]], None],
) -> Iterable[bytes]:
    start_response('404 Not Found', [('Content-type', 'text/plain')])
    return [b'Not found']
Called when a view is not found.
def findWCSExtn(filename):
    rootname, extroot = fileutil.parseFilename(filename)
    extnum = None
    if extroot is None:
        fimg = fits.open(rootname, memmap=False)
        for i, extn in enumerate(fimg):
            if 'crval1' in extn.header:
                refwcs = wcsutil.HSTWCS('{}[{}]'.format(rootname, i))
                if refwcs.wcs.has_cd():
                    extnum = '{}'.format(i)
                    break
        fimg.close()
    else:
        try:
            refwcs = wcsutil.HSTWCS(filename)
            if refwcs.wcs.has_cd():
                extnum = extroot
        except:
            extnum = None
    return extnum
Return the extension number for an extension with a valid WCS.

Returns
-------
extnum : str, None
    Value of extension name as a string, either as provided by the user or
    based on the extension number for the first extension which contains a
    valid HSTWCS object. Returns None if no extension can be found with a
    valid WCS.

Notes
-----
The return value from this function can be used as input to create another
HSTWCS with the syntax::

    HSTWCS('{}[{}]'.format(filename, extnum))
def lockfile(path):
    with genfile(path) as fd:
        fcntl.lockf(fd, fcntl.LOCK_EX)
        yield None
A file lock with-block helper.

Args:
    path (str): A path to a lock file.

Examples:
    Get the lock on a file and dostuff while having the lock::

        path = '/hehe/haha.lock'
        with lockfile(path):
            dostuff()

Notes:
    This is currently based on fcntl.lockf(), and as such, it is purely
    advisory locking. If multiple processes are attempting to obtain a lock
    on the same file, this will block until the process which has the current
    lock releases it.

Yields:
    None
def new_request(sender, request=None, notify=True, **kwargs):
    if current_app.config['COMMUNITIES_MAIL_ENABLED'] and notify:
        send_community_request_email(request)
New request for inclusion.
def observed(self, band, corrected=True):
    if band not in 'ugriz':
        raise ValueError("band='{0}' not recognized".format(band))
    i = 'ugriz'.find(band)
    t, y, dy = self.lcdata.get_lightcurve(self.lcid, return_1d=False)
    if corrected:
        ext = self.obsmeta['rExt'] * self.ext_correction[band]
    else:
        ext = 0
    return t[:, i], y[:, i] - ext, dy[:, i]
Return observed values in the given band.

Parameters
----------
band : str
    desired bandpass: should be one of ['u', 'g', 'r', 'i', 'z']
corrected : bool (optional)
    If true, correct for extinction

Returns
-------
t, mag, dmag : ndarrays
    The times, magnitudes, and magnitude errors for the specified band.
def _FormatOtherFileToken(self, token_data):
    timestamp = token_data.microseconds + (
        token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    date_time_string = date_time.CopyToDateTimeString()
    return {
        'string': token_data.name.rstrip('\x00'),
        'timestamp': date_time_string}
Formats an other file token as a dictionary of values.

Args:
    token_data (bsm_token_data_other_file32): AUT_OTHER_FILE32 token data.

Returns:
    dict[str, str]: token values.
async def generate_wallet_key(config: Optional[str]) -> str:
    logger = logging.getLogger(__name__)
    logger.debug("generate_wallet_key: >>> config: %r", config)
    if not hasattr(generate_wallet_key, "cb"):
        logger.debug("generate_wallet_key: Creating callback")
        generate_wallet_key.cb = create_cb(
            CFUNCTYPE(None, c_int32, c_int32, c_char_p))
    c_config = c_char_p(config.encode('utf-8')) if config is not None else None
    key = await do_call('indy_generate_wallet_key',
                        c_config,
                        generate_wallet_key.cb)
    res = key.decode()
    logger.debug("generate_wallet_key: <<< res: %r", res)
    return res
Generate wallet master key. Returned key is compatible with "RAW" key
derivation method. It allows to avoid expensive key derivation for use cases
when wallet keys can be stored in a secure enclave.

:param config: (optional) key configuration json.
    {
        "seed": string, (optional) Seed that allows deterministic key
            creation (if not set random one will be created). Can be UTF-8,
            base64 or hex string.
    }
:return: generated wallet master key
def info(self, message, domain=None):
    if domain is None:
        domain = self.extension_name
    info(message, domain)
Shortcut function for `utils.loggable.info`.

Args:
    message: see `utils.loggable.info`
    domain: see `utils.loggable.info`
def get_multipart_md5(self, filename, chunk_size=8 * 1024 * 1024):
    md5s = []
    with open(filename, 'rb') as fp:
        while True:
            data = fp.read(chunk_size)
            if not data:
                break
            md5s.append(hashlib.md5(data))
    digests = b"".join(m.digest() for m in md5s)
    new_md5 = hashlib.md5(digests)
    new_etag = '"%s-%s"' % (new_md5.hexdigest(), len(md5s))
    return new_etag.strip('"').strip("'")
Returns the md5 checksum of the provided file name after breaking it into chunks. This is done to mirror the method used by Amazon S3 after a multipart upload.
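A self-contained sketch of the same ETag scheme over an in-memory payload (the S3 convention: md5 of the concatenated per-part md5 digests, a dash, then the part count):

import hashlib

payload = b'x' * (20 * 1024 * 1024)   # 20 MiB of data
chunk = 8 * 1024 * 1024               # 8 MiB parts -> 3 parts
parts = [payload[i:i + chunk] for i in range(0, len(payload), chunk)]
digests = b"".join(hashlib.md5(p).digest() for p in parts)
etag = "%s-%d" % (hashlib.md5(digests).hexdigest(), len(parts))
# should match what S3 reports after a 3-part multipart upload of the
# same bytes with the same part size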
def update_logo_preview(self):
    logo_path = self.organisation_logo_path_line_edit.text()
    if os.path.exists(logo_path):
        icon = QPixmap(logo_path)
        label_size = self.organisation_logo_label.size()
        label_size.setHeight(label_size.height() - 2)
        label_size.setWidth(label_size.width() - 2)
        scaled_icon = icon.scaled(label_size, Qt.KeepAspectRatio)
        self.organisation_logo_label.setPixmap(scaled_icon)
    else:
        self.organisation_logo_label.setText(tr("Logo not found"))
Update logo based on the current logo path.
def get(ctx, key):
    file = ctx.obj['FILE']
    stored_value = get_key(file, key)
    if stored_value:
        click.echo('%s=%s' % (key, stored_value))
    else:
        exit(1)
Retrieve the value for the given key.
def map_to_matype(self, matype):
    try:
        value = int(matype)
    except ValueError:
        # not an integer: look the string name up in the math-type map
        value = AlphaVantage._ALPHA_VANTAGE_MATH_MAP.index(matype)
    else:
        # the range check lives outside the try block so its ValueError is
        # not swallowed by the except clause above
        if abs(value) > len(AlphaVantage._ALPHA_VANTAGE_MATH_MAP):
            raise ValueError("The value {} is not supported".format(value))
    return value
Convert to the alpha vantage math type integer. It returns an integer
correspondent to the type of math to apply to a function. It raises
ValueError if an integer greater than the supported math types is given.

Keyword Arguments:
    matype: The math type of the alpha vantage api. It accepts integers or a
        string representing the math type.

        * 0 = Simple Moving Average (SMA)
        * 1 = Exponential Moving Average (EMA)
        * 2 = Weighted Moving Average (WMA)
        * 3 = Double Exponential Moving Average (DEMA)
        * 4 = Triple Exponential Moving Average (TEMA)
        * 5 = Triangular Moving Average (TRIMA)
        * 6 = T3 Moving Average
        * 7 = Kaufman Adaptive Moving Average (KAMA)
        * 8 = MESA Adaptive Moving Average (MAMA)
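Usage sketch (``self`` elided; the second call assumes the internal map lists the acronyms in the table order above, which is not confirmed by this snippet):

map_to_matype(2)       # 2 - integer codes pass through after a range check
map_to_matype('WMA')   # 2 - string names are resolved by list position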
def particles(category=None):
    filepath = os.path.join(os.path.dirname(__file__), './particles.json')
    with open(filepath) as f:
        try:
            particles = json.load(f)
        except ValueError as e:
            log.error('Bad json format in "{}"'.format(filepath))
        else:
            if category:
                if category in particles:
                    return particles[category]
                else:
                    log.warn('Category "{}" not contained in particle '
                             'dictionary!'.format(category))
            return particles
Returns a dict containing old Greek particles grouped by category.
def create_gp(self):
    nb_bams = len(self.bams)
    gp_parts = [
        # the original GnuPlot header template (a triple-quoted string)
        # was lost in extraction; an empty stand-in keeps this runnable
        textwrap.dedent(""),
        os.linesep.join([self._gp_style_func(i, nb_bams)
                         for i in range(nb_bams)]),
        # likewise for the template between the styles and the plots
        textwrap.dedent(""),
        os.linesep.join(self.gp_plots)
    ]
    gp_src = os.linesep.join(gp_parts)
    with open(self._gp_fn, "w+") as f:
        f.write(gp_src)
Create GnuPlot file.
def _fail(self, message, text, i):
    raise ValueError("{}:\n{}".format(message, text[i:i + 79]))
Raise an exception with given message and text at i.
def load(cls, fname, args):
    if args.type == JSON:
        if fname.endswith('.bz2'):
            open_ = bz2.open
        else:
            open_ = open
        if args.progress:
            print('Loading JSON data...')
        with open_(fname, 'rt') as fp:
            storage = JsonStorage.load(fp)
    else:
        storage = SqliteStorage.load(fname)
    if args.settings is not None:
        extend(storage.settings, args.settings)
    return cls.from_storage(storage)
Load a generator.

Parameters
----------
cls : `type`
    Generator class.
fname : `str`
    Input file path.
args : `argparse.Namespace`
    Command arguments.

Returns
-------
`cls`
def get_column_list_prefixed(self):
    return map(
        lambda x: ".".join([self.name, x]),
        self.columns
    )
Returns the table's columns, each prefixed with the table name
(e.g. ``table.column``).
def est_covariance_mtx(self, corr=False):
    cov = self.particle_covariance_mtx(self.particle_weights,
                                       self.particle_locations)
    if corr:
        dstd = np.sqrt(np.diag(cov))
        cov /= (np.outer(dstd, dstd))
    return cov
Returns the full-rank covariance matrix of the current particle distribution.

:param bool corr: If `True`, the covariance matrix is normalized by the outer
    product of the square root diagonal of the covariance matrix, i.e. the
    correlation matrix is returned instead.
:rtype: :class:`numpy.ndarray`, shape ``(n_modelparams, n_modelparams)``.
:returns: An array containing the estimated covariance matrix.
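The ``corr=True`` branch is the standard covariance-to-correlation transform; a standalone numpy illustration:

import numpy as np

cov = np.array([[4.0, 1.0],
                [1.0, 9.0]])
dstd = np.sqrt(np.diag(cov))        # per-parameter std devs: [2., 3.]
corr = cov / np.outer(dstd, dstd)   # unit diagonal, off-diagonal 1/(2*3)
# corr ~= [[1., 0.1667], [0.1667, 1.]]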
def _check_and_assign_normalization_members(self, normalization_ctor,
                                            normalization_kwargs):
    if isinstance(normalization_ctor, six.string_types):
        normalization_ctor = util.parse_string_to_constructor(normalization_ctor)
    if normalization_ctor is not None and not callable(normalization_ctor):
        raise ValueError(
            "normalization_ctor must be a callable or a string that specifies "
            "a callable, got {}.".format(normalization_ctor))
    self._normalization_ctor = normalization_ctor
    self._normalization_kwargs = normalization_kwargs
Checks that the normalization constructor is callable.
def reference_index(self):
    if self._db_location:
        ref_indices = glob.glob(os.path.join(self._db_location, "*",
                                             self._REF_INDEX))
        if ref_indices:
            return ref_indices[0]
Absolute path to the BWA index for EricScript reference data.
def debug_dump(message, file_prefix="dump"):
    global index
    index += 1
    with open("%s_%s.dump" % (file_prefix, index), 'w') as f:
        f.write(message.SerializeToString())
Utility while developing to dump message data to play with in the interpreter
def _validate_required(self, item, name):
    if self.required is True and item is None:
        raise ArgumentError(name, "This argument is required.")
Validate that the item is present if it's required.
def _ifelse(expr, true_expr, false_expr):
    tps = (SequenceExpr, Scalar)
    if not isinstance(true_expr, tps):
        true_expr = Scalar(_value=true_expr)
    if not isinstance(false_expr, tps):
        false_expr = Scalar(_value=false_expr)
    output_type = utils.highest_precedence_data_type(
        *[true_expr.dtype, false_expr.dtype])
    is_sequence = isinstance(expr, SequenceExpr) or \
        isinstance(true_expr, SequenceExpr) or \
        isinstance(false_expr, SequenceExpr)
    if is_sequence:
        return IfElse(_input=expr, _then=true_expr, _else=false_expr,
                      _data_type=output_type)
    else:
        return IfElse(_input=expr, _then=true_expr, _else=false_expr,
                      _value_type=output_type)
Given a boolean sequence or scalar, if true will return the left, else return
the right one.

:param expr: sequence or scalar
:param true_expr:
:param false_expr:
:return: sequence or scalar

:Example:

>>> (df.id == 3).ifelse(df.id, df.fid.astype('int'))
>>> df.isMale.ifelse(df.male_count, df.female_count)
def _countWhereGreaterEqualInRows(sparseMatrix, rows, threshold):
    return sum(sparseMatrix.countWhereGreaterOrEqual(row, row + 1,
                                                     0, sparseMatrix.nCols(),
                                                     threshold)
               for row in rows)
Like countWhereGreaterOrEqual, but for an arbitrary selection of rows, and without any column filtering.
def dot(a, b):
    if hasattr(a, '__dot__'):
        return a.__dot__(b)
    if a is None:
        return b
    else:
        raise ValueError(
            'Dot is waiting for two TT-vectors or two TT-matrices')
Dot product of two TT-matrices or two TT-vectors
def yesterday(date=None):
    if not date:
        return _date - datetime.timedelta(days=1)
    else:
        current_date = parse(date)
        return current_date - datetime.timedelta(days=1)
Return the date one day before the given date, or one day before the module's
default date if none is given.
def _absolute_path(path, relative_to=None):
    if path and os.path.isabs(path):
        return path
    if path and relative_to is not None:
        _abspath = os.path.join(relative_to, path)
        if os.path.isfile(_abspath):
            log.debug(
                'Relative path \'%s\' converted to existing absolute path '
                '\'%s\'', path, _abspath
            )
            return _abspath
    return path
Return an absolute path. In case ``relative_to`` is passed and ``path`` is
not an absolute path, we try to prepend ``relative_to`` to ``path`` and, if
that path exists, return it.
def get_func_name(func):
    func_name = getattr(func, '__name__', func.__class__.__name__)
    module_name = func.__module__
    if module_name is not None:
        module_name = func.__module__
        return '{}.{}'.format(module_name, func_name)
    return func_name
Return a name which includes the module name and function name.
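For example:

import json

get_func_name(json.dumps)      # 'json.dumps'
get_func_name(lambda x: x)     # e.g. '__main__.<lambda>'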
def set_sleep_on_power_button(enabled):
    state = salt.utils.mac_utils.validate_enabled(enabled)
    cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state)
    salt.utils.mac_utils.execute_return_success(cmd)
    return salt.utils.mac_utils.confirm_updated(
        state,
        get_sleep_on_power_button,
    )
Set whether or not the power button can sleep the computer.

:param bool enabled: True to enable, False to disable. "On" and "Off" are
    also acceptable values. Additionally you can pass 1 and 0 to represent
    True and False respectively

:return: True if successful, False if not
:rtype: bool

CLI Example:

.. code-block:: bash

    salt '*' power.set_sleep_on_power_button True
def run(self, messages):
    statistics = {}
    statistics['time'] = str(datetime.now())
    statistics['time-utc'] = str(datetime.utcnow())
    statistics['unlock'] = self.args.unlock
    if self.args.question:
        statistics['question'] = [t.name for t in self.assignment.specified_tests]
        statistics['requested-questions'] = self.args.question
        if self.args.suite:
            statistics['requested-suite'] = self.args.suite
        if self.args.case:
            statistics['requested-case'] = self.args.case
    messages['analytics'] = statistics
    self.log_run(messages)
Returns some analytics about this autograder run.
def setup(self):
    super().setup()
    self._start_time = self.clock.time
    self.initialize_simulants()
Setup the simulation and initialize its population.
def write_pid(self, pid=None):
    pid = pid or os.getpid()
    self.write_metadata_by_name(self._name, 'pid', str(pid))
Write the current process's PID to the pidfile location.
def start(track_file,
          twitter_api_key,
          twitter_api_secret,
          twitter_access_token,
          twitter_access_token_secret,
          poll_interval=15,
          unfiltered=False,
          languages=None,
          debug=False,
          outfile=None):
    listener = construct_listener(outfile)
    checker = BasicFileTermChecker(track_file, listener)
    auth = get_tweepy_auth(twitter_api_key,
                           twitter_api_secret,
                           twitter_access_token,
                           twitter_access_token_secret)
    stream = DynamicTwitterStream(auth, listener, checker,
                                  unfiltered=unfiltered, languages=languages)
    set_terminate_listeners(stream)
    if debug:
        set_debug_listener(stream)
    begin_stream_loop(stream, poll_interval)
Start the stream.
def setOverlayTransformTrackedDeviceComponent(self, ulOverlayHandle,
                                              unDeviceIndex, pchComponentName):
    fn = self.function_table.setOverlayTransformTrackedDeviceComponent
    result = fn(ulOverlayHandle, unDeviceIndex, pchComponentName)
    return result
Sets the transform to draw the overlay on a rendermodel component mesh instead of a quad. This will only draw when the system is drawing the device. Overlays with this transform type cannot receive mouse events.
def _process_deprecated(attrib, deprecated_attrib, kwargs):
    if deprecated_attrib not in DEPRECATIONS:
        raise ValueError('{0} not included in deprecations list'
                         .format(deprecated_attrib))
    if deprecated_attrib in kwargs:
        warnings.warn("'{0}' is DEPRECATED use '{1}' instead"
                      .format(deprecated_attrib,
                              DEPRECATIONS[deprecated_attrib]),
                      DeprecationWarning)
        if attrib:
            raise ValueError("You can't use both '{0}' and '{1}'. "
                             "Please only use one of them"
                             .format(deprecated_attrib,
                                     DEPRECATIONS[deprecated_attrib]))
        else:
            return kwargs.pop(deprecated_attrib)
    return attrib
Processes optional deprecated arguments.
def Map(self, function):
    new_table = self.__class__()
    new_table._table = [self.header]
    for row in self:
        filtered_row = function(row)
        if filtered_row:
            new_table.Append(filtered_row)
    return new_table
Applies the function to every row in the table.

Args:
    function: A function applied to each row.

Returns:
    A new TextTable()

Raises:
    TableError: When the transform does not produce a valid row entry. The
        transform must be compatible with Append().
def details_for_given_date_in_gradebook_history_for_this_course(self, date,
                                                                course_id):
    path = {}
    data = {}
    params = {}
    path["course_id"] = course_id
    path["date"] = date
    self.logger.debug(
        "GET /api/v1/courses/{course_id}/gradebook_history/{date} "
        "with query params: {params} and form data: {data}".format(
            params=params, data=data, **path))
    return self.generic_request(
        "GET",
        "/api/v1/courses/{course_id}/gradebook_history/{date}".format(**path),
        data=data, params=params, all_pages=True)
Details for a given date in gradebook history for this course. Returns the graders who worked on this day, along with the assignments they worked on. More details can be obtained by selecting a grader and assignment and calling the 'submissions' api endpoint for a given date.
def _db_remove_prefix(self, spec, recursive=False):
    if recursive:
        prefix = spec['prefix']
        del spec['prefix']
        where, params = self._expand_prefix_spec(spec)
        spec['prefix'] = prefix
        params['prefix'] = prefix
        where = 'prefix <<= %(prefix)s AND ' + where
    else:
        where, params = self._expand_prefix_spec(spec)
    sql = "DELETE FROM ip_net_plan AS p WHERE %s" % where
    self._execute(sql, params)
Do the underlying database operations to delete a prefix
def _store_object(self, obj_name, content, etag=None, chunked=False,
                  chunk_size=None, headers=None):
    head_etag = headers.pop("ETag", "")
    if chunked:
        headers.pop("Content-Length", "")
        headers["Transfer-Encoding"] = "chunked"
    elif etag is None and content is not None:
        etag = utils.get_checksum(content)
    if etag:
        headers["ETag"] = etag
    if not headers.get("Content-Type"):
        headers["Content-Type"] = None
    uri = "/%s/%s" % (self.uri_base, obj_name)
    resp, resp_body = self.api.method_put(uri, data=content, headers=headers)
Handles the low-level creation of a storage object and the uploading of the contents of that object.
def remove_from_parent(self):
    if self.parent:
        self.parent._children.remove(self)
        self.parent._invalidate_time_caches()
    self.parent = None
Removes this frame from its parent, and nulls the parent link
def retrieve_order(self, order_id):
    response = self.request(E.retrieveOrderSslCertRequest(
        E.id(order_id)
    ))
    return response.as_model(SSLOrder)
Retrieve details on a single order.
def fap_davies(Z, fmax, t, y, dy, normalization='standard'):
    N = len(t)
    fap_s = fap_single(Z, N, normalization=normalization)
    tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
    return fap_s + tau
Davies upper-bound to the false alarm probability (Eqn 5 of Baluev 2008)
def _error_dm(self, m, dm, s):
    pred = self.fmodel.predict_given_context(np.hstack((m, dm)), s,
                                             range(len(s)))
    err_v = pred - self.goal
    error = sum(e * e for e in err_v)
    return error
Error function. Once self.goal has been defined, compute the error of input using the generalized forward model.
def issueCommand(self, command, *args):
    result = Deferred()
    self._dq.append(result)
    self.sendLine(b" ".join([command] + list(args)))
    return result
Issue the given Assuan command and return a Deferred that will fire with the response.