Dataset schema: content — string (lengths 35 to 762k) · sha1 — string (length 40) · id — int64 (0 to 3.66M)
import numpy as np


def random_permutation_matrix(size):
    """Random permutation matrix.

    Parameters
    ----------
    size : int
        The dimension of the random permutation matrix.

    Returns
    -------
    random_permutation : array, shape (size, size)
        An identity matrix with its rows randomly shuffled.
    """
    identity = np.identity(size)
    index = np.arange(0, size)
    np.random.shuffle(index)
    random_permutation = identity[index]
    return random_permutation
0ca4e93218fd647188ac09c4d71a3df1cff3acf7
3,645,293
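A quick usage sketch for random_permutation_matrix above; the checks are illustrative and only numpy is assumed:

import numpy as np

P = random_permutation_matrix(4)
assert P.shape == (4, 4)
# a permutation matrix has exactly one 1 in every row and column
assert (P.sum(axis=0) == 1).all() and (P.sum(axis=1) == 1).all()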
from typing import Optional

import collections


def matched(captured: Optional[Capture], groups_count: int) -> MatchedType:
    """
    Construct the matched strings by traversing a given captured structure.

    The passed Capture has the last captured char, so the sequence is
    traversed in reverse. Sub-matches are put in their group index.
    Repeating sub-matches (i.e. ``(a)*``) are put into a nested sequence
    of their group index.

    :param captured: The last capture or None
    :param groups_count: number of groups
    :return: matched strings
    :private:
    """
    match = collections.defaultdict(lambda: [])
    curr_groups = []
    while captured:
        if captured.char == Symbols.GROUP_END:
            curr_groups.append(captured)
            if captured.is_repeated:
                match[captured.index].append([])
            captured = captured.prev
            continue
        if captured.char == Symbols.GROUP_START:
            curr_groups.pop()
            captured = captured.prev
            continue
        for g in curr_groups:
            if g.is_repeated:
                match[g.index][-1].append(captured.char)
            else:
                match[g.index].append(captured.char)
        captured = captured.prev
    assert not curr_groups
    return tuple(
        _join_reversed(match[g]) if g in match else None
        for g in range(groups_count))
0bb7544f9d5ac339e0aed717bc5779deba781dc8
3,645,294
def tle_fmt_float(num, width=10):
    """Return a left-aligned signed float string, with no leading zero
    left of the decimal.
    """
    digits = width - 2
    ret = "{:<.{DIGITS}f}".format(num, DIGITS=digits)
    if ret.startswith("0."):
        return " " + ret[1:]
    if ret.startswith("-0."):
        return "-" + ret[2:]
    return ret  # no leading zero to strip
686cb4061e5cf2ad620b85b0e66b96a8cd1c3abf
3,645,295
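A hedged example of the zero-stripping behavior of tle_fmt_float above; the sample values are illustrative:

print(tle_fmt_float(0.12345678))  # " .12345678" -- leading zero replaced by a space
print(tle_fmt_float(-0.5))        # "-.50000000" -- sign kept, zero dropped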
def pack(name=None, prefix=None, output=None, format='infer',
         arcroot='', dest_prefix=None, verbose=False, force=False,
         compress_level=4, n_threads=1, zip_symlinks=False, zip_64=True,
         filters=None, ignore_editable_packages=False):
    """Package an existing conda environment into an archive file.

    Parameters
    ----------
    name : str, optional
        The name of the conda environment to pack.
    prefix : str, optional
        A path to a conda environment to pack.
    output : str, optional
        The path of the output file. Defaults to the environment name with a
        ``.tar.gz`` suffix (e.g. ``my_env.tar.gz``).
    format : {'infer', 'zip', 'tar.gz', 'tgz', 'tar.bz2', 'tbz2', 'tar'}, optional
        The archival format to use. By default this is inferred by the output
        file extension.
    arcroot : str, optional
        The relative path in the archive to the conda environment.
        Defaults to ''.
    dest_prefix : str, optional
        If present, prefixes will be rewritten to this path before packaging.
        In this case the ``conda-unpack`` script will not be generated.
    verbose : bool, optional
        If True, progress is reported to stdout. Default is False.
    force : bool, optional
        Whether to overwrite any existing archive at the output path.
        Default is False.
    compress_level : int, optional
        The compression level to use, from 0 to 9. Higher numbers decrease
        output file size at the expense of compression time. Ignored for
        ``format='zip'``. Default is 4.
    zip_symlinks : bool, optional
        Symbolic links aren't supported by the Zip standard, but are
        supported by *many* common Zip implementations. If True, store
        symbolic links in the archive, instead of the file referred to by the
        link. This can avoid storing multiple copies of the same files. *Note
        that the resulting archive may silently fail on decompression if the
        ``unzip`` implementation doesn't support symlinks*. Default is False.
        Ignored if format isn't ``zip``.
    n_threads : int, optional
        The number of threads to use. Set to -1 to use the number of cpus on
        this machine. If a file format doesn't support threaded packaging,
        this option will be ignored. Default is 1.
    zip_64 : bool, optional
        Whether to enable ZIP64 extensions. Default is True.
    filters : list, optional
        A list of filters to apply to the files. Each filter is a tuple of
        ``(kind, pattern)``, where ``kind`` is either ``'exclude'`` or
        ``'include'`` and ``pattern`` is a file pattern. Filters are applied
        in the order specified.
    ignore_editable_packages : bool, optional
        By default conda-pack will error in the presence of editable
        packages. Set to True to skip these checks.

    Returns
    -------
    out_path : str
        The path to the archived environment.
    """
    if name and prefix:
        raise CondaPackException("Cannot specify both ``name`` and ``prefix``")

    if verbose:
        print("Collecting packages...")

    if prefix:
        env = CondaEnv.from_prefix(prefix, ignore_editable_packages=ignore_editable_packages)
    elif name:
        env = CondaEnv.from_name(name, ignore_editable_packages=ignore_editable_packages)
    else:
        env = CondaEnv.from_default(ignore_editable_packages=ignore_editable_packages)

    if filters is not None:
        for kind, pattern in filters:
            if kind == 'exclude':
                env = env.exclude(pattern)
            elif kind == 'include':
                env = env.include(pattern)
            else:
                raise CondaPackException("Unknown filter of kind %r" % kind)

    return env.pack(output=output, format=format, arcroot=arcroot,
                    dest_prefix=dest_prefix, verbose=verbose, force=force,
                    compress_level=compress_level, n_threads=n_threads,
                    zip_symlinks=zip_symlinks, zip_64=zip_64)
500841ec51c58ec0ff99c4b286c8a235ab887d7b
3,645,296
import numpy as np


def rasterize(points):
    """Return (array, no_data_value) tuple.

    Rasterize the indices of the points in an array at the highest quadtree
    resolution. Note that points of larger squares in the quadtree also just
    occupy one cell in the resulting array; the rest of the cells get the
    no_data_value.
    """
    points = np.asarray(points, dtype=float)
    x, y = points.transpose()
    xs, ys = analyze(x, y)
    x1, y2 = x.min(), y.max()

    # get indices to land each point index in its own array cell
    j = np.int64(np.zeros_like(x) if xs is None else (x - x1) / xs)
    i = np.int64(np.zeros_like(y) if ys is None else (y2 - y) / ys)
    index = i, j

    no_data_value = len(points)
    ids = np.arange(no_data_value)
    values = np.full((i.max() + 1, j.max() + 1), no_data_value)
    values[index] = ids
    return values, no_data_value
41db3b63a5956aff192585c7c5ce5b6c83f0d6cd
3,645,297
import numpy as np


def parse_aedge_layout_attrs(aedge, translation=None):
    """Parse a graphviz splineType."""
    if translation is None:
        translation = np.array([0, 0])
    edge_attrs = {}
    apos = aedge.attr['pos']
    # logger.info('apos = %r' % (apos,))
    end_pt = None
    start_pt = None

    def safeadd(x, y):
        if x is None or y is None:
            return None
        return x + y

    strpos_list = apos.split(' ')
    strtup_list = [ea.split(',') for ea in strpos_list]
    ctrl_ptstrs = [ea for ea in strtup_list if ea[0] not in 'es']
    end_ptstrs = [ea[1:] for ea in strtup_list[0:2] if ea[0] == 'e']
    start_ptstrs = [ea[1:] for ea in strtup_list[0:2] if ea[0] == 's']
    assert len(end_ptstrs) <= 1
    assert len(start_ptstrs) <= 1
    if len(end_ptstrs) == 1:
        end_pt = np.array([float(f) for f in end_ptstrs[0]])
    if len(start_ptstrs) == 1:
        start_pt = np.array([float(f) for f in start_ptstrs[0]])
    ctrl_pts = np.array([tuple([float(f) for f in ea]) for ea in ctrl_ptstrs])
    adata = aedge.attr
    edge_attrs['pos'] = apos
    edge_attrs['ctrl_pts'] = safeadd(ctrl_pts, translation)
    edge_attrs['start_pt'] = safeadd(start_pt, translation)
    edge_attrs['end_pt'] = safeadd(end_pt, translation)
    edge_attrs['lp'] = safeadd(parse_point(adata.get('lp', None)), translation)
    edge_attrs['label'] = adata.get('label', None)
    edge_attrs['headlabel'] = adata.get('headlabel', None)
    edge_attrs['taillabel'] = adata.get('taillabel', None)
    edge_attrs['head_lp'] = safeadd(parse_point(adata.get('head_lp', None)), translation)
    edge_attrs['tail_lp'] = safeadd(parse_point(adata.get('tail_lp', None)), translation)
    return edge_attrs
f086e2267d19710685e3515aeee352066bd983b2
3,645,298
import importlib
import re


def load_class_by_path(taskpath):
    """Given a taskpath, returns the main task class."""
    return getattr(importlib.import_module(re.sub(r"\.[^.]+$", "", taskpath)),
                   re.sub(r"^.*\.", "", taskpath))
a9601dafbc73635d81732a0f3747fd450e393d76
3,645,299
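A small illustrative call for load_class_by_path above, using a standard-library path so nothing beyond the function itself is assumed:

cls = load_class_by_path("collections.OrderedDict")
assert cls().__class__.__name__ == "OrderedDict"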
import numpy as np


def simplex_edge_tensors(dimensions,     # type: int
                         centers_in,     # type: List[List[int]]
                         centers_out,    # type: List[List[int]]
                         surrounds_in,   # type: List[List[int]]
                         surrounds_out,  # type: List[List[int]]
                         attractor_function=__euclid_function_generator,  # type: Callable[[Real], Callable[[Real], Real]]
                         flip=None       # type: Optional[int]
                         ):
    """
    Generates the minimum number of edge_orientation_detector tensors needed
    to represent all orientations of boundaries in n-dimensional space, with
    positive values only. This results in one more tensor than when negative
    values are allowed.

    :param dimensions: number of dimensions.
    :param centers_in: list of colors added together on points on the edge_orientation_detector.
    :param centers_out: list of colors outputted on points on the edge_orientation_detector.
    :param surrounds_in: list of colors subtracted together on points off the edge_orientation_detector.
    :param surrounds_out: list of colors outputted on points off the edge_orientation_detector.
    :param attractor_function: function that takes in the number of dimensions and outputs a
        function that takes in distances and returns positive values for small distances and
        negative values for large distances.
    :param flip: optional axis along which to flip the simplex coordinates.
    :return: a list of tensors for finding all orientations of boundaries.
    """
    simplex = __simplex_coordinates(dimensions)
    if flip is not None:
        simplex = np.flip(simplex, flip)
    return [edge_tensor(simplex_vector, center_in, center_out, surround_in,
                        surround_out, attractor_function)
            for simplex_vector, center_in, center_out, surround_in, surround_out
            in zip(simplex, centers_in, centers_out, surrounds_in, surrounds_out)]
fb1fdf0a46939db10770984b28dc4f33cb42d0b9
3,645,300
import urllib2


def hashtoaddress(PARAMETER):
    """Converts a 160-bit hash to an address.

    [PARAMETER] is required and should be an address hash.
    """
    d = urllib2.urlopen(blockexplorer('hashtoaddress') + '/' + str(PARAMETER))
    return d.read()
6e96698792d1e64c3feca9d6d9b14b02554cfc50
3,645,301
def magenta(msg):
    """Return colorized <msg> in magenta"""
    return __fore(msg, 'magenta')
64eda26662e283779d1a0c1884166b538aa6bb8f
3,645,303
def request_latest_news():
    """Query the last item of the database and convert it to a string.

    :return: A string with the last item of the database
    """
    article = News.query.order_by(News.id.desc()).first()
    return format_latest_article(article, request.content_type)
4ff0dc4d7f63465125d38f0683619e59a8f915e0
3,645,304
def is_vulgar(words, sentence):
    """Checks if a given line has any of the bad words from the bad words list."""
    for word in words:
        if word in sentence:
            return 1
    return 0
f8ff64f1d29313c145ebbff8fef01961e14cfd1f
3,645,305
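A quick illustrative check of is_vulgar above; note that it does plain substring matching, so a bad word hidden inside a longer word would also match:

assert is_vulgar(["darn", "heck"], "oh darn it") == 1
assert is_vulgar(["darn"], "all clear") == 0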
def edges_cross(graph, nodes1, nodes2):
    """Finds edges between two sets of disjoint nodes.

    Running time is O(len(nodes1) * len(nodes2))

    Args:
        graph (nx.Graph): an undirected graph
        nodes1 (set): set of nodes disjoint from `nodes2`
        nodes2 (set): set of nodes disjoint from `nodes1`.
    """
    return {e_(u, v) for u in nodes1
            for v in nodes2.intersection(graph.adj[u])}
96c3b2d2de97547cb16d9f2e0071bb093e815d28
3,645,306
def basket_view(func):
    """Returns rendered page for basket"""
    @jinja2_view('basket.html', template_lookup=[TEMPLATES_DIR])
    def _basket_view_call(*args, **kwargs):
        func(*args, **kwargs)
        return {'col_mapping': COLUMN_MAPPING,
                'product_list': _format_products_for_web(get_basket_products())}
    return _basket_view_call
c818d1bd77fe100df857d746109f20caebd8581f
3,645,307
def py2to3(target_path, interpreter_command_name="python", is_transform=False,
           is_del_bak=False, is_html_diff=False, is_check_requirements=False):
    """
    The main entrance of the 2to3 function provides a series of parameter
    entrances. The main functions are as follows:
    1. Whether to enable automatic conversion of Python2 code to Python3
    2. Determine whether to keep a backup of Python2 code
    3. Determine whether to open the conversion code text comparison
    4. Determine whether the version of the library that the project depends
       on is suitable for the current Python environment.

    :param target_path: str, project path
    :param interpreter_command_name: str, interpreter command name, default "python".
        Please make sure that the Python terminal environment has been configured successfully
    :param is_transform: bool, default False
    :param is_del_bak: bool, default False
    :param is_html_diff: bool, default False
    :param is_check_requirements: bool, default False
    :return: bool, ignore
    """
    # Whether to enable automatic conversion of Python2 code to Python3
    if is_transform:
        files_transform(
            target_path=target_path,
            interpreter_command_name=interpreter_command_name
        )

    # Determine whether to keep a backup of Python2 code
    if is_del_bak:
        bak_files_clear(target_path=target_path)

    # Determine whether to open the conversion code text comparison
    if is_html_diff:
        html_diff_generate(target_path=target_path)

    # Determine whether the version of the library that the project
    # depends on is suitable for the current Python environment.
    if is_check_requirements:
        libraries_detect_and_recommend(target_path=target_path)

    return True
8581beacd7daa174309da99c6857acec841345bf
3,645,308
import re


def _get_hash_aliases(name):
    """
    internal helper used by :func:`lookup_hash` --
    normalize arbitrary hash name to hashlib format.
    if name not recognized, returns dummy record and issues a warning.

    :arg name: unnormalized name
    :returns: tuple with 2+ elements: ``(hashlib_name, iana_name|None, ... 0+ aliases)``.
    """
    # normalize input
    orig = name
    if not isinstance(name, str):
        name = to_native_str(name, 'utf-8', 'hash name')
    name = re.sub("[_ /]", "-", name.strip().lower())
    if name.startswith("scram-"):  # helper for SCRAM protocol (see passlib.handlers.scram)
        name = name[6:]
        if name.endswith("-plus"):
            name = name[:-5]

    # look through standard names and known aliases
    def check_table(name):
        for row in _known_hash_names:
            if name in row:
                return row

    result = check_table(name)
    if result:
        return result

    # try to clean name up some more
    m = re.match(r"(?i)^(?P<name>[a-z]+)-?(?P<rev>\d)?-?(?P<size>\d{3,4})?$", name)
    if m:
        # roughly follows "SHA2-256" style format; normalize representation,
        # and check the table.
        iana_name, rev, size = m.group("name", "rev", "size")
        if rev:
            iana_name += rev
        hashlib_name = iana_name
        if size:
            iana_name += "-" + size
            if rev:
                hashlib_name += "_"
            hashlib_name += size
        result = check_table(iana_name)
        if result:
            return result
        # not found in table, but roughly recognize format.
        # use names we built up as fallback.
        log.info("normalizing unrecognized hash name %r => %r / %r",
                 orig, hashlib_name, iana_name)
    else:
        # just can't make sense of it. return something
        iana_name = name
        hashlib_name = name.replace("-", "_")
        log.warning("normalizing unrecognized hash name and format %r => %r / %r",
                    orig, hashlib_name, iana_name)

    return hashlib_name, iana_name
537c30fee93c465a768e80dd6fc8314555b65df5
3,645,310
def dirac_2d_v_and_h(direction, G_row, vec_len_row, num_vec_row, G_col,
                     vec_len_col, num_vec_col, a, K, noise_level,
                     max_ini, stop_cri):
    """
    used to run the reconstructions along horizontal and vertical
    directions in parallel.
    """
    if direction == 0:  # row reconstruction
        c_recon, min_error, b_recon, ini = \
            recon_2d_dirac_vertical(G_row, vec_len_row, num_vec_row, a, K,
                                    noise_level, max_ini, stop_cri)
    else:  # column reconstruction
        c_recon, min_error, b_recon, ini = \
            recon_2d_dirac_vertical(G_col, vec_len_col, num_vec_col, a, K,
                                    noise_level, max_ini, stop_cri)
    return c_recon, min_error, b_recon, ini
e68945c68cb80ef001e027c30651d1f3a38369e4
3,645,311
import importlib
from typing import Tuple


def Matrix(*args, **kwargs):
    """Function for creating matrices of arbitrary dimension"""

    h = kwargs.get("h")
    if h in (1, 2, 3):
        matrix_hilfe(h)
        return
    elif isinstance(h, (Integer, int)):
        matrix_hilfe(1)
        return

    Vektor = importlib.import_module('agla.lib.objekte.vektor').Vektor

    # create a SymPy matrix the usual way
    if iterable(args) and not isinstance(args[0], Vektor):
        m = SympyMatrix(*args, **kwargs)
        for i in range(m.rows):
            for j in range(m.cols):
                try:
                    m[i, j] = nsimplify(m[i, j])
                except RecursionError:
                    pass
        return m

    # create a SymPy matrix from the given column vectors
    try:
        if not args:
            raise AglaError('mindestens zwei Vektoren angeben')
        if isinstance(args[0], (tuple, Tuple, list, set)):
            vektoren = args[0]
            if not type(vektoren) == list:
                vektoren = list(vektoren)
        else:
            vektoren = list(args)
        if not all(isinstance(v, Vektor) for v in vektoren):
            raise AglaError('Vektoren angeben')
        if not all(v.dim == vektoren[0].dim for v in vektoren):
            raise AglaError('die Vektoren haben unterschiedliche Dimension')
    except AglaError as e:
        print('agla:', str(e))
        return  # abort on invalid input instead of falling through

    liste = [[k for k in v.komp] for v in vektoren]
    m, n = vektoren[0].dim, len(vektoren)
    zeilen = [[liste[i][j] for i in range(n)] for j in range(m)]
    M = SympyMatrix(zeilen)
    return M
f9bae41e6ce6f6b3c144d8844317ae7b2272bb91
3,645,312
def afw_word_acceptance(afw: dict, word: list) -> bool:
    """Checks if a **word** is accepted by the input AFW, returning True/False.

    The word w is accepted by an AFW if there exists at least one accepting
    run on w. A run for AFWs is a tree, and an alternating automaton can have
    multiple runs on a given input. A run is accepting if all the leaf nodes
    are accepting states.

    :param dict afw: input AFW;
    :param list word: list of symbols ∈ afw['alphabet'].
    :return: *(bool)*, True if the word is accepted, False otherwise.
    """
    return __recursive_acceptance(afw, afw['initial_state'], word)
52ff4c5fa2c8d2c8af667ee9c03e587b2c4ac10b
3,645,313
from operator import and_


def get_following():
    """
    endpoint: /release/following
    method: GET
    param:
        "[header: Authorization] Token": str - Token received from firebase
    response_type: array
    response:
        id: 1
        created: 123456789
        vol: 1
        chapter: 1
        title: Chapter titles
        url: /chapter/1
        manga:
            title: manga title
            url: /manga/1/manga-title
            cover: manga_cover_url
    error:
        404:
            code: 404
            message: There are no new chapters available
    """
    list_manga = UsersManga.query.filter(and_(
        UsersManga.user_uid.like(g.uid),
        UsersManga.favorited.is_(True),
    )).all()
    list_manga_id = [x.mangas.id for x in list_manga]
    chapters = (
        Chapter.query
        .filter(Chapter.manga_id.in_(list_manga_id))
        .order_by(Chapter.manga_id)
        .distinct(Chapter.manga_id)
        .from_self()
        .order_by(Chapter.created.desc())
        .limit(10).all()
    )
    if not chapters:
        return jsonify({
            'code': 404,
            'message': 'There are no new chapters available'
        })
    return jsonify(chapters_schema.dump(chapters).data)
90999ec6a4e14bf3c3633ef38f0e020cca62623b
3,645,314
import re


def matchNoSpaces(value):
    """Match strings with no spaces."""
    if re.search(r'\s', value):
        return False
    return True
6b33c6b500f78664c04ef8c507e9b25fa19c760d
3,645,315
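Two illustrative calls for matchNoSpaces above:

assert matchNoSpaces("no_spaces_here") is True
assert matchNoSpaces("has a space") is False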
import re


def collect_inline_comments(list_of_strings, begin_token=None, end_token=None):
    """Reads a list of strings and returns all of the inline comments in a list.

    Output form is ['comment', line_number, string_location].
    Returns None if there are none or the tokens are set to None.
    """
    # either token missing means the pattern cannot be built
    if begin_token is None or end_token is None:
        return None
    match = re.compile('{0}(?P<inline_comments>.*){1}'.format(
        re.escape(begin_token), re.escape(end_token)))
    inline_comment_list = []
    for index, line in enumerate(list_of_strings):
        comment_match = re.search(match, line)
        if comment_match:
            inline_comment_list.append([comment_match.group('inline_comments'),
                                        index, comment_match.start()])
    if inline_comment_list:
        return inline_comment_list
    else:
        return None
8ff2dfa055b2f2a3ef72842518b2fb87bcb62c1e
3,645,316
def cli_list(apic, args):
    """Implement CLI command `list`."""
    # pylint: disable=unused-argument
    instances = apic.get_instances()
    if instances:
        print('\n'.join(apic.get_instances()))
    return 0
7b96b1a7cf85c86627382143e1e0786956546ec1
3,645,318
import numpy as np


def is_symmetric(a: np.ndarray):
    """
    Check whether the matrix is symmetric
    :param a:
    :return:
    """
    tol = 1e-10
    return (np.abs(a - a.T) <= tol).all()
223784091cd797d5ba5f3814fb097252d1afc349
3,645,319
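A minimal check of is_symmetric above, assuming only numpy:

import numpy as np

assert is_symmetric(np.array([[1.0, 2.0], [2.0, 1.0]]))
assert not is_symmetric(np.array([[1.0, 2.0], [3.0, 1.0]]))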
def get_number(line, position):
    """Searches for the end of a number.

    Args:
        line (str): The line in which the number was found.
        position (int): The starting position of the number.

    Returns:
        str: The number found.
        int: The position after the number found.
    """
    word = ""
    for pos, char in enumerate(line[position:]):
        if char.isdigit() or char == ".":
            word += char
        else:
            return word, position + pos
    return word, len(line)
df41a1b53953b912e5ce5d6d9b3d69c4133460f1
3,645,320
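An illustrative scan with get_number above, starting at the index of the first digit:

word, after = get_number("abc 3.14 xyz", 4)
assert word == "3.14" and after == 8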
from typing import TextIO

import yaml


def load(f: TextIO) -> Config:
    """Load a configuration from a file-like object f"""
    config = yaml.safe_load(f)
    if isinstance(config["diag_table"], dict):
        config["diag_table"] = DiagTable.from_dict(config["diag_table"])
    return config
0a977a5eda6ad8e0e5aa15315f914186ff65b4d6
3,645,321
def levelize_smooth_or_improve_candidates(to_levelize, max_levels):
    """Turn parameter into a list per level.

    Helper function to preprocess the smooth and improve_candidates
    parameters passed to smoothed_aggregation_solver and rootnode_solver.

    Parameters
    ----------
    to_levelize : {string, tuple, list}
        Parameter to preprocess, i.e., levelize and convert to a
        level-by-level list such that entry i specifies the parameter at
        level i
    max_levels : int
        Defines the maximum number of levels considered

    Returns
    -------
    to_levelize : list
        The parameter list such that entry i specifies the parameter choice
        at level i.

    Notes
    -----
    This routine is needed because the user will pass in a parameter option
    such as smooth='jacobi', or smooth=['jacobi', None], and this option must
    be "levelized", or converted to a list of length max_levels such that
    entry [i] in that list is the parameter choice for level i.

    The parameter choice in to_levelize can be a string, tuple or list. If
    it is a string or tuple, then that option is assumed to be the parameter
    setting at every level. If to_levelize is initially a list and the length
    of the list is less than max_levels, the last entry in the list defines
    that parameter for all subsequent levels.

    Examples
    --------
    >>> from pyamg.util.utils import levelize_smooth_or_improve_candidates
    >>> improve_candidates = ['gauss_seidel', None]
    >>> levelize_smooth_or_improve_candidates(improve_candidates, 4)
    ['gauss_seidel', None, None, None]
    """
    # handle default value (mutable)
    # improve_candidates=(('block_gauss_seidel',
    #                      {'sweep': 'symmetric', 'iterations': 4}),
    #                     None)
    # -> make it a list
    if isinstance(to_levelize, tuple):
        if isinstance(to_levelize[0], tuple):
            to_levelize = list(to_levelize)

    if isinstance(to_levelize, (str, tuple)):
        to_levelize = [to_levelize for i in range(max_levels)]
    elif isinstance(to_levelize, list):
        if len(to_levelize) < max_levels:
            mlz = max_levels - len(to_levelize)
            toext = [to_levelize[-1] for i in range(mlz)]
            to_levelize.extend(toext)
    elif to_levelize is None:
        to_levelize = [(None, {}) for i in range(max_levels)]

    return to_levelize
8b302b8cae04adae010607c394c2e5059aa46eeb
3,645,322
def get_max_num_context_features(model_config):
    """Returns maximum number of context features from a given config.

    Args:
        model_config: A model config file.

    Returns:
        An integer specifying the max number of context features if the model
        config contains context_config, None otherwise
    """
    meta_architecture = model_config.WhichOneof("model")
    meta_architecture_config = getattr(model_config, meta_architecture)
    if hasattr(meta_architecture_config, "context_config"):
        return meta_architecture_config.context_config.max_num_context_features
1df5d220e30cfa5b440c0063149e2ebaf896352a
3,645,323
import hashlib


def hashname(name, secsalt):
    """Obtain a sha256 hash from a name."""
    m = hashlib.sha256()
    m.update((name + secsalt).encode("utf-8"))
    return m.hexdigest()
0db5fbf39eed899162535b6647a047f49e39fa34
3,645,324
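A quick property check of hashname above; the salt values are illustrative:

digest = hashname("alice", "pepper")
assert len(digest) == 64                          # hex-encoded sha256
assert digest != hashname("alice", "other-salt")  # salting changes the hash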
def parse_encoding_header(header):
    """
    Break up the `HTTP_ACCEPT_ENCODING` header into a dict of the form,
    {'encoding-name': qvalue}.
    """
    encodings = {'identity': 1.0}

    for encoding in header.split(","):
        encoding = encoding.strip()  # drop the whitespace after each comma
        if encoding.find(";") > -1:
            encoding, qvalue = encoding.split(";")
            encoding = encoding.strip()
            qvalue = qvalue.split('=', 1)[1]
            if qvalue != "":
                encodings[encoding] = float(qvalue)
            else:
                encodings[encoding] = 1
        else:
            encodings[encoding] = 1
    return encodings
0d423ad51ff14589b5858681cf32a0f318e6dbfa
3,645,326
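An example parse with parse_encoding_header above:

assert parse_encoding_header("gzip;q=0.8, deflate") == {
    'identity': 1.0, 'gzip': 0.8, 'deflate': 1}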
def opf_consfcn(x, om, Ybus, Yf, Yt, ppopt, il=None, *args):
    """Evaluates nonlinear constraints and their Jacobian for OPF.

    Constraint evaluation function for AC optimal power flow, suitable for
    use with L{pips}. Computes constraint vectors and their gradients.

    @param x: optimization vector
    @param om: OPF model object
    @param Ybus: bus admittance matrix
    @param Yf: admittance matrix for "from" end of constrained branches
    @param Yt: admittance matrix for "to" end of constrained branches
    @param ppopt: PYPOWER options vector
    @param il: (optional) vector of branch indices corresponding to
    branches with flow limits (all others are assumed to be unconstrained).
    The default is C{range(nl)} (all branches). C{Yf} and C{Yt} contain
    only the rows corresponding to C{il}.

    @return: C{h} - vector of inequality constraint values (flow limits)
    limit^2 - flow^2, where the flow can be apparent power, real power or
    current, depending on the value of C{OPF_FLOW_LIM} in C{ppopt} (only
    for constrained lines). C{g} - vector of equality constraint values
    (power balances). C{dh} - (optional) inequality constraint gradients,
    column j is gradient of h(j). C{dg} - (optional) equality constraint
    gradients.

    @see: L{opf_costfcn}, L{opf_hessfcn}

    @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
    Autonoma de Manizales)
    @author: Ray Zimmerman (PSERC Cornell)
    """
    ##----- initialize -----
    ## unpack data
    ppc = om.get_ppc()
    baseMVA, bus, gen, branch = \
        ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"]
    vv, _, _, _ = om.get_idx()

    ## problem dimensions
    nb = bus.shape[0]          ## number of buses
    nl = branch.shape[0]       ## number of branches
    ng = gen.shape[0]          ## number of dispatchable injections
    nxyz = len(x)              ## total number of control vars of all types

    ## set default constrained lines
    if il is None:
        il = arange(nl)        ## all lines have limits by default
    nl2 = len(il)              ## number of constrained lines

    ## grab Pg & Qg
    Pg = x[vv["i1"]["Pg"]:vv["iN"]["Pg"]]  ## active generation in p.u.
    Qg = x[vv["i1"]["Qg"]:vv["iN"]["Qg"]]  ## reactive generation in p.u.

    ## put Pg & Qg back in gen
    gen[:, PG] = Pg * baseMVA  ## active generation in MW
    gen[:, QG] = Qg * baseMVA  ## reactive generation in MVAr

    ## rebuild Sbus
    Sbus = makeSbus(baseMVA, bus, gen)  ## net injected power in p.u.

    ## ----- evaluate constraints -----
    ## reconstruct V
    Va = x[vv["i1"]["Va"]:vv["iN"]["Va"]]
    Vm = x[vv["i1"]["Vm"]:vv["iN"]["Vm"]]
    V = Vm * exp(1j * Va)

    ## evaluate power flow equations
    mis = V * conj(Ybus * V) - Sbus

    ##----- evaluate constraint function values -----
    ## first, the equality constraints (power flow)
    g = r_[ mis.real,    ## active power mismatch for all buses
            mis.imag ]   ## reactive power mismatch for all buses

    ## then, the inequality constraints (branch flow limits)
    if nl2 > 0:
        flow_max = (branch[il, RATE_A] / baseMVA)**2
        flow_max[flow_max == 0] = Inf
        if ppopt['OPF_FLOW_LIM'] == 2:  ## current magnitude limit, |I|
            If = Yf * V
            It = Yt * V
            h = r_[ If * conj(If) - flow_max,         ## branch I limits (from bus)
                    It * conj(It) - flow_max ].real   ## branch I limits (to bus)
        else:
            ## compute branch power flows
            ## complex power injected at "from" bus (p.u.)
            Sf = V[ branch[il, F_BUS].astype(int) ] * conj(Yf * V)
            ## complex power injected at "to" bus (p.u.)
            St = V[ branch[il, T_BUS].astype(int) ] * conj(Yt * V)
            if ppopt['OPF_FLOW_LIM'] == 1:  ## active power limit, P (Pan Wei)
                h = r_[ Sf.real**2 - flow_max,   ## branch P limits (from bus)
                        St.real**2 - flow_max ]  ## branch P limits (to bus)
            else:                           ## apparent power limit, |S|
                h = r_[ Sf * conj(Sf) - flow_max,         ## branch S limits (from bus)
                        St * conj(St) - flow_max ].real   ## branch S limits (to bus)
    else:
        h = zeros((0, 1))

    ##----- evaluate partials of constraints -----
    ## index ranges
    iVa = arange(vv["i1"]["Va"], vv["iN"]["Va"])
    iVm = arange(vv["i1"]["Vm"], vv["iN"]["Vm"])
    iPg = arange(vv["i1"]["Pg"], vv["iN"]["Pg"])
    iQg = arange(vv["i1"]["Qg"], vv["iN"]["Qg"])
    iVaVmPgQg = r_[iVa, iVm, iPg, iQg].T

    ## compute partials of injected bus powers
    dSbus_dVm, dSbus_dVa = dSbus_dV(Ybus, V)  ## w.r.t. V
    ## Pbus w.r.t. Pg, Qbus w.r.t. Qg
    neg_Cg = sparse((-ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))

    ## construct Jacobian of equality constraints (power flow) and transpose it
    dg = lil_matrix((2 * nb, nxyz))
    blank = sparse((nb, ng))
    dg[:, iVaVmPgQg] = vstack([
        ## P mismatch w.r.t Va, Vm, Pg, Qg
        hstack([dSbus_dVa.real, dSbus_dVm.real, neg_Cg, blank]),
        ## Q mismatch w.r.t Va, Vm, Pg, Qg
        hstack([dSbus_dVa.imag, dSbus_dVm.imag, blank, neg_Cg])
    ], "csr")
    dg = dg.T

    if nl2 > 0:
        ## compute partials of Flows w.r.t. V
        if ppopt['OPF_FLOW_LIM'] == 2:  ## current
            dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \
                dIbr_dV(branch[il, :], Yf, Yt, V)
        else:                           ## power
            dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \
                dSbr_dV(branch[il, :], Yf, Yt, V)
        if ppopt['OPF_FLOW_LIM'] == 1:  ## real part of flow (active power)
            dFf_dVa = dFf_dVa.real
            dFf_dVm = dFf_dVm.real
            dFt_dVa = dFt_dVa.real
            dFt_dVm = dFt_dVm.real
            Ff = Ff.real
            Ft = Ft.real

        ## squared magnitude of flow (of complex power or current, or real power)
        df_dVa, df_dVm, dt_dVa, dt_dVm = \
            dAbr_dV(dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft)

        ## construct Jacobian of inequality constraints (branch limits)
        ## and transpose it.
        dh = lil_matrix((2 * nl2, nxyz))
        dh[:, r_[iVa, iVm].T] = vstack([
            hstack([df_dVa, df_dVm]),   ## "from" flow limit
            hstack([dt_dVa, dt_dVm])    ## "to" flow limit
        ], "csr")
        dh = dh.T
    else:
        dh = None

    return h, g, dh, dg
f90083088e6de9668ed44cdc950aa81bf96e2450
3,645,327
import numpy as np


def iou3d_kernel(gt_boxes, pred_boxes):
    """
    Core iou3d computation (with cuda)

    Args:
        gt_boxes: [N, 7] (x, y, z, w, l, h, rot) in Lidar coordinates
        pred_boxes: [M, 7]

    Returns:
        iou3d: [N, M]
    """
    intersection_2d = rotate_iou_gpu_eval(gt_boxes[:, [0, 1, 3, 4, 6]],
                                          pred_boxes[:, [0, 1, 3, 4, 6]],
                                          criterion=2)
    gt_max_h = gt_boxes[:, [2]] + gt_boxes[:, [5]] * 0.5
    gt_min_h = gt_boxes[:, [2]] - gt_boxes[:, [5]] * 0.5
    pred_max_h = pred_boxes[:, [2]] + pred_boxes[:, [5]] * 0.5
    pred_min_h = pred_boxes[:, [2]] - pred_boxes[:, [5]] * 0.5
    max_of_min = np.maximum(gt_min_h, pred_min_h.T)
    min_of_max = np.minimum(gt_max_h, pred_max_h.T)
    inter_h = min_of_max - max_of_min
    inter_h[inter_h <= 0] = 0
    # inter_h[intersection_2d <= 0] = 0
    intersection_3d = intersection_2d * inter_h
    gt_vol = gt_boxes[:, [3]] * gt_boxes[:, [4]] * gt_boxes[:, [5]]
    pred_vol = pred_boxes[:, [3]] * pred_boxes[:, [4]] * pred_boxes[:, [5]]
    union_3d = gt_vol + pred_vol.T - intersection_3d
    # eps = 1e-6
    # union_3d[union_3d < eps] = eps
    iou3d = intersection_3d / union_3d
    return iou3d
368f457b7afe6e5653839d130b6d6b8a6ce1ab7c
3,645,328
import numpy as np


def get_final_metrics(raw_metrics, summarized=False):
    """Calculates final metrics from all categories.

    :param summarized: True if the result should contain only final metrics
        (precision, recall, f1 and f0.5), False if the result should contain
        all the per-category metrics too.
    :param raw_metrics: A dictionary of tp, fp and fn values for each category
    :return: a dictionary with the precision, recall, f1 and f0.5 metrics,
        as well as the input metrics data.
    """
    tp = 0
    fp = 0
    fn = 0
    num_values = 0
    num_samples = 0
    final_metrics = dict()
    for category in raw_metrics:
        category_tp = raw_metrics[category]['TP']
        category_fp = raw_metrics[category]['FP']
        category_fn = raw_metrics[category]['FN']
        final_metrics[category] = {}
        if category_tp > 0:
            final_metrics[category]['precision'] = category_tp / (category_tp + category_fp)
            final_metrics[category]['recall'] = category_tp / (category_tp + category_fn)
            final_metrics[category]['f1'] = f_beta(final_metrics[category]['precision'],
                                                   final_metrics[category]['recall'],
                                                   1)
        if 'num_values' in raw_metrics[category]:
            final_metrics[category]['num_values'] = raw_metrics[category]['num_values']
        if 'num_samples' in raw_metrics[category]:
            final_metrics[category]['num_samples'] = raw_metrics[category]['num_samples']
        tp += category_tp
        fp += category_fp
        fn += category_fn
        # use .get() so categories without counts don't raise a KeyError
        num_values += raw_metrics[category].get('num_values', 0)
        num_samples += raw_metrics[category].get('num_samples', 0)

    if (tp + fp) > 0:
        final_metrics['precision'] = tp / (tp + fp)
    else:
        final_metrics['precision'] = np.nan
    if (tp + fn) > 0:
        final_metrics['recall'] = tp / (tp + fn)
    else:
        final_metrics['recall'] = np.nan
    final_metrics['f1'] = f_beta(final_metrics['precision'], final_metrics['recall'], 1)
    final_metrics['f0.5'] = f_beta(final_metrics['precision'], final_metrics['recall'], 0.5)
    final_metrics['num_values'] = num_values
    final_metrics['num_samples'] = num_samples

    if summarized:
        return summarize_metrics(final_metrics)
    else:
        return final_metrics
4782342efe12765a4de7d4eb9ed2b458f7d56686
3,645,329
def get_data_meta_path(either_file_path: str) -> tuple:
    """Get either a meta or binary file path and return both as a tuple.

    Arguments:
        either_file_path {str} -- path of a meta/binary file

    Returns:
        tuple -- (binary_path, meta_path)
    """
    file_stripped = '.'.join(either_file_path.split('.')[:-1])
    return tuple([file_stripped + ext for ext in ['.bin', '.meta']])
0456186cd99d5899e2433ac9e44ba0424077bcc0
3,645,331
import click


def group(name):
    """
    Allow creating a group with a default click context and a class for
    Click's ``didyoumean`` suggestions without having to repeat it for
    every group.
    """
    return click.group(
        name=name,
        context_settings=CLICK_CONTEXT_SETTINGS,
        cls=AliasedGroup)
5a36442760cdb86bb89d76bf88c3aa2f3d5dea5b
3,645,332
def get_files(target_files, config):
    """Retrieve files associated with the potential inputs."""
    out = []
    find_fn = _find_file(config)
    for fname in target_files.keys():
        remote_fname = find_fn(fname)
        if remote_fname:
            out.append(remote_fname)
    return out
577feb99d15eeec5e22d96dd9fce47a311d60cad
3,645,333
def cmd(func, *args, **kwargs):
    """Takes a function followed by its arguments"""
    def command(*a, **ka):
        return func(*args, **kwargs)
    return command
9ace378335461080b51dce4936c9a8e0965b3454
3,645,334
def flow_accumulation(receiver_nodes, baselevel_nodes, node_cell_area=1.0,
                      runoff_rate=1.0, boundary_nodes=None):
    """Calculate drainage area and (steady) discharge.

    Calculates and returns the drainage area and (steady) discharge at each
    node, along with a downstream-to-upstream ordered list (array) of node
    IDs.

    Examples
    --------
    >>> import numpy as np
    >>> from landlab.components.flow_accum import flow_accumulation
    >>> r = np.array([2, 5, 2, 7, 5, 5, 6, 5, 7, 8])-1
    >>> b = np.array([4])
    >>> a, q, s = flow_accumulation(r, b)
    >>> a
    array([  1.,   3.,   1.,   1.,  10.,   4.,   3.,   2.,   1.,   1.])
    >>> q
    array([  1.,   3.,   1.,   1.,  10.,   4.,   3.,   2.,   1.,   1.])
    >>> s
    array([4, 1, 0, 2, 5, 6, 3, 8, 7, 9])
    """
    s = make_ordered_node_array(receiver_nodes, baselevel_nodes)
    # Note that this ordering of s DOES INCLUDE closed nodes. It really
    # shouldn't! But as we don't have a copy of the grid accessible here,
    # we'll solve this problem as part of route_flow_dn.
    a, q = find_drainage_area_and_discharge(s, receiver_nodes, node_cell_area,
                                            runoff_rate, boundary_nodes)
    return a, q, s
e3a7801ed4639ad8168491c4a1689c37adfe930f
3,645,335
def extract_ids(response_content):
    """Given the content of a search result, returns a list of all ids.

    This method is meant to work with PubMed.
    """
    ids = str(response_content).split("<Id>")
    ids_str = "".join(ids)
    ids = ids_str.split("</Id>")
    ids.remove(ids[0])
    ids.remove(ids[len(ids) - 1])
    for i in range(len(ids)):
        ids[i] = int(ids[i][2:])
    return ids
69ad17a9a6bc3b56a11dceafb802fbf7eb1eac66
3,645,336
import numpy as np
import talib


def gatorosc(candles: np.ndarray, sequential=False) -> GATOR:
    """
    Gator Oscillator by Bill M. Williams

    :param candles: np.ndarray
    :param sequential: bool - default=False
    :return: float | np.ndarray
    """
    if not sequential and len(candles) > 240:
        candles = candles[-240:]

    jaw = shift(smma(candles, period=13, sequential=True), 8)
    teeth = shift(smma(candles, period=8, sequential=True), 5)
    lips = shift(smma(candles, period=5, sequential=True), 3)

    upper = np.abs(jaw - teeth)
    lower = -np.abs(teeth - lips)
    upper_change = talib.MOM(upper, timeperiod=1)
    lower_change = -talib.MOM(lower, timeperiod=1)

    if sequential:
        return GATOR(upper, lower, upper_change, lower_change)
    else:
        return GATOR(upper[-1], lower[-1], upper_change[-1], lower_change[-1])
2890fa42836ea020ebb54427f7b3c8a773cf13c5
3,645,337
def program_item(prog_hash):
    """
    GET,DELETE /programs/<prog_hash>: query programs

    :prog_hash: program checksum/identifier
    :returns: flask response
    """
    if request.method == 'GET':
        with client.client_access() as c:
            prog = c.user_programs.get(prog_hash)
        return respond_json(prog.properties) if prog else respond_error(404)
    else:
        raise NotImplementedError
7a27d4083facc02e71e08a9bffda217fadc5a22e
3,645,338
import json
import logging


def lambda_handler(event, context):
    """Federate Token Exchange Lambda Function"""
    if not "body" in event:
        return helper.build_response(
            {"message": "You do not have permission to access this resource."}, 403
        )

    input_json = json.loads(event["body"])

    # verify the client_id and redirect_uri are present
    if not "client_id" in input_json or not "redirect_uri" in input_json:
        return helper.build_response(
            {"message": "You do not have permission to access this resource."}, 403
        )

    response_type = "code"
    if "response_type" in input_json:
        response_type = input_json["response_type"]

    client_id = input_json["client_id"]
    redirect_uri = input_json["redirect_uri"]
    _, msg = helper.verify_client_id_and_redirect_uri(
        user_pool_id=USER_POOL_ID, client_id=client_id, redirect_uri=redirect_uri
    )
    if msg != None:
        logging.info(msg)
        return helper.build_response({"message": msg}, 403)

    federate_account = None
    platform = input_json["platform"].lower()
    platform_login_data = dict()
    platform_login_data["platform"] = platform

    # register the federate record in the user table
    if (
        "id_token" in input_json
        or "access_token" in input_json
        or "platform_code" in input_json
    ):
        if "platform_code" in input_json:
            platform_code = input_json["platform_code"]
            secret_client = boto3.client("secretsmanager", region_name="ap-southeast-1")
            if platform == "linkedin":
                secret = secret_client.get_secret_value(SecretId=LINKEDIN_SECRET_ARN)
                secret_dict = json.loads(secret["SecretString"])
                platform_client_id = secret_dict["client_id"]
                platform_client_secret = secret_dict["client_secret"]
                if "platform_redirect_uri" not in input_json:
                    return helper.build_response(
                        {
                            "message": "You do not have permission to access this resource."
                        },
                        403,
                    )
                platform_redirect_uri = input_json["platform_redirect_uri"]
                resp, msg = federate.linkedin_code_to_access_token(
                    linkedin_client_id=platform_client_id,
                    linkedin_client_secret=platform_client_secret,
                    linkedin_redirect_uri=platform_redirect_uri,
                    code=platform_code,
                )
                if msg != None:
                    logging.info(msg)
                    return helper.build_response({"message": msg}, 403)
                platform_login_data["access_token"] = resp["access_token"]
            elif platform == "facebook":
                secret = secret_client.get_secret_value(SecretId=FACEBOOK_SECRET_ARN)
                secret_dict = json.loads(secret["SecretString"])
                platform_client_id = secret_dict["client_id"]
                platform_client_secret = secret_dict["client_secret"]
                resp, msg = federate.facebook_code_to_access_token(
                    facebook_client_id=platform_client_id,
                    facebook_client_secret=platform_client_secret,
                    code=platform_code,
                )
                if msg != None:
                    logging.info(msg)
                    return helper.build_response({"message": msg}, 403)
                platform_login_data["access_token"] = resp["access_token"]
            elif platform == "google":
                secret = secret_client.get_secret_value(SecretId=GOOGLE_SECRET_ARN)
                secret_dict = json.loads(secret["SecretString"])
                platform_client_id = secret_dict["client_id"]
                platform_client_secret = secret_dict["client_secret"]
                resp, msg = federate.google_code_to_access_token(
                    google_client_id=platform_client_id,
                    google_client_secret=platform_client_secret,
                    code=platform_code,
                )
                if msg != None:
                    logging.info(msg)
                    return helper.build_response({"message": msg}, 403)
                platform_login_data["access_token"] = resp["access_token"]
        if "id_token" in input_json:
            platform_login_data["id_token"] = input_json["id_token"]
        if "access_token" in input_json:
            platform_login_data["access_token"] = input_json["access_token"]
        federate_account, msg = federate.verify_federate_and_register_or_get_user(
            user_table_name=USER_TABLE_NAME,
            platform_login_data=platform_login_data,
            mode="get",
        )
        if msg != None:
            logging.info(msg)
            return helper.build_response({"message": msg}, 403)

    token_response = dict()
    token_response["platform"] = platform
    if "id_token" in platform_login_data:
        token_response["platform_id_token"] = platform_login_data["id_token"]
    if "access_token" in platform_login_data:
        token_response["platform_access_token"] = platform_login_data["access_token"]

    if not federate_account is None:
        # if the 3rd party access_token validated correctly, generate our own
        # token using the CUSTOM_AUTH challenge
        password = ""
        resp, msg = helper.initiate_auth(
            USER_POOL_ID,
            federate_account["cognito_email"],
            password,
            client_id,
            auth_flow="CUSTOM_AUTH",
        )
        # cognito error message check
        if msg != None:
            logger.info(msg)
            return helper.build_response({"message": msg}, 403)
        logger.info("CHALLENGE PASSED")
        if "AuthenticationResult" in resp:
            formatted_authentication_result = helper.format_authentication_result(resp)
            if response_type == "code":
                # get the authorization code
                auth_code, msg = helper.store_token_to_dynamodb_and_get_auth_code(
                    auth_code_table_name=AUTH_CODE_TABLE_NAME,
                    client_id=client_id,
                    redirect_uri=redirect_uri,
                    token_set=formatted_authentication_result,
                )
                if msg != None:
                    logging.info(msg)
                    return helper.build_response({"message": msg}, 403)
                # return the authorization code
                return helper.build_response({"code": auth_code}, 200)
            elif response_type == "token":
                token_response["access_token"] = formatted_authentication_result["access_token"]
                token_response["id_token"] = formatted_authentication_result["id_token"]
                token_response["refresh_token"] = formatted_authentication_result["refresh_token"]
                token_response["expires_in"] = formatted_authentication_result["expires_in"]
                token_response["token_type"] = formatted_authentication_result["token_type"]
            else:
                return helper.build_response(
                    {"message": "Unsupported response type."}, 403
                )

    logger.info(token_response)
    return helper.build_response(token_response, 200)
16456ebb905cdb2b1782a1017928574e4c90b9cd
3,645,339
from typing import List


def find_domain_field(fields: List[str]):
    """Find and return domain field value."""
    for field_index, field in enumerate(fields):
        if field == "query:":
            # the domain value follows the "query:" marker
            return fields[field_index + 1]
    return None
fac45f0bd7cead3ad1ec01307c6c623c8d39dbd4
3,645,340
def placeValueOf(num: int, place: int) -> int:
    """
    Get the value on the place specified.

    :param num: The num
    :param place: The place. 1 for unit place, 10 for tens place, 100 for hundreds place.
    :return: The value digit.
    """
    return lastDigitOf(num // place)
8b50ca8a79b267f40b2638b331879746e0bcad7f
3,645,341
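An illustrative call for placeValueOf above; lastDigitOf is not defined in the entry, so the sketch assumes it returns n % 10:

def lastDigitOf(n: int) -> int:  # assumed behavior of the undefined helper
    return n % 10

assert placeValueOf(3829, 100) == 8  # hundreds digit of 3829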
from shapely.geometry import Polygon, MultiPolygon


def prepare_polygon_coords_for_bokeh(countries):
    """Prepares the country polygons for plotting with Bokeh.

    To plot series of polygons, Bokeh needs two lists of lists (one for x
    coordinates, and another for y coordinates). Each element in the outer
    list represents a single polygon, and each element in the inner lists
    represents the coordinate for a single point in the given polygon.

    This function takes a GeoDataFrame with a given set of countries, and
    returns Bokeh-friendly lists of x coordinates and y coordinates for
    those countries.

    PARAMETERS:
    -----------
    countries: GeoDataFrame with a given set of countries.

    OUTPUTS:
    --------
    x_coords, y_coords: Bokeh-friendly lists of x and y coordinates for
    those countries.
    """
    # Simplify shapes (to resolution of 10000 meters), convert polygons to multipolygons.
    list_of_polygons = []
    for raw_poly in countries['geometry']:
        raw_poly = raw_poly.simplify(10000, preserve_topology=False)
        if isinstance(raw_poly, Polygon):
            raw_poly = MultiPolygon([raw_poly])
        for poly in list(raw_poly):
            list_of_polygons.append(poly)

    # Create lists of lists.
    x_coords = [list(poly.exterior.coords.xy[0]) for poly in list_of_polygons]
    y_coords = [list(poly.exterior.coords.xy[1]) for poly in list_of_polygons]
    return x_coords, y_coords
1d325e895cf8efdcaf69ae1ebcb369216e3378de
3,645,342
def get_incident_ids_as_options(incidents):
    """
    Collect the campaign incident ids from the context and return them as
    options for a MultiSelect field.

    :type incidents: ``list``
    :param incidents: the campaign incidents to collect ids from

    :rtype: ``dict``
    :return: dict with the ids as options for MultiSelect field
        e.g. {"hidden": False, "options": ids}
    """
    try:
        ids = [str(incident['id']) for incident in incidents]
        ids.sort(key=lambda incident_id: int(incident_id))
        ids.insert(0, ALL_OPTION)
        return {"hidden": False, "options": ids}
    except KeyError as e:
        raise DemistoException(NO_ID_IN_CONTEXT) from e
ea44808dfa7b5cb6aa43951062bf3a2401f0c588
3,645,343
from typing import List
import glob
import csv


def get_result(dir_path: str) -> List[float]:
    """Extract win/loss data from the match logs (csv).

    Args:
        dir_path (str): path to the directory holding the match logs to extract

    Returns:
        List[float]: win-rate data
    """
    files = glob.glob(dir_path + "*.csv")
    result = []
    for file in files:
        with open(file, "r") as csv_file:
            csv_data = csv.reader(csv_file, delimiter=",", doublequote=True,
                                  lineterminator="\r\n", quotechar='"',
                                  skipinitialspace=True)
            win = 0
            lose = 0
            for data in csv_data:
                if int(data[1]) >= int(data[2]):
                    win += 1
                else:
                    lose += 1
            result.append(win / (win + lose))
    return result
52f6e1d5e432ec1d56524654cba2ddae9c60426c
3,645,344
def get_local_info(hass):
    """Get HA's local location config."""
    latitude = hass.config.latitude
    longitude = hass.config.longitude
    timezone = str(hass.config.time_zone)
    elevation = hass.config.elevation

    return latitude, longitude, timezone, elevation
1fdefbad46c7cdb58abdc36f7d8799aa1e4af87c
3,645,347
def if_present_phrase(src_str_tokens, phrase_str_tokens):
    """
    :param src_str_tokens: a list of strings (words) of source text
    :param phrase_str_tokens: a list of strings (words) of a phrase
    :return: (match_flag, match_pos_idx)
    """
    match_flag = False  # initialized so the source being shorter than the phrase is safe
    match_pos_idx = -1
    for src_start_idx in range(len(src_str_tokens) - len(phrase_str_tokens) + 1):
        match_flag = True
        # iterate each word in target; if one word does not match,
        # set match=False and break
        for seq_idx, seq_w in enumerate(phrase_str_tokens):
            src_w = src_str_tokens[src_start_idx + seq_idx]
            if src_w != seq_w:
                match_flag = False
                break
        if match_flag:
            match_pos_idx = src_start_idx
            break
    return match_flag, match_pos_idx
37297c78bb26c7cda28010e1f7567a19e2f875ee
3,645,348
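A short match example for if_present_phrase above:

flag, pos = if_present_phrase("the quick brown fox".split(), "brown fox".split())
assert flag is True and pos == 2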
import numpy as np
from scipy.interpolate import interp1d


def fit_2D_xanes_non_iter(img_xanes, eng, spectrum_ref, error_thresh=0.1):
    """
    Solve the equation A*x = b, where:

    Inputs:
    ----------
    A: reference spectrum (2-column array: xray_energy vs. absorption_spectrum)
    x: fitted coefficient of each ref spectrum
    b: experimental 2D XANES data

    Outputs:
    ----------
    fit_coef: the 'x' in the equation 'Ax=b': fitted coefficient of each ref spectrum
    cost: cost between fitted spectrum and raw data
    """
    num_ref = len(spectrum_ref)
    spec_interp = {}
    comp = {}
    A = []
    s = img_xanes.shape
    for i in range(num_ref):
        tmp = interp1d(
            spectrum_ref[f"ref{i}"][:, 0], spectrum_ref[f"ref{i}"][:, 1], kind="cubic"
        )
        A.append(tmp(eng).reshape(1, len(eng)))
        spec_interp[f"ref{i}"] = tmp(eng).reshape(1, len(eng))
        comp[f"A{i}"] = spec_interp[f"ref{i}"].reshape(len(eng), 1)
        comp[f"A{i}_t"] = comp[f"A{i}"].T
    # e.g., spectrum_ref contains: ref1, ref2, ref3
    # e.g., comp contains: A1, A2, A3, A1_t, A2_t, A3_t
    #   A1 = ref1.reshape(110, 1)
    #   A1_t = A1.T
    A = np.squeeze(A).T
    M = np.zeros([num_ref + 1, num_ref + 1])
    for i in range(num_ref):
        for j in range(num_ref):
            M[i, j] = np.dot(comp[f"A{i}_t"], comp[f"A{j}"])
        M[i, num_ref] = 1
    M[num_ref] = np.ones((1, num_ref + 1))
    M[num_ref, -1] = 0
    # e.g.
    # M = np.array([[float(np.dot(A1_t, A1)), float(np.dot(A1_t, A2)), float(np.dot(A1_t, A3)), 1.],
    #               [float(np.dot(A2_t, A1)), float(np.dot(A2_t, A2)), float(np.dot(A2_t, A3)), 1.],
    #               [float(np.dot(A3_t, A1)), float(np.dot(A3_t, A2)), float(np.dot(A3_t, A3)), 1.],
    #               [1., 1., 1., 0.]])
    M_inv = np.linalg.inv(M)
    b_tot = img_xanes.reshape(s[0], -1)
    B = np.ones([num_ref + 1, b_tot.shape[1]])
    for i in range(num_ref):
        B[i] = np.dot(comp[f"A{i}_t"], b_tot)
    x = np.dot(M_inv, B)
    x = x[:-1]
    x[x < 0] = 0
    x_sum = np.sum(x, axis=0, keepdims=True)
    x = x / x_sum
    cost = np.sum((np.dot(A, x) - b_tot) ** 2, axis=0) / s[0]
    cost = cost.reshape(s[1], s[2])
    x = x.reshape(num_ref, s[1], s[2])
    # cost = compute_xanes_fit_cost(img_xanes, x, spec_interp)
    mask = compute_xanes_fit_mask(cost, error_thresh)
    mask = mask.reshape(s[1], s[2])
    mask_tile = np.tile(mask, (x.shape[0], 1, 1))
    x = x * mask_tile
    cost = cost * mask
    return x, cost
2146223aae8bf5ac13f658134a09c5682219777d
3,645,349
import numpy as np
from matplotlib import cm


def get_cmap(n_fg):
    """Generate a color map for visualizing foreground objects

    Args:
        n_fg (int): Number of foreground objects

    Returns:
        cmaps (numpy.ndarray): Colormap
    """
    cmap = cm.get_cmap('Set1')
    cmaps = []
    for i in range(n_fg):
        cmaps.append(np.asarray(cmap(i))[:3])
    cmaps = np.vstack(cmaps)
    return cmaps
010df9e117d724de398eeb919417a71795aad460
3,645,350
def GetBasinOutlines(DataDirectory, basins_fname):
    """
    This function takes in the raster of basins and gets a dict of basin
    polygons, where the key is the basin key and the value is a shapely
    polygon of the basin.

    IMPORTANT: In this case the "basin key" is usually the junction number:
    this function will use the raster values as keys and in general the
    basin rasters are output based on junction indices rather than keys

    Args:
        DataDirectory (str): the data directory with the basin raster
        basins_fname (str): the basin raster

    Returns:
        dict of shapely polygons with the basins, keyed by raster value

    Author: FJC
    """
    # read in the basins raster
    this_fname = basins_fname.split('.')
    print(basins_fname)
    OutputShapefile = this_fname[0] + '.shp'

    # polygonise the raster
    BasinDict = LSDMap_IO.PolygoniseRaster(DataDirectory, basins_fname, OutputShapefile)
    return BasinDict
0731451ff765318d63f36950be88dd5c73504bf0
3,645,351
import cv2


def detect_park(frame, hsv):
    """
    Expects: HSV image of any shape + current frame
    Returns: TBD
    """
    # hsv = cv2.cvtColor(frame, cfg.COLOUR_CONVERT)  # convert to HSV CS

    # filter
    mask = cv2.inRange(hsv, lower_green_park, upper_green_park)

    # operations
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=1)
    img = cv2.bitwise_and(frame, frame, mask=mask)

    # logic
    height, width = mask.shape[:2]
    contours, _ = cv2.findContours(mask[0:int(height / 2), 0:width],
                                   cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)         # calculate area of the contour
        x, y, w, h = cv2.boundingRect(cnt)  # create a rectangle around the contour
        # roi = frame[y:y+h, x:x+w]         # select an ROI out of the frame

        # check if the ROI is in allowed area
        vr = valid_range(x, y, w, h, frame)
        if not vr:
            continue

        # calculate ratio of sides - anything not square is not worth checking
        sr = is_squarish(h, w)
        if not sr:
            continue

        # check the area size (too small ignore, too big ignore)
        if cfg.AREA_SIZE_PARK < area < cfg.MAX_AREA_SIZE:  # and (w / h < 1.0):
            if cfg.DEMO_MODE:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (127, 255, 127), 2)
                cv2.putText(frame, "PARK", (x, y), cfg.FONT, 2, (127, 255, 127))
            return "park"
    return None
5cd63590741ac005e7b05090ae77bca6623cf420
3,645,352
import numpy as np


def normalize(mx):
    """Row-normalize sparse matrix"""
    mx = np.array(mx)
    rowsum = mx.sum(axis=1)
    r_inv = np.power(rowsum, -1.0).flatten()  # use -1.0 as asym matrix
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = np.diag(r_inv)
    a = np.dot(r_mat_inv, mx)
    # a = np.dot(a, r_mat_inv)  # skip for asym matrix
    return a  # normalized matrix
6351bc777731eed2119e59ee411d7338e55d2ced
3,645,353
import torch as th


def th_allclose(x, y):
    """
    Determine whether two torch tensors have the same values.
    Mimics np.allclose
    """
    return th.sum(th.abs(x - y)) < 1e-5
e788192dede11e9af8bef08b7aff39440e0fe318
3,645,354
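A minimal sanity check for th_allclose above, assuming torch is installed:

import torch as th

assert th_allclose(th.ones(3), th.ones(3))
assert not th_allclose(th.ones(3), th.zeros(3))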
def _check_h5_installed(strict=True):
    """Aux function."""
    try:
        import h5py  # imported here so a missing h5py is actually caught
        return h5py
    except ImportError:
        if strict is True:
            raise RuntimeError('For this functionality to work, the h5py '
                               'library is required.')
        else:
            return False
732300ff4171366c8a3328669068120e21411890
3,645,355
import numpy as np


def calc_c_o(row):
    """
    C or O excess

    if C/O > 1:
        excess = log10 [(YC/YH) - (YO/YH)] + 12
    if C/O < 1:
        excess = log10 [(YO/YH) - (YC/YH)] + 12

    where
        YC = X(C12)/12 + X(C13)/13
        YO = X(O16)/16 + X(O17)/17 + X(O18)/18
        YH = XH/1.00794
    """
    yh = row['H'] / 1.00794
    yc = row['C12'] / 12. + row['C13'] / 13.
    yo = row['O16'] / 16. + row['O17'] / 17. + row['O18'] / 18.
    if row['CO'] > 1:
        excess = np.log10((yc / yh) - (yo / yh)) + 12.
    else:
        excess = np.log10((yo / yh) - (yc / yh)) + 12.
    return excess
16677f983e17465a509f2b27ec1866d3e56f00da
3,645,356
import json


def create_job_from_file(job_file):
    """Creates a job from a JSON job specification.

    :param job_file: Path to job file.
    :type job_file: str
    :returns: Job object of specified type.
    """
    logger.info("Creating Job from {}.".format(job_file))
    with open(job_file) as f:
        params = json.loads(f.read())
    try:
        if not params['type'] in job_types:
            raise utils.JobDescriptionValueError('Job type {} is not valid.'.format(params['type']))
    except KeyError as e:
        raise utils.JobDescriptionKeyError(e.message)
    params['job_file'] = job_file
    return job_types[params['type']](params)
3e1e2eaa1892dafc310fcb48abd096a59cb9b5a0
3,645,357
def compile_insert_unless_conflict(
    stmt: irast.InsertStmt,
    typ: s_objtypes.ObjectType, *,
    ctx: context.ContextLevel,
) -> irast.OnConflictClause:
    """Compile an UNLESS CONFLICT clause with no ON

    This requires synthesizing a conditional based on all the exclusive
    constraints on the object.
    """
    pointers = _get_exclusive_ptr_constraints(typ, ctx=ctx)
    obj_constrs = typ.get_constraints(ctx.env.schema).objects(ctx.env.schema)

    select_ir, always_check, _ = compile_conflict_select(
        stmt, typ, constrs=pointers, obj_constrs=obj_constrs,
        parser_context=stmt.context, ctx=ctx)

    return irast.OnConflictClause(
        constraint=None, select_ir=select_ir, always_check=always_check,
        else_ir=None)
feaa0f0ea54ee51d78fe3b95c3ef20e6ea6bb4e2
3,645,358
import io

from PIL import Image
from torchvision import transforms


def plot_to_image(figure):
    """
    Converts the matplotlib plot specified by "figure" to a PNG image and
    returns it. The supplied figure is closed and inaccessible after this
    call.
    """
    # Save the plot to a PNG in memory
    buf = io.BytesIO()
    figure.savefig(buf, format="png")
    buf.seek(0)
    # Convert PNG buffer to a torch tensor
    trans = transforms.ToTensor()
    image = buf.getvalue()
    image = Image.open(io.BytesIO(image))
    image = trans(image)
    return image
14b9f223372f05f32fc096a7dafcbce273b33d0d
3,645,359
import numpy as np


def sent2vec(model, words):
    """Convert text to a vector.

    Arguments:
        model -- Doc2Vec model
        words -- tokenized text

    Returns:
        vector array
    """
    vect_list = []
    for w in words:
        try:
            vect_list.append(model.wv[w])
        except KeyError:  # skip out-of-vocabulary words
            continue
    vect_list = np.array(vect_list)
    vect = vect_list.sum(axis=0)
    return vect / np.sqrt((vect ** 2).sum())
06569e2bdb13d31b1218ab9a3070affe626fd915
3,645,360
import requests
from requests.auth import HTTPDigestAuth


def postXML(server: HikVisionServer, path, xmldata=None):
    """
    This returns the response of the DVR to the following POST request

    Parameters:
        server (HikVisionServer): The basic info about the DVR
        path (str): The ISAPI path that will be executed
        xmldata (str): This should be formatted using `utils.dict2xml`.
            This is the data that will be transmitted to the server.
            It is optional.
    """
    headers = {'Content-Type': 'application/xml'}
    responseRaw = requests.post(
        server.address() + path,
        data=xmldata,
        headers=headers,
        auth=HTTPDigestAuth(server.user, server.password))
    if responseRaw.status_code == 401:
        raise Exception("Wrong username or password")
    responseXML = responseRaw.text
    return responseXML
a5566e03b13b0938e84928dc09b6509e2dfd8a12
3,645,361
import pandas as pd
import requests


def get_government_trading(gov_type: str, ticker: str = "") -> pd.DataFrame:
    """Returns the most recent transactions by members of government

    Parameters
    ----------
    gov_type: str
        Type of government data between: 'congress', 'senate', 'house',
        'contracts', 'quarter-contracts' and 'corporate-lobbying'
    ticker : str
        Ticker to get congress trading data from

    Returns
    -------
    pd.DataFrame
        Most recent transactions by members of U.S. Congress
    """
    if gov_type == "congress":
        if ticker:
            url = (
                f"https://api.quiverquant.com/beta/historical/congresstrading/{ticker}"
            )
        else:
            url = "https://api.quiverquant.com/beta/live/congresstrading"
    elif gov_type.lower() == "senate":
        if ticker:
            url = f"https://api.quiverquant.com/beta/historical/senatetrading/{ticker}"
        else:
            url = "https://api.quiverquant.com/beta/live/senatetrading"
    elif gov_type.lower() == "house":
        if ticker:
            url = f"https://api.quiverquant.com/beta/historical/housetrading/{ticker}"
        else:
            url = "https://api.quiverquant.com/beta/live/housetrading"
    elif gov_type.lower() == "contracts":
        if ticker:
            url = (
                f"https://api.quiverquant.com/beta/historical/govcontractsall/{ticker}"
            )
        else:
            url = "https://api.quiverquant.com/beta/live/govcontractsall"
    elif gov_type.lower() == "quarter-contracts":
        if ticker:
            url = f"https://api.quiverquant.com/beta/historical/govcontracts/{ticker}"
        else:
            url = "https://api.quiverquant.com/beta/live/govcontracts"
    elif gov_type.lower() == "corporate-lobbying":
        if ticker:
            url = f"https://api.quiverquant.com/beta/historical/lobbying/{ticker}"
        else:
            url = "https://api.quiverquant.com/beta/live/lobbying"
    else:
        return pd.DataFrame()

    headers = {
        "accept": "application/json",
        "X-CSRFToken": "TyTJwjuEC7VV7mOqZ622haRaaUr0x0Ng4nrwSRFKQs7vdoBcJlK9qjAS69ghzhFu",  # pragma: allowlist secret
        "Authorization": f"Token {API_QUIVERQUANT_KEY}",
    }
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        if gov_type in ["congress", "senate", "house"]:
            return pd.DataFrame(response.json()).rename(
                columns={"Date": "TransactionDate", "Senator": "Representative"}
            )
        return pd.DataFrame(response.json())
    return pd.DataFrame()
ba3599d22825cd4a3ed3cb71f384561627067b71
3,645,362
def pf_mobility(phi, gamma):
    """ Phase field mobility function. """
    # return gamma * (phi**2-1.)**2
    # func = 1.-phi**2
    # return 0.75 * gamma * 0.5 * (1. + df.sign(func)) * func
    return gamma
10045807bdb030c362d700d61789c0a490aad93b
3,645,363
import pandas as pd
from tabulate import tabulate


def print_df_stats(df: pd.DataFrame, df_train: pd.DataFrame, df_val: pd.DataFrame,
                   df_test: pd.DataFrame, label_encoder, prediction):
    """
    Print some statistics of the split dataset.
    """
    try:
        labels = list(label_encoder.classes_)
    except AttributeError:
        labels = []

    headers = ["Images"]
    for label in labels:
        headers.append("-> " + str(label))

    def get_stats(df):
        # One row per split: total count, then count (fraction) per label.
        # DF_DICT is a module-level mapping from prediction name to column.
        lengths = [len(df)]
        for label in range(len(labels)):
            df_label = df[df[DF_DICT[prediction]] == label]
            lengths.append(
                str(len(df_label)) + " (" + str(round((len(df_label) / len(df)), 2)) + ")"
            )
        return lengths

    stats = []
    stats.append(["All"] + get_stats(df))
    stats.append(["Train"] + get_stats(df_train))
    stats.append(["Val"] + get_stats(df_val))
    stats.append(["Test"] + get_stats(df_test))

    print(tabulate(stats, headers=headers))
    print()
bb52799de86b069b4c480fd94c2eaf501617284f
3,645,364
def parse_author_mail(author):
    """Split author and mail from a string of the form ``author <author-mail>``."""
    # author_mail_re is a module-level compiled regex with two groups:
    # the author name and the mail address.
    pat = author_mail_re.search(author)
    return (pat.group(1), pat.group(2)) if pat else (author, None)
01aacee7202e701ac11177efe71984a7fb1e9a4f
3,645,366
def tag(name, content='', nonclosing=False, **attrs):
    """
    Wraps content in a HTML tag with optional attributes.

    This function provides a Pythonic interface for writing HTML tags with a
    few bells and whistles. The basic usage looks like this::

        >>> tag('p', 'content', _class="note", _id="note1")
        '<p class="note" id="note1">content</p>'

    Any attribute names with any number of leading underscores (e.g.,
    '_class') will have the underscores stripped away.

    If content is an iterable, the tag will be generated once per each member.

        >>> tag('span', ['a', 'b', 'c'])
        '<span>a</span><span>b</span><span>c</span>'

    It does not sanitize the tag names, though, so it is possible to specify
    invalid tag names::

        >>> tag('not valid')
        '<not valid></not valid>'

    .. warning::
        Please ensure that ``name`` argument does not come from
        user-specified data, or, if it does, that it is properly sanitized
        (best way is to use a whitelist of allowed names).

    Because attributes are specified using keyword arguments, which are then
    treated as a dictionary, there is no guarantee of attribute order. If
    attribute order is important, don't use this function.

    This module contains a few partially applied aliases for this function.
    These mostly have hard-wired first argument (tag name), and are all
    uppercase:

    - ``A`` - alias for ``<a>`` tag
    - ``BUTTON`` - alias for ``<button>`` tag
    - ``HIDDEN`` - alias for ``<input>`` tag with ``type="hidden"`` attribute
    - ``INPUT`` - alias for ``<input>`` tag with ``nonclosing`` set to ``True``
    - ``LI`` - alias for ``<li>`` tag
    - ``OPTION`` - alias for ``<option>`` tag
    - ``P`` - alias for ``<p>`` tag
    - ``SELECT`` - alias for ``<select>`` tag
    - ``SPAN`` - alias for ``<span>`` tag
    - ``SUBMIT`` - alias for ``<button>`` tag with ``type="submit"`` attribute
    - ``TEXTAREA`` - alias for ``<textarea>`` tag
    - ``UL`` - alias for ``<ul>`` tag
    """
    open_tag = '<%s>' % name
    close_tag = '</%s>' % name
    # `attr` and `to_unicode` are module-level helpers: `attr` renders a
    # single key="value" pair, `to_unicode` coerces a value to text.
    attrs = ' '.join([attr(k.lstrip('_'), to_unicode(v))
                      for k, v in attrs.items()])
    if attrs:
        open_tag = '<%s %s>' % (name, attrs)
    if nonclosing:
        content = ''
        close_tag = ''
    if not isinstance(content, str):
        try:
            return ''.join(['%s%s%s' % (open_tag, to_unicode(c), close_tag)
                            for c in content])
        except TypeError:
            pass
    return '%s%s%s' % (open_tag, to_unicode(content), close_tag)
acf4575a2c95e105ddf4231c74116d4470cf87eb
3,645,367
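# A hedged sketch of how the uppercase aliases listed in the `tag` docstring
# could be built with functools.partial, with minimal stand-ins for the
# module's `attr`/`to_unicode` helpers (their real definitions are not shown
# in this entry).
from functools import partial

def to_unicode(v):
    return v if isinstance(v, str) else str(v)

def attr(name, value):
    return '%s="%s"' % (name, value)

SPAN = partial(tag, 'span')
HIDDEN = partial(tag, 'input', nonclosing=True, _type='hidden')

print(SPAN('hello', _class='badge'))     # <span class="badge">hello</span>
print(HIDDEN(_name='csrf', _value='x'))  # <input type="hidden" name="csrf" value="x">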
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10):
    """
    Label mesh topology entities using global ids.
    """
    coors = cmesh.get_centroids(edim)
    coors = _to2d(coors)
    dim = cmesh.dim

    ax = _get_axes(ax, dim)

    for ii, cc in enumerate(coors):
        ax.text(*cc.T, s=ii, color=color, fontsize=fontsize)

    return ax
a3e96c090b6f439bcf5991e2df306f5305758cef
3,645,369
from datetime import datetime


def build_filename():
    """Build out the filename based on current UTC time.

    The hour component is rounded down to the nearest even hour.
    """
    now = datetime.utcnow()
    hour = now.hour
    if hour % 2 != 0:
        hour -= 1
    return now.strftime('rib.%Y%m%d.') + '%02d00.bz2' % hour
0f68b09410bf1d749bf3492e974be315d2fcaa0d
3,645,370
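# A quick illustration of the even-hour rounding in `build_filename`; the
# actual output depends on when it is run.
print(build_filename())  # e.g. 'rib.20240101.0800.bz2' when run at 09:xx UTC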
import torch
import torch.nn.functional as F


def sample_sequence(model, length, context=None, temperature=1.0, top_k=10, sample=True, device='cuda',
                    use_constrained_decoding=False, constrained_decoding_threshold=0.3,
                    person_to_category_to_salient_ngram_embed=(), word_embeds=(), tokenizer=None):
    """
    Sample up to `length` tokens from `model`, optionally with constrained decoding.

    :param model: language model returning (logits, past)
    :param length: maximum number of tokens to generate
    :param context: token ids used to prime the model
    :param temperature: softmax temperature
    :param top_k: keep only the k most likely tokens at each step
    :param sample: sample from the distribution instead of decoding greedily
    :param device: torch device string
    :param use_constrained_decoding: enable similarity-constrained sampling
    :param constrained_decoding_threshold: similarity threshold for backtracking
    :param person_to_category_to_salient_ngram_embed: salient n-gram embeddings
    :param word_embeds: word embedding table
    :param tokenizer: tokenizer providing `eos_token_id`
    :return: (output token ids, sampling path)
    """
    # Assume batch size of 1.
    context = torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0)
    orig_context_length = context.size()[-1]
    prev = context
    output = context
    past = None
    k_sample_history = torch.tensor([], device=device, dtype=torch.float)
    sampling_path = []  # List of (timestep, token)s tried. Could be moving forward, alternate, or backward in timestep.
    backtrack = 0
    with torch.no_grad():
        while output.size()[-1] < orig_context_length + length:
            # When using `past`, the context for the next call should be only
            # the previous token: https://github.com/huggingface/transformers/issues/1749
            logits, past = model(prev, past=past)
            logits = logits[:, -1, :] / temperature
            # `top_k_logits` and `sampling` are module-level helpers.
            logits = top_k_logits(logits, k=top_k)
            log_probs = F.softmax(logits, dim=-1)

            prev, output, k_sample_history, backtrack, past = sampling(
                output, log_probs, k_sample_history, use_constrained_decoding,
                constrained_decoding_threshold, sample, sampling_path, backtrack,
                person_to_category_to_salient_ngram_embed, word_embeds, past,
                tokenizer, device)

            if prev == tokenizer.eos_token_id:
                break
    return output, sampling_path
9d65d5b67163e4794628d5f508517e22bbada02c
3,645,371
def normalize_requires(filename, **kwargs):
    """Return the contents of filename, with all [Require]s split out and
    ordered at the top.

    Preserve any leading whitespace/comments.
    """
    if filename[-2:] != '.v':
        filename += '.v'
    kwargs = fill_kwargs(kwargs)
    lib = lib_of_filename(filename, **kwargs)
    all_imports = run_recursively_get_imports(lib, **kwargs)
    v_name = filename_of_lib(lib, ext='.v', **kwargs)
    contents = get_file(v_name, **kwargs)
    header, contents = split_leading_comments_and_whitespace(contents)
    contents = strip_requires(contents)
    # all_imports ends with the library itself, which is skipped here.
    contents = ''.join('Require %s.\n' % i for i in all_imports[:-1]) + '\n' + contents.strip() + '\n'
    return header + contents
8973207559289308f98e7c3217a4b825eeb22c91
3,645,374
import numpy


def uniform_dec(num):
    """
    Declination distribution: uniform in sin(dec), which corresponds to
    declinations drawn uniformly over the sphere.

    Parameters
    ----------
    num : int
        The number of random declinations (in radians) to produce.
    """
    return (numpy.pi / 2.) - numpy.arccos(2 * numpy.random.random_sample(num) - 1)
bc8724e5aa2e65e87f253d271e3130b9379d5cb5
3,645,377
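# A quick check of `uniform_dec`: sin(dec) of the samples should be uniform
# on [-1, 1], so its mean is ~0 and its variance ~1/3.
decs = uniform_dec(100000)
s = numpy.sin(decs)
print(round(s.mean(), 2), round(s.var(), 2))  # approx 0.0 and 0.33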
def helicsInputGetBytes(ipt: HelicsInput) -> bytes:
    """
    Get the raw data for the latest value of a subscription.

    **Parameters**

    - **`ipt`** - The input to get the data for.

    **Returns**: Raw string data.
    """
    if HELICS_VERSION == 2:
        f = loadSym("helicsInputGetRawValue")
    else:
        f = loadSym("helicsInputGetBytes")
    err = helicsErrorInitialize()
    maxDataLen = helicsInputGetByteCount(ipt) + 1024
    data = ffi.new("char[{maxDataLen}]".format(maxDataLen=maxDataLen))
    actualSize = ffi.new("int[1]")
    f(ipt.handle, data, maxDataLen, actualSize, err)
    if err.error_code != 0:
        raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
    else:
        return ffi.unpack(data, length=actualSize[0])
e7d14623490aa77e800d7f1b10c1313a1f1fbf8f
3,645,379
def named_char_class(char_class, min_count=0):
    """Return a predefined character class.

    The result of this function can be passed to :func:`generate_password` as
    one of the character classes to use in generating a password.

    :param char_class: Any of the character classes named in
        :const:`CHARACTER_CLASSES`
    :param min_count: The minimum number of members of this class to appear
        in a generated password
    """
    assert char_class in CHARACTER_CLASSES
    return CharClass(frozenset(_char_class_members[char_class]), min_count)
53f1b580eba6d5ef5ea38bd04606a9fbca2cb864
3,645,380
from typing import Sequence

import numpy as np
import torch


def make_grid(spatial_dim: Sequence[int]) -> torch.Tensor:
    """Make the grid of coordinates for the Fourier neural operator input.

    Args:
        spatial_dim: A sequence of spatial dimensions `(height, width)`.

    Returns:
        A torch.Tensor with the grid of coordinates of size `(1, height, width, 2)`.
    """
    grids = []
    grids.append(np.linspace(0, 1, spatial_dim[0]))
    grids.append(np.linspace(0, 1, spatial_dim[1]))
    grid = np.vstack([u.ravel() for u in np.meshgrid(*grids)]).T
    grid = grid.reshape(1, spatial_dim[0], spatial_dim[1], 2)
    grid = grid.astype(np.float32)
    return torch.tensor(grid)
bf9c858eb068e3f20db8e736883e8b1e74155763
3,645,382
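# A small usage check for `make_grid`: each grid point carries normalised
# coordinates, with (0, 0) and (1, 1) at opposite corners.
g = make_grid((4, 5))
print(g.shape)                    # torch.Size([1, 4, 5, 2])
print(g[0, 0, 0], g[0, -1, -1])   # tensor([0., 0.]) and tensor([1., 1.])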
from dateutil.relativedelta import relativedelta


def datedif(ctx, start_date, end_date, unit):
    """
    Calculates the number of days, months, or years between two dates.
    """
    start_date = conversions.to_date(start_date, ctx)
    end_date = conversions.to_date(end_date, ctx)
    unit = conversions.to_string(unit, ctx).lower()

    if start_date > end_date:
        raise ValueError("Start date cannot be after end date")

    if unit == 'y':
        return relativedelta(end_date, start_date).years
    elif unit == 'm':
        delta = relativedelta(end_date, start_date)
        return 12 * delta.years + delta.months
    elif unit == 'd':
        return (end_date - start_date).days
    elif unit == 'md':
        # Days, ignoring months and years
        return relativedelta(end_date, start_date).days
    elif unit == 'ym':
        # Months, ignoring years
        return relativedelta(end_date, start_date).months
    elif unit == 'yd':
        # Days, ignoring years
        return (end_date - start_date.replace(year=end_date.year)).days

    raise ValueError("Invalid unit value: %s" % unit)
4056af5cbf2f5ff0159a6514e8ee3d09d9f4051d
3,645,386
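# A standalone illustration of the DATEDIF-style unit semantics used above,
# bypassing the `conversions`/`ctx` machinery with plain dates.
from datetime import date
from dateutil.relativedelta import relativedelta

start, end = date(2020, 1, 15), date(2022, 3, 10)
print(relativedelta(end, start).years)                 # 'y' -> 2
print(12 * relativedelta(end, start).years
      + relativedelta(end, start).months)              # 'm' -> 25
print((end - start).days)                              # 'd' -> 785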
def tan(data):
    """Compute elementwise tan of data.

    Parameters
    ----------
    data : relay.Expr
        The input data

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.tan(data)
5c11fa721debd0082514c62f8a8f3afa268ad502
3,645,387
import pandas as pd


def get_battery_data(battery, user=None, start=None, end=None):
    """ Returns a DataFrame with battery data for a user.

    Parameters
    ----------
    battery: DataFrame with battery data
    user: string, optional
    start: datetime, optional
    end: datetime, optional
    """
    assert isinstance(battery, pd.core.frame.DataFrame), "data is not a pandas DataFrame"

    if user is not None:
        assert isinstance(user, str), "user not given in string format"
        battery_data = battery[battery['user'] == user]
    else:
        battery_data = battery

    if start is not None:
        start = pd.to_datetime(start)
    else:
        start = battery_data.iloc[0]['datetime']

    if end is not None:
        end = pd.to_datetime(end)
    else:
        end = battery_data.iloc[-1]['datetime']

    battery_data = battery_data[(battery_data['datetime'] >= start) & (battery_data['datetime'] <= end)]
    battery_data['battery_level'] = pd.to_numeric(battery_data['battery_level'])
    battery_data = battery_data.drop_duplicates(subset=['datetime', 'user', 'device'], keep='last')
    battery_data = battery_data.drop(['user', 'device', 'time', 'datetime'], axis=1)
    return battery_data
d45e40e89195d099b1c7a02fc033cd665b3b72f6
3,645,388
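# A usage sketch for `get_battery_data` on a tiny illustrative frame with the
# columns the function expects.
import pandas as pd

frame = pd.DataFrame({
    "user": ["u1", "u1"],
    "device": ["d1", "d1"],
    "time": [0, 1],
    "datetime": pd.to_datetime(["2020-01-01 00:00", "2020-01-01 01:00"]),
    "battery_level": ["90", "85"],
})
print(get_battery_data(frame, user="u1"))  # one numeric battery_level column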
from typing import List


def generate_options_for_resource_group(control_value=None, **kwargs) -> List:
    """Dynamically generate options for resource group form field based on
    the user's selection for Environment."""
    if control_value is None:
        return []
    # Get the environment
    env = Environment.objects.get(id=control_value)
    # Get the Resource Groups as defined on the Environment. The Resource
    # Group is a CustomField that is only updated on the Env when the user
    # syncs this field on the Environment specific parameters.
    resource_groups = env.custom_field_options.filter(field__name="resource_group_arm")
    return [rg.str_value for rg in resource_groups]
8271d6bf113f18890862835dfd5d0882a7b7490f
3,645,391
import numpy as np


def plot_map(fvcom, tide_db_path, threshold=np.inf, legend=False, **kwargs):
    """
    Plot the tide gauges which fall within the model domain (in space and
    time) defined by the given FileReader object.

    Parameters
    ----------
    fvcom : PyFVCOM.read.FileReader
        FVCOM model data as a FileReader object.
    tide_db_path : str
        Path to the tidal database.
    threshold : float, optional
        Give a threshold distance (in spherical units) beyond which a gauge
        is considered too far away.
    legend : bool, optional
        Set to True to add a legend to the plot. Defaults to False.

    Any remaining keyword arguments are passed to PyFVCOM.plot.Plotter.

    Returns
    -------
    plot : PyFVCOM.plot.Plotter
        The Plotter object instance for the map.
    """
    tide_db = TideDB(tide_db_path)
    gauge_names, gauge_locations = tide_db.get_gauge_locations(long_names=True)

    gauges_in_domain = []
    fvcom_nodes = []
    for gi, gauge in enumerate(gauge_locations):
        river_index = fvcom.closest_node(gauge, threshold=threshold)
        if river_index:
            gauge_id, gauge_dist = tide_db.get_nearest_gauge_id(*gauge)
            times, data = tide_db.get_tidal_series(gauge_id, np.min(fvcom.time.datetime), np.max(fvcom.time.datetime))
            if not np.any(data):
                continue
            gauges_in_domain.append(gi)
            fvcom_nodes.append(river_index)

    plot = Plotter(fvcom, **kwargs)
    fx, fy = plot.m(fvcom.grid.lon, fvcom.grid.lat)
    plot.plot_field(-fvcom.grid.h)
    plot.axes.plot(fx[fvcom_nodes], fy[fvcom_nodes], 'ro', markersize=3, zorder=202, label='Model')

    # Add the gauge locations.
    rx, ry = plot.m(gauge_locations[:, 0], gauge_locations[:, 1])
    plot.axes.plot(rx, ry, 'wo', label='Gauges')
    for xx, yy, name in zip(rx, ry, gauge_names[gauges_in_domain]):
        plot.axes.text(xx, yy, name, fontsize=10, rotation=45, rotation_mode='anchor', zorder=203)

    if legend:
        plot.axes.legend(numpoints=1, scatterpoints=1, ncol=2, loc='upper center', fontsize=10)

    return plot
c73069c67ecda4429c86b6f887cc5fd5a109b10b
3,645,392
from operator import or_

# split_at is assumed to come from more_itertools (it supports keep_separator).
from more_itertools import split_at


def get_element_block(
    xml_string: str,
    first_name: str,
    second_name: str = None,
    include_initial: bool = True,
    include_final: bool = True
) -> str:
    """
    Warning: use great caution if attempting to apply this function, or
    anything like it, to tags that may appear more than once in the label.
    This _general type of_ approach to XML parsing works reliably only in the
    special case where tag names (or sequences of tag names, etc.) are unique
    (or their number of occurrences are otherwise precisely known).
    """
    if second_name is None:
        element_names = [first_name]
    else:
        element_names = [first_name, second_name]
    # `are_in` is a module-level helper returning a predicate that tests
    # whether any of `element_names` occurs in a line (combined with `or_`).
    split = tuple(split_at(
        xml_string.splitlines(),
        are_in(element_names, or_),
        keep_separator=True
    ))
    chunk = split[2]
    if include_initial:
        chunk = split[1] + chunk
    if include_final:
        chunk = chunk + split[3]
    return "\n".join(chunk)
426142b5f1e96dc038640305eb918d065c9bdf20
3,645,393
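# A usage sketch for `get_element_block` with a minimal stand-in for the
# module's `are_in` helper; the XML document here is illustrative.
from functools import reduce

def are_in(names, op):
    # Build a predicate: does any of `names` occur in the line?
    return lambda line: reduce(op, (name in line for name in names))

doc = "<root>\n<a>\n1\n</a>\n<b>\n2\n</b>\n</root>"
print(get_element_block(doc, "<a>", "</a>"))  # -> '<a>\n1\n</a>'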
import numpy as np
import pandas as pd


def eval_eu_loss(ambiguity_values, dfs_ambiguity):
    """Calculate the expected utility loss that results from a setting that
    incorporates different levels of ambiguity.

    Args:
        ambiguity_values (dict): Dictionary with various levels of ambiguity
            to be implemented (key = name of scenario).
        dfs_ambiguity (list): List of pd.DataFrame objects that contain the
            simulated models.

    Returns:
        df_EU (pd.DataFrame): Dataframe that summarizes the expected utility
            loss under the various ambiguity scenarios.
    """
    EU, EU_Loss = {}, {}
    ambiguity_labels = get_dict_labels(ambiguity_values)

    # KW94 specific
    index_value_func = [
        "Value_Function_A",
        "Value_Function_B",
        "Value_Function_Edu",
        "Value_Function_Home",
    ]

    # Calculate the expected utility and EU loss for each ambiguity value.
    # Expected utility = value function at the initial period.
    for df, ambiguity_label in zip(dfs_ambiguity, ambiguity_labels):
        EU[ambiguity_label] = []
        EU_Loss[ambiguity_label] = []
        # Iterate over every identifier in the (identifier, period) MultiIndex.
        for i in range(0, df.index[-1][0] + 1):
            EU[ambiguity_label].append(df[index_value_func].loc[(i, 0)].max())
        EU[ambiguity_label] = np.mean(EU[ambiguity_label])
        EU_Loss[ambiguity_label] = np.abs(
            (EU[ambiguity_label] - EU["absent"]) / EU["absent"]
        )

    # Assemble data frames
    df_EU = pd.DataFrame.from_dict(EU, orient="index", columns=["EU"])
    df_EU["EU_Loss"] = pd.Series(EU_Loss)

    return df_EU
00b658640b91de4dd48e99eac6437bebafb8e9b1
3,645,394
def reset(ip: str = None, username: str = None) -> int:
    """
    Reset records that match IP or username, and return the count of removed
    attempts. This utility method is meant to be used from the CLI or via
    Python API.
    """
    attempts = AccessAttempt.objects.all()
    if ip:
        attempts = attempts.filter(ip_address=ip)
    if username:
        attempts = attempts.filter(username=username)
    count, _ = attempts.delete()

    log.info('AXES: Reset %s access attempts from database.', count)

    return count
3e404ef4b32cc0e183e676e7d07137780beaf3f7
3,645,395
def try_patch_column(meta_column: MetaColumn) -> bool:
    """Try to patch the meta column from request.json.

    Generator assignment must be checked for errors. Disallow column type
    change when a generator is assigned and when the column is imported.
    An error is raised in that case.
    """
    if 'col_type' in request.json and request.json['col_type'] != meta_column.col_type:
        if meta_column.reflected_column_idf is not None:
            raise ColumnError('cannot change the type of an imported column', meta_column)
        if meta_column.generator_setting is not None:
            raise ColumnError('cannot change the type of a column with an assigned generator', meta_column)
    patch_all_from_json(meta_column, ['name', 'col_type', 'nullable'])
    generator_setting_id = request.json.get('generator_setting_id')
    if generator_setting_id is not None:
        facade = inject(GeneratorFacade)
        return facade.update_column_generator(meta_column, generator_setting_id)
    return True
0feb5598853b8a5b1cd060bd806f2fcc6afd69f6
3,645,396
def readout(x, mask, aggr='add'):
    """Masked graph readout: aggregate node features over the node dimension.

    Args:
        x: (B, N_max, F)
        mask: (B, N_max)
    Returns:
        (B, F)
    """
    return aggregate(x=x, dim=1, aggr=aggr, mask=mask, keepdim=False)
74253ad0e7a9d23bd8c3d69097e8c1b8508c8b2f
3,645,398
import math


def axisAligned(angle, tol=None, axis=None):
    """
    Determine if a line (represented by its angle) is aligned with an axis.

    Parameters
    ----------
    angle : float
        The line's angle of inclination (in radians)
    tol : float
        Maximum distance from `axis` for which `angle` is still considered
        to be aligned.
    axis : {'horizontal', 'vertical'}
        The reference axis.

    Returns
    -------
    is_aligned : bool
        True if `angle` is within `tol` radians of `axis`.
    """
    if axis == 'horizontal':
        target_angle = math.pi / 2
    elif axis == 'vertical':
        target_angle = 0.0
    else:
        raise ValueError("axis must be 'horizontal' or 'vertical'")

    distance = abs(target_angle - abs(angle))
    is_aligned = distance < tol
    return is_aligned
9198f1d1e8b3755696f5ccf01b9df112d18bd363
3,645,401
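# A quick check of `axisAligned` under the convention above, where a
# "horizontal" line has an inclination near pi/2.
print(axisAligned(1.55, tol=0.1, axis='horizontal'))  # True
print(axisAligned(0.02, tol=0.1, axis='vertical'))    # True
print(axisAligned(0.80, tol=0.1, axis='vertical'))    # False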
import matplotlib.pyplot as plt
import numpy as np


def plot_1d(x_test, mean, var):
    """
    Description
    ----------
    Plot the mean and credible intervals of a one dimensional Gaussian
    process regressor.

    Parameters
    ----------
    x_test: array_like
        Array containing one dimensional inputs of the Gaussian process model.
    mean: array_like
        An array with the values of the mean function of the Gaussian process.
    var: array_like
        The variance around the values of the mean function of the Gaussian
        process.

    Returns
    ----------
    Matplotlib plot of the mean function and variance of the Gaussian
    process model.
    """
    x_test = exactly_1d(x_test)
    mean = exactly_1d(mean)
    var = exactly_1d(var)
    plt.fill_between(x_test, mean - .674 * np.sqrt(var), mean + .674 * np.sqrt(var),
                     color='k', alpha=.4, label='50% Credible Interval')
    plt.fill_between(x_test, mean - 1.150 * np.sqrt(var), mean + 1.150 * np.sqrt(var),
                     color='k', alpha=.3, label='75% Credible Interval')
    plt.fill_between(x_test, mean - 1.96 * np.sqrt(var), mean + 1.96 * np.sqrt(var),
                     color='k', alpha=.2, label='95% Credible Interval')
    # 2.576 is the two-sided 99% z-score (the original 2.326 is one-sided).
    plt.fill_between(x_test, mean - 2.576 * np.sqrt(var), mean + 2.576 * np.sqrt(var),
                     color='k', alpha=.1, label='99% Credible Interval')
    plt.plot(x_test, mean, c='w')
    return None
f53ca71b2546d6c849cdcb52c16ec77125a4c0a6
3,645,403
def sentence_to_windows(sentence, min_window, max_window):
    """
    Create window-sized chunks from a sentence, always starting with a word.
    """
    windows = []
    words = sentence.split(" ")
    curr_window = ""
    for idx, word in enumerate(words):
        curr_window += (" " + word)
        curr_window = curr_window.lstrip()
        next_word_len = len(words[idx + 1]) + 1 if idx + 1 < len(words) else 0
        if len(curr_window) + next_word_len > max_window:
            # `clean_sentence` and `validate_sentence` are module helpers.
            curr_window = clean_sentence(curr_window)
            if validate_sentence(curr_window, min_window):
                windows.append(curr_window.strip())
            curr_window = ""
    # Keep the trailing partial window if it is long enough.
    if len(curr_window) >= min_window:
        windows.append(curr_window)
    return windows
867240f310c9e7bc3f887a2592485a02ab646870
3,645,404
def get_master_name(els):
    """Function:  get_master_name

    Description:  Return the name of the master node in an Elasticsearch
        cluster.

    Arguments:
        (input)  els -> Elasticsearch instance.
        (output) Name of the master node in the Elasticsearch cluster.
    """
    return els.cat.master().strip().split(" ")[-1]
0371dac1fdf0fd6b906646e1882e9089d9dfa12c
3,645,405
from typing import Sequence
import random


def flop_turn_river(dead: Sequence[str]) -> Sequence[str]:
    """
    Get flop, turn and river cards.

    Args:
        dead: Dead cards.

    Returns:
        5 cards.
    """
    dead_concat = "".join(dead)
    deck = [card for card in DECK if card not in dead_concat]
    return random.sample(deck, 5)
cea8289a5deb03dd74a9b20b99899d908e3f38e3
3,645,406
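# A usage sketch for `flop_turn_river`; DECK is not shown in this entry, so a
# standard 52-card deck is assumed here.
DECK = [r + s for r in "23456789TJQKA" for s in "cdhs"]

board = flop_turn_river(["Ah", "Kd"])  # hole cards are dead
assert len(board) == 5 and "Ah" not in board and "Kd" not in board
print(board)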
def smith_gassmann(kstar, k0, kfl2, phi):
    """
    Applies the Gassmann equation. Returns Ksat2.

    In standard Gassmann notation:
        kstar: dry-rock bulk modulus
        k0:    mineral (grain) bulk modulus
        kfl2:  bulk modulus of the substituted fluid
        phi:   porosity
    """
    a = (1 - kstar/k0)**2.0
    b = phi/kfl2 + (1 - phi)/k0 - (kstar/k0**2.0)
    ksat2 = kstar + (a/b)
    return ksat2
ae413d7ed55862927e5f8d06d4aff5bfc0e91167
3,645,407
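# A worked example for `smith_gassmann` with illustrative (not measured)
# moduli in GPa: a dry rock of 25% porosity saturated with brine.
ksat2 = smith_gassmann(kstar=12.0, k0=37.0, kfl2=2.5, phi=0.25)
print(round(ksat2, 2))  # approx 16.09 -- the fluid stiffens the rock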
import json


async def _preflight_cors(request):
    """Respond to preflight CORS requests and load parameters."""
    if request.method == "OPTIONS":
        return textify("ok", headers=generate_cors_headers(request))

    request['args'] = {}
    if request.form:
        for key in request.form:
            key_lower = key.lower()
            if key_lower in _MUST_BE_GET_PARAM:
                raise UserException(CANNOT_BE_POST_PARAM % key)
            request['args'][key_lower] = request.form[key][0]
    elif request.json:
        for key in request.json:
            key_lower = key.lower()
            if key_lower in _MUST_BE_GET_PARAM:
                raise UserException(CANNOT_BE_POST_PARAM % key)
            # Make all url parameters strings
            if isinstance(request.json[key], list):
                request['args'][key_lower] = json.dumps(request.json[key])
            else:
                request['args'][key_lower] = str(request.json[key])

    # Take all GET parameters
    for key, value in list(request.raw_args.items()):
        key_lower = key.lower()
        if key_lower in _MUST_BE_POST_PARAM:
            raise UserException(CANNOT_BE_GET_PARAM % key)
        request['args'][key_lower] = value
91f6057fc4d624d576b7a8ae45cd202264fde7c1
3,645,408
def login_teacher():
    """ Login user and redirect to the index page. """
    # forget any user
    session.clear()

    # if user reached via route POST
    if request.method == "POST":
        # check user credentials
        email_id = request.form.get("email_id")
        passw = request.form.get("password")
        result = db.execute("SELECT * FROM registrants WHERE email_id = :email", email=email_id)
        if len(result) != 1 or not pwd_context.verify(passw, result[0]['hash']):
            return "INVALID USERNAME/PASSWORD"
        folder_id = db.execute('SELECT folder_id FROM shared_folder WHERE user_id = :user_id',
                               user_id=result[0]['id'])
        session["user_id"] = result[0]["id"]
        session['folder_id'] = folder_id[0]['folder_id']
        return redirect(url_for('index'))
    return render_template('login.html')
04982b664b18c3c10d1d5dadabe101de97f4383d
3,645,409
import base64


def mult_to_bytes(obj: object) -> bytes:
    """Convert a given {array of bits, bytes, int, str, b64} to bytes."""
    # `bytes_needed` and `isBase64` are module-level helpers.
    if isinstance(obj, list):
        # Treat the list as big-endian bits.
        i = int("".join(["{:01b}".format(x) for x in obj]), 2)
        res = i.to_bytes(bytes_needed(i), byteorder="big")
    elif isinstance(obj, int):
        res = obj.to_bytes(bytes_needed(obj), "big")
    elif isBase64(obj):
        res = base64.b64decode(obj)
    elif isinstance(obj, bytes):
        res = obj
    elif isinstance(obj, str):
        # Interpret the string as digits in the smallest base that fits.
        alphabet = max([int(c) for c in obj]) + 1
        res = int(obj, alphabet)
        return mult_to_bytes(res)
    else:
        res = bytes(obj)
    return res
7e86caf56f8187215c6ecbea63b259e627dde0ad
3,645,411
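# `bytes_needed` is not shown in this entry; a minimal stand-in consistent
# with the calls above, plus two usage checks.
def bytes_needed(i: int) -> int:
    # Smallest number of bytes that can hold i (at least 1, so 0 -> b'\x00').
    return max(1, (i.bit_length() + 7) // 8)

print(mult_to_bytes(65537))                     # b'\x01\x00\x01'
print(mult_to_bytes([1, 0, 0, 0, 0, 0, 0, 1]))  # b'\x81'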
import six


def get_barrier(loopy_opts, local_memory=True, **loopy_kwds):
    """
    Returns the correct barrier type depending on the vectorization type /
    presence of atomics.

    Parameters
    ----------
    loopy_opts: :class:`loopy_utils.loopy_opts`
        The loopy options used to create this kernel.
    local_memory: bool [True]
        If true, this barrier will be used for memory in the "local" address
        spaces. Only applicable to OpenCL.
    loopy_kwds: dict
        Any other loopy keywords to put in the instruction options.

    Returns
    -------
    barrier: str
        The built barrier instruction.
    """
    mem_kind = ''
    barrier_kind = 'nop'
    if use_atomics(loopy_opts):
        mem_kind = 'local' if local_memory else 'global'
        barrier_kind = 'lbarrier'
        loopy_kwds['mem_kind'] = mem_kind
    return '...' + barrier_kind + '{' + ', '.join([
        '{}={}'.format(k, v) for k, v in six.iteritems(loopy_kwds)]) + '}'
6f45099827f93ebe41e399b6c75aa7a1b85779fb
3,645,412
import pandas as pd
import rioxarray as rxr


def monthly_rain(year, from_month, x_months, bound):
    """
    Downloads the monthly rainfall GeoTIFFs from the SILO Longpaddock dataset
    and creates a cumulative total by stacking the xarrays. This function is
    embedded in the get_rainfall function or can be used separately.

    Parameters
    ----------
    input : year (integer) value of the year for the data to be pulled
            from_month (integer) value of the first month for the data to be
            pulled
            x_months (integer) number of months to be pulled
            bound (shapefile) area of interest for the final calculated tif
            to be clipped to

    Returns
    ------
    output : rioxarray item representing each of the months pulled and summed
             up for the months selected
    """
    # create month strings as a pandas frame
    mon_string = pd.DataFrame({'mon': ['01', '02', '03', '04', '05', '06',
                                       '07', '08', '09', '10', '11', '12']})
    # assign year and yearmon columns
    mon_string['year'] = str(year)
    mon_string['yearmon'] = mon_string['year'] + mon_string['mon']
    # filter to the requested months
    mon_select = mon_string[from_month - 1:x_months]
    # set base url
    base = 'https://s3-ap-southeast-2.amazonaws.com/silo-open-data/monthly/monthly_rain'
    rain_stack = []
    # loop to download tifs, then reproject the bound, sum and clip
    for index, i in mon_select.iterrows():
        call = base + '/' + i['year'] + '/' + i['yearmon'] + '.monthly_rain.tif'
        month_rain = rxr.open_rasterio(call, masked=True).squeeze()
        rain_stack.append(month_rain)
    bound_crs = bound.to_crs(rain_stack[0].rio.crs)
    stacked_rain = sum(rain_stack).rio.clip(bound_crs.geometry)
    return stacked_rain
951ac32a8afcc5b0fd6f0c1b6616f3cc4d162540
3,645,413
def organize_by_chromosome(genes, transcripts):
    """ Iterate through genes and transcripts and group them by chromosome """
    gene_dict = {}
    transcript_dict = {}

    for ID in genes:
        gene = genes[ID]
        chromosome = gene.chromosome
        if chromosome not in gene_dict:
            gene_dict[chromosome] = {}
        gene_dict[chromosome][ID] = gene

    for ID in transcripts:
        transcript = transcripts[ID]
        chromosome = transcript.chromosome
        if chromosome not in transcript_dict:
            transcript_dict[chromosome] = {}
        transcript_dict[chromosome][ID] = transcript

    return gene_dict, transcript_dict
2f55d29a75f5c28fbf3c79882b8b2ac18590cdb2
3,645,414
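# A usage sketch for `organize_by_chromosome` with lightweight stand-in
# objects; only a `.chromosome` attribute is required.
from types import SimpleNamespace

genes = {"g1": SimpleNamespace(chromosome="chr1"),
         "g2": SimpleNamespace(chromosome="chr2")}
transcripts = {"t1": SimpleNamespace(chromosome="chr1")}
by_chrom_genes, by_chrom_txs = organize_by_chromosome(genes, transcripts)
print(sorted(by_chrom_genes))  # ['chr1', 'chr2']
print(sorted(by_chrom_txs))    # ['chr1']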
def test_show_chromosome_labels(dash_threaded):
    """Test the display/hiding of chromosome labels."""
    prop_type = 'bool'

    def assert_callback(prop_value, nclicks, input_value):
        answer = ''
        if nclicks is not None:
            answer = FAIL
            if PROP_TYPES[prop_type](input_value) == prop_value:
                answer = PASS
        return answer

    template_test_component(
        dash_threaded,
        APP_NAME,
        assert_callback,
        ideogram_test_props_callback,
        'showChromosomeLabels',
        'True',
        prop_type=prop_type,
        component_base=COMPONENT_REACT_BASE,
        **BASIC_PROPS
    )
    driver = dash_threaded.driver

    # assert the absence of chromosome labels
    labels = driver.find_elements_by_class_name('chrLabel')
    assert len(labels) == 0

    # trigger a change of the component prop
    btn = wait_for_element_by_css_selector(driver, '#test-{}-btn'.format(APP_NAME))
    btn.click()

    # assert the presence of chromosome labels
    labels = wait_for_elements_by_css_selector(driver, '.chrLabel')
    assert len(labels) > 0
da3003e54c681b689703f7226b3a5f7a13756944
3,645,416
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    name = entry.data.get(CONF_NAME)
    ha = get_ha(hass, name)
    if ha is not None:
        await ha.async_remove()
    clear_ha(hass, name)
    return True
1783c518e919eb60b2a40603322aa2a04dbc4000
3,645,417
import numpy as np


def calc_fn(grid, size, coefficients=(-0.005, 10)):
    """ Apply the FitzHugh-Nagumo equations to a given grid. """
    a, b, *_ = coefficients
    out = np.zeros(size)
    # dv/dt = v - v^3 - w + a ;  dw/dt = b * (v - w)
    out[0] = grid[0] - grid[0] ** 3 - grid[1] + a
    out[1] = b * (grid[0] - grid[1])
    return out
47a46f75a56ffb3d034a689034fa04f7593c485f
3,645,419
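# A sketch of one explicit Euler step of the FitzHugh-Nagumo system using
# `calc_fn`; the grid shape and step size are illustrative.
state = np.array([[0.1], [0.0]])  # v and w on a single-point grid
dt = 0.01
state = state + dt * calc_fn(state, state.shape)
print(state.ravel())  # [0.10094 0.01]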