content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, 0 to 3.66M
from typing import Optional def build_cluster_endpoint( domain_key: DomainKey, custom_endpoint: Optional[CustomEndpoint] = None, engine_type: EngineType = EngineType.OpenSearch, preferred_port: Optional[int] = None, ) -> str: """ Builds the cluster endpoint from an optional custom_endpoint and the localstack opensearch config. Example values: - my-domain.us-east-1.opensearch.localhost.localstack.cloud:4566 (endpoint strategy = domain (default)) - localhost:4566/us-east-1/my-domain (endpoint strategy = path) - localhost:[port-from-range] (endpoint strategy = port (or deprecated 'off')) - my.domain:443/foo (arbitrary endpoints (technically not allowed by AWS, but there are no rules in localstack)) If preferred_port is not None, an attempt is made to reserve the given port. If the port is already bound, another port will be used. """ # If we have a CustomEndpoint, we directly take its endpoint. if custom_endpoint and custom_endpoint.enabled: return custom_endpoint.endpoint # different endpoints based on engine type engine_domain = "opensearch" if engine_type == EngineType.OpenSearch else "es" # Otherwise, the endpoint is either routed through the edge proxy via a sub-path (localhost:4566/opensearch/...) if config.OPENSEARCH_ENDPOINT_STRATEGY == "port": if preferred_port is not None: try: # if the preferred port is given, we explicitly try to reserve it assigned_port = external_service_ports.reserve_port(preferred_port) except PortNotAvailableException: LOG.warning( f"Preferred port {preferred_port} is not available, trying to reserve another port." ) assigned_port = external_service_ports.reserve_port() else: assigned_port = external_service_ports.reserve_port() return f"{config.LOCALSTACK_HOSTNAME}:{assigned_port}" if config.OPENSEARCH_ENDPOINT_STRATEGY == "path": return f"{config.LOCALSTACK_HOSTNAME}:{config.EDGE_PORT}/{engine_domain}/{domain_key.region}/{domain_key.domain_name}" # or through a subdomain (domain-name.region.opensearch.localhost.localstack.cloud) return f"{domain_key.domain_name}.{domain_key.region}.{engine_domain}.{LOCALHOST_HOSTNAME}:{config.EDGE_PORT}"
82136d78ea6edf68fc5c8bf92653be033649bdfd
3,636,808
import requests import re def get_community_pools(): """Get community pool coins Returns: List[dict]: A list of dicts, each consisting of the following keys: denom, amount """ url = f"{BLUZELLE_PRIVATE_TESTNET_URL}:{BLUZELLE_API_PORT}/cosmos/distribution/v1beta1/community_pool" result = requests.get(url) if result.status_code != 200: returnReqError(url, result) return None pools = result.json()["pool"] pool_list = [] for pool in pools: denom = BLZ_SYMBOL if pool["denom"] == BLZ_DENOM else pool["denom"] amount_partition = str(float(pool["amount"]) / BLZ_UBNT_RATIO).partition(".") amount_separated = re.sub(r"(?<!^)(?=(\d{3})+$)", r",", amount_partition[0]) pool_list.append( { "denom": denom, "amount": f"{amount_separated}{amount_partition[1]}{amount_partition[2]}", } ) return pool_list
c4a78e0a953933f453f7684e66aff2b39572f593
3,636,809
def rgb2bgr(x): """ given an array representation of an RGB image, change the image into a BGR representation of the image """ return bgr2rgb(x)
c9412018e6595513c29da54f8179ff2a7c953d07
3,636,810
from datetime import datetime import numpy as np import pandas as pd import matplotlib.pyplot as plt def draw_des1_plot(date, plot_A, plot_B): """ This function is to draw the plot of DES 1. """ #make up some data for the plot df = pd.DataFrame({'date': np.array([datetime(2020, 1, i+1) for i in range(12)]), 'Worldwide': [3, 4, 4, 7, 8, 9, 14, 17, 12, 8, 8, 13], 'Malaysia': [1, 1, 2, 3, 3, 3, 4, 3, 2, 3, 4, 7]}) plt.xkcd() # comic style function fig = plt.figure(figsize=(9, 6), dpi=35) # define the size of the figure fig.suptitle('Monthly new cases') # title of the chart ax = fig.add_subplot(111) # plot function to create 2 time series plots in a chart ax.plot(df[date], df[plot_A], label=plot_A, linewidth=3) ax.plot(df[date], df[plot_B], color='red', label=plot_B, linewidth=3) # legend for 2 time series plots ax.legend() ax.set_xlabel('Date') # define x axis label ax.set_ylabel('Cases per million people') # define y axis label return fig
811f4d844bafe3d35287b774f61c9337a3163d47
3,636,811
def kolmogorov_smirnov_rank_test(gene_set, gene_list, adj_corr, plot=False): """ Rank test used in GSEA method. It measures dispersion of genes from gene_set over a gene_list. Every gene from gene_list has its weight specified by adj_corr, where adj_corr are gene weights (correlation with phenotype) already raised to the power of parameter p, changing weights importance. plot defines whether the method should return a list of ES values for each position in the ranking; if plot=False (default) the second returned object is None. Reference: http://www.pnas.org/content/102/43/15545.full """ cval = 0 Dn = 0 Nr = 0 N = len(gene_list) Nh = 0 for i in range(N): if gene_list[i] in gene_set: Nr += adj_corr[i] Nh += 1 if N == Nh: miss_pen = 1. else: miss_pen = float(1) / (N - Nh) if plot: stat_plot = N * [None] else: stat_plot = None for i in range(N): if gene_list[i] in gene_set: cval += adj_corr[i] / Nr else: cval -= miss_pen if plot: stat_plot[i] = cval if abs(cval) > abs(Dn): Dn = cval return (Dn, stat_plot)
6c38a40d18465729e544694bd4c61547b98076ea
3,636,812
import re async def segment_url(request: schemas.UrlSegmentationRequest) -> schemas.SegmentationResponse: """ This endpoint accepts the URL of an image, and returns a SegmentationResponse. The endpoint will try to download the image at the given URL. Note: not all servers allow for non-browser user agents to download images. """ try: assert re.match(config.URL_REGEX, request.image_url) image = utils.download_image(request.image_url) segments = pipeline.segment_image(image) return schemas.SegmentationResponse(status_code=0, error_message="", segment_count=len(segments), segments=segments) except Exception as e: return error_response(e)
9cdec9a06946629d35a0dc882e27a9a218075d32
3,636,813
def generate_answers(session, model, word2id, qn_uuid_data, context_token_data, qn_token_data): """ Given a model, and a set of (context, question) pairs, each with a unique ID, use the model to generate an answer for each pair, and return a dictionary mapping each unique ID to the generated answer. Inputs: session: TensorFlow session model: QAModel word2id: dictionary mapping word (string) to word id (int) qn_uuid_data, context_token_data, qn_token_data: lists Outputs: uuid2ans: dictionary mapping uuid (string) to predicted answer (string; detokenized) """ uuid2ans = {} # maps uuid to string containing predicted answer data_size = len(qn_uuid_data) num_batches = ((data_size-1) / model.FLAGS.batch_size) + 1 batch_num = 0 detokenizer = MosesDetokenizer() print "Generating answers..." for batch in get_batch_generator(word2id, qn_uuid_data, context_token_data, qn_token_data, model.FLAGS.batch_size, model.FLAGS.context_len, model.FLAGS.question_len, model.FLAGS.num_feats, model.FLAGS.word_len, model.mcids_dict): # Get the predicted spans pred_start_batch, pred_end_batch = model.get_start_end_pos(session, batch, model.FLAGS.max_span) # Convert pred_start_batch and pred_end_batch to lists length batch_size pred_start_batch = pred_start_batch.tolist() pred_end_batch = pred_end_batch.tolist() # For each example in the batch: for ex_idx, (pred_start, pred_end) in enumerate(zip(pred_start_batch, pred_end_batch)): # Original context tokens (no UNKs or padding) for this example context_tokens = batch.context_tokens[ex_idx] # list of strings # Check the predicted span is in range assert pred_start in range(len(context_tokens)) assert pred_end in range(len(context_tokens)) # Predicted answer tokens pred_ans_tokens = context_tokens[pred_start : pred_end +1] # list of strings # Detokenize and add to dict uuid = batch.uuids[ex_idx] uuid2ans[uuid] = detokenizer.detokenize(pred_ans_tokens, return_str=True) batch_num += 1 if batch_num % 10 == 0: print "Generated answers for %i/%i batches = %.2f%%" % (batch_num, num_batches, batch_num*100.0/num_batches) print "Finished generating answers for dataset." return uuid2ans
253e2ef5a4d03d6eccf418aae23373cd17e0c143
3,636,814
import cmath def _add_agline_to_dict(geo, line, d={}, idx=0, mesh_size=1e-2, n_elements=0, bc=None): """Draw a new Air Gap line and add it to GMSH dictionary if it does not exist Parameters ---------- geo : Model GMSH Model objet line : Object Line Object d : Dictionary GMSH dictionary idx : int Surface index it belongs to mesh_size : float Points mesh size n_elements : int Number of elements on the line for meshing control Returns ------- None """ # TO-DO: Allow repeated points for the rotor and stator sliding bands dlines = list() ltag = None btag, bx, by = _find_point_tag(d, line.get_begin()) etag, ex, ey = _find_point_tag(d, line.get_end()) if btag is None: btag = geo.addPoint(bx, by, 0, meshSize=mesh_size, tag=-1) else: dlines.extend(_find_lines_from_point(d, btag)) if etag is None: etag = geo.addPoint(ex, ey, 0, meshSize=mesh_size, tag=-1) else: dlines.extend(_find_lines_from_point(d, etag)) if isinstance(line, Arc): ctag, cx, cy = _find_point_tag(d, line.get_center()) if ctag is None: ctag = geo.addPoint(cx, cy, 0, meshSize=mesh_size, tag=-1) else: dlines.extend(_find_lines_from_point(d, ctag)) if len(dlines) > 0: for iline in dlines: p = _find_points_from_line(d, iline) if p[0] == btag and p[1] == etag and p[2] == ctag: ltag = iline break elif p[0] == etag and p[1] == btag and p[2] == ctag: ltag = -iline break else: pass if ltag is None: ltag = geo.addCircleArc(btag, ctag, etag, tag=-1) if n_elements > 0: geo.mesh.setTransfiniteCurve(ltag, n_elements + 1, "Progression") else: ltag = geo.addCircleArc(btag, ctag, etag, tag=-1) if n_elements > 0: geo.mesh.setTransfiniteCurve(ltag, n_elements + 1, "Progression") # To avoid fill the dictionary with repeated lines repeated = False for lvalues in d[idx].values(): if type(lvalues) is not dict: continue else: if lvalues["tag"] == ltag: repeated = True if not repeated: nline = len(d[idx]) - 2 arc_angle = cmath.phase(complex(ex, ey)) - cmath.phase(complex(bx, by)) d[idx].update( { nline: { "tag": ltag, "n_elements": n_elements, "bc_name": bc, "begin": {"tag": btag, "coord": complex(bx, by)}, "end": {"tag": etag, "coord": complex(ex, ey)}, "cent": {"tag": ctag, "coord": complex(cx, cy)}, "arc_angle": arc_angle, "line_angle": None, } } ) else: if len(dlines) > 0: for iline in dlines: p = _find_points_from_line(d, iline) if p[0] == btag and p[1] == etag: ltag = iline break elif p[0] == etag and p[1] == btag: ltag = -iline break else: pass if ltag is None: ltag = geo.addLine(btag, etag, tag=-1) if n_elements > 0: geo.mesh.setTransfiniteCurve(ltag, n_elements + 1, "Progression") else: ltag = geo.addLine(btag, etag, tag=-1) if n_elements > 0: geo.mesh.setTransfiniteCurve(ltag, n_elements + 1, "Progression") # To avoid fill the dictionary with repeated lines repeated = False for lvalues in d[idx].values(): if type(lvalues) is not dict: continue else: if lvalues["tag"] == ltag: repeated = True if not repeated: nline = len(d[idx]) - 2 line_angle = 0.5 * ( cmath.phase(complex(ex, ey)) + cmath.phase(complex(bx, by)) ) d[idx].update( { nline: { "tag": ltag, "n_elements": n_elements, "bc_name": bc, "begin": {"tag": btag, "coord": complex(bx, by)}, "end": {"tag": etag, "coord": complex(ex, ey)}, "arc_angle": None, "line_angle": line_angle, } } ) return None
07f14b04cfb9f72d8150cfd39332ba2979cd3ae4
3,636,815
def mat_toeplitz_2d(h, x): """ Constructs a Toeplitz matrix for 2D convolutions Parameters ---------- h: list[list] A matrix of scalar values representing the filter x: list[list] A matrix of scalar values representing the signal Returns ------- list[list] A doubly block Toeplitz matrix T such that y = T(h) * x """ # Calculate the dimensions of the arrays Nh, Mh = mat_dim(h) Nx, Mx = mat_dim(x) Ny, My = Nh + Nx - 1, Mh + Mx - 1 # Pad the filter, if needed padn, padm = Ny - Nh, My - Mh # Dimensions of a Toeplitz matrix Trows, Tcols = My, Mx # Dimension of the block Toeplitz matrix (BTM) BTrows, BTcols = Ny, Nx # Dimension of the doubly block Toeplitz matrix (DBTM) DTrows, DTcols = BTrows * Trows, BTcols * Tcols # Create the Toeplitz matrices Tlist = [] for row in reversed(h): t = mat_toeplitz_1d(row, x[0]) Tlist.append(t) # Padding the blocks, if needed Tlist += [None] * padn # Construct the DBTM DBTM = mat_new(DTrows, DTcols) for col in range(BTcols): for row in range(BTrows): i = row - col offset = (row * Trows, col * Tcols) block = Tlist[i] if block: mat_submat_copy(DBTM, block, offset) return DBTM
10ca8c25eb421aa34c0caf67a63b621a93de6d32
3,636,817
import unicodedata def fix_text_segment( text, *, fix_entities='auto', remove_terminal_escapes=True, fix_encoding=True, fix_latin_ligatures=True, fix_character_width=True, uncurl_quotes=True, fix_line_breaks=True, fix_surrogates=True, remove_control_chars=True, remove_bom=True, normalization='NFC' ): """ Apply fixes to text in a single chunk. This could be a line of text within a larger run of `fix_text`, or it could be a larger amount of text that you are certain is in a consistent encoding. See `fix_text` for a description of the parameters. """ if isinstance(text, bytes): raise UnicodeError(fixes.BYTES_ERROR_TEXT) if fix_entities == 'auto' and '<' in text and '>' in text: fix_entities = False while True: origtext = text if remove_terminal_escapes: text = fixes.remove_terminal_escapes(text) if fix_encoding: text = fixes.fix_encoding(text) if fix_entities: text = fixes.unescape_html(text) if fix_latin_ligatures: text = fixes.fix_latin_ligatures(text) if fix_character_width: text = fixes.fix_character_width(text) if uncurl_quotes: text = fixes.uncurl_quotes(text) if fix_line_breaks: text = fixes.fix_line_breaks(text) if fix_surrogates: text = fixes.fix_surrogates(text) if remove_control_chars: text = fixes.remove_control_chars(text) if remove_bom and not remove_control_chars: # Skip this step if we've already done `remove_control_chars`, # because it would be redundant. text = fixes.remove_bom(text) if normalization is not None: text = unicodedata.normalize(normalization, text) if text == origtext: return text
645bbbfb2f1da94b941e51c50ef7fd682ac6e823
3,636,818
import random def SSValues(MPKa,Rfa,r): """ Steady-State Values (Numerical solutions Linear) Input: Annual MPK and Rf Rates, r (repetition index) Output: Annual MPK and Rf Rates (Input), mu, gamma, SS Capital, SS Wage, SS Investment, Value function """ #Compute Parameters MPK = pow(MPKa/100+1,years) Rf = pow(Rfa/100+1,years) #Eq(19) gamma = (np.log(MPK) - np.log(Rf))/(sigma*sigma) #Eq(18) mu = np.log(MPK) - np.log(alpha) - (sigma*sigma)/2 Xs = exp(np.sqrt(2)*sigma*nodes+mu) #Gauss-Hermite #Initialize Model i = 0 #Reset period X = l*WSS0list[r] #Non-stochastic endowment = l*WSS tr = tau*beta*(1+l)*WSS0list[r] #Non-stochastic transfer = tau*(1+l)*ISS K = beta*(1+l)*WSS0list[r] #Initial value for K W = WSS0list[r] #Initial value for W I = beta*(1+l)*WSS0list[r] #Initial value for I #Create Empty lists Klist=[] cylist=[] Ecolist=[] Wlist=[] Ilist=[] while i != periods: #Current Random Shock random.seed(i) np.random.seed(i) z = np.random.lognormal(mu, sigma) #Old Eco = pow(pi,-1/2) * sum(weights * pow(I * (1 + rK_func(Xs,K) - delta) + phiret* tauL * W_func(Xs,K) + tr, 1-gamma)) #Current Wage W = W_func(z,K) #Young Optimal Investment Decision I = least_squares(foc, (beta*(1+l)*W), bounds = (0,W*(1-tauL)-tr+X), args=(K,W,Xs,gamma,X,tr,)) I = round(I.x[0],50) cy = W*(1-tauL) -tr - I + X #Capital Motion K = (1 - delta) * K + I #Build Lists Klist.append(K) cylist.append(cy) Ecolist.append(Eco) Wlist.append(W) Ilist.append(I) i += 1 #Compute SS values KSS = round(np.mean(Klist[drop:]),50) WSS = round(np.mean(Wlist[drop:]),50) ISS = round(np.mean(Ilist[drop:]),50) #Compute Value function cylist = [1] + cylist #Fix consumption for '1st generation' of old when were young to 1 cylist = cylist[:-1] #Remove last consumption young to make it consistent Vlong = (1-beta)*np.log(np.asarray(cylist)) + beta / (1-gamma) * np.log(np.asarray(Ecolist)) V = np.mean(Vlong[drop:]) return (round(MPKa,5), round(Rfa,5), round(mu,2), round(gamma,2), round(KSS,5), round(WSS,5), round(ISS,5), round(V,5))
31e693da8878d84f6b619f0052a140bbf5307695
3,636,819
def simplify_junctures(graph, epsilon=5): """Simplifies clumps by replacing them with a single juncture node. For each clump, any nodes within epsilon of the clump are deleted. Remaining nodes are connected back to the simplified junctures appropriately.""" graph = graph.copy() max_quadrance = epsilon * epsilon clumps = find_clumps(graph, epsilon) for clump in clumps: to_delete = set([]) for node in graph.nodes_iter(): for juncture in clump: if quadrance(node, juncture) < max_quadrance: to_delete.add(node) to_join = set([]) for node in to_delete: for neighbor in nx.all_neighbors(graph, node): if not (neighbor in to_delete): to_join.add(neighbor) clump_center = (0, 0) for juncture in clump: clump_center = ( clump_center[0]+juncture[0], clump_center[1]+juncture[1]) clump_center = ( clump_center[0] / len(clump), clump_center[1] / len(clump)) for node in to_delete: graph.remove_node(node) for node in to_join: graph.add_edge(node, clump_center) return graph
b88e63d0ac5242e93d1d061c3eeed773d9a7c6bc
3,636,820
from scipy.stats import truncnorm def sample_truncated_norm(clip_low, clip_high, mean, std): """ Given a range (clip_low, clip_high), returns an integer sample from the normal distribution truncated to that range """ a, b = (clip_low - mean) / std, (clip_high - mean) / std return int(truncnorm.rvs(a, b, loc=mean, scale=std))
de722881ce95b83239af74ba081539dfedca3363
3,636,821
def f(x): """ Approximated function.""" return x.mm(w_target)+b_target[0]
8a76358acd7d56aeb18c104198e253fda582652f
3,636,822
import requests from bs4 import BeautifulSoup def get_urls(): """ get all sci-hub-torrent url """ source_url = 'http://gen.lib.rus.ec/scimag/repository_torrent/' urls_list = [] try: req = requests.get(source_url) soups = BeautifulSoup(req.text, 'lxml').find_all('a') for soup in soups: if '.torrent' not in soup.text: continue url = source_url + soup.text print(url) urls_list.append(url) except Exception as error: print(error) finally: return urls_list
e14f15ebc7e39393bd614183e1eccb8fc1933359
3,636,823
def getVariablesForCookie(request=None): """ returns dict with variables for cookie """ cookie_path = '/' portalurl = absoluteURL(getSite(), request) cookie_name = "%s%s"%('__zojax_comment_author_', md5(portalurl).hexdigest()) return dict(name=cookie_name, path=cookie_path)
ba6578036d69ceef45be1b583d8b52b699519823
3,636,824
import torch def logsumexp(x, dim): """ sums up log-scale values """ offset, _ = torch.max(x, dim=dim) offset_broadcasted = offset.unsqueeze(dim) safe_log_sum_exp = torch.log(torch.exp(x-offset_broadcasted).sum(dim=dim)) return safe_log_sum_exp + offset
53a12a2c91c6a0cae3fcae46a860801f05480abe
3,636,825
import requests def make_request(session, verb, endpoint, data={}, timeoutInSeconds=REQUEST_TIMEOUT_IN_SECONDS, max_retries=MAX_RETRIES): """ Make a REST request """ try: if verb is RequestVerb.post: r = session.post(url=endpoint, json=data, timeout=timeoutInSeconds) if r.status_code == requests.codes.ok or r.status_code == requests.codes.created: return r else: print('Error: ' + str(r.status_code) + ' Posting to Endpoint: ' + str(endpoint)) return None elif verb is RequestVerb.delete: r = session.delete(url=endpoint, timeout=timeoutInSeconds) if r.status_code == requests.codes.ok: return r else: print('Error: ' + str(r.status_code) + ' Deleting Endpoint: ' + str(endpoint)) return None elif verb is RequestVerb.put: r = session.put(url=endpoint, json=data, timeout=timeoutInSeconds) if r.status_code == requests.codes.ok: return r else: print('Error: ' + str(r.status_code) + ' Putting Endpoint: ' + str(endpoint)) return None elif verb is RequestVerb.patch: r = session.patch(url=endpoint, json=data, timeout=timeoutInSeconds) if r.status_code == requests.codes.ok or r.status_code == requests.codes.no_content: return r else: print('Error: ' + str(r.status_code) + ' Patching Endpoint: ' + str(endpoint)) return None elif verb is RequestVerb.get: r = session.get(url=endpoint, timeout=timeoutInSeconds) if r.status_code == requests.codes.ok or r.status_code == requests.codes.no_content: return r else: print('Error: ' + str(r.status_code) + ' Getting Endpoint: ' + str(endpoint)) return None else: print('Make request verb not supported: ' + str(verb)) except HTTPError as http_err: print(f'HTTP error occurred: {http_err} Request Verb: {str(verb)} Endpoint: {str(endpoint)}') return http_err except requests.ConnectionError as err: if max_retries > 0: max_retries = max_retries - 1 sleep(0.25) return make_request(session, verb, endpoint, data, timeoutInSeconds, max_retries) else: print('Connection Error, will not retry') return err except Exception as err: print(f'Other error occurred: {err}' + ' Request Verb: ' + str(verb) + ' Endpoint: ' + str(endpoint)) return err
5a0637a130a5f2c1c834ddfb61cccaceeeb5c3c9
3,636,826
def certificate_managed( name, days_remaining=90, append_certs=None, managed_private_key=None, **kwargs ): """ Manage a Certificate name Path to the certificate days_remaining : 90 Recreate the certificate if the number of days remaining on it are less than this number. The value should be less than ``days_valid``, otherwise the certificate will be recreated every time the state is run. A value of 0 disables automatic renewal. append_certs: A list of certificates to be appended to the managed file. They must be valid PEM files, otherwise an error will be thrown. managed_private_key: Has no effect since v2016.11 and will be removed in Salt Aluminium. Use a separate x509.private_key_managed call instead. kwargs: Any arguments supported by :py:func:`x509.create_certificate <salt.modules.x509.create_certificate>` or :py:func:`file.managed <salt.states.file.managed>` are supported. not_before: Initial validity date for the certificate. This date must be specified in the format '%Y-%m-%d %H:%M:%S'. .. versionadded:: 3001 not_after: Final validity date for the certificate. This date must be specified in the format '%Y-%m-%d %H:%M:%S'. .. versionadded:: 3001 Examples: .. code-block:: yaml /etc/pki/ca.crt: x509.certificate_managed: - signing_private_key: /etc/pki/ca.key - CN: ca.example.com - C: US - ST: Utah - L: Salt Lake City - basicConstraints: "critical CA:true" - keyUsage: "critical cRLSign, keyCertSign" - subjectKeyIdentifier: hash - authorityKeyIdentifier: keyid,issuer:always - days_valid: 3650 - days_remaining: 0 - backup: True .. code-block:: yaml /etc/ssl/www.crt: x509.certificate_managed: - ca_server: pki - signing_policy: www - public_key: /etc/ssl/www.key - CN: www.example.com - days_valid: 90 - days_remaining: 30 - backup: True """ if "path" in kwargs: name = kwargs.pop("path") if "ca_server" in kwargs and "signing_policy" not in kwargs: raise salt.exceptions.SaltInvocationError( "signing_policy must be specified if ca_server is." ) if ( "public_key" not in kwargs and "signing_private_key" not in kwargs and "csr" not in kwargs ): raise salt.exceptions.SaltInvocationError( "public_key, signing_private_key, or csr must be specified." ) if managed_private_key: salt.utils.versions.warn_until( "Aluminium", "Passing 'managed_private_key' to x509.certificate_managed has no effect and " "will be removed Salt Aluminium. Use a separate x509.private_key_managed call instead.", ) ret = {"name": name, "result": False, "changes": {}, "comment": ""} is_valid, invalid_reason, current_cert_info = _certificate_is_valid( name, days_remaining, append_certs, **kwargs ) if is_valid: file_args, extra_args = _get_file_args(name, **kwargs) return _certificate_file_managed(ret, file_args) if __opts__["test"]: file_args, extra_args = _get_file_args(name, **kwargs) # Use empty contents for file.managed in test mode. # We don't want generate a new certificate, even in memory, # for security reasons. # Using an empty string instead of omitting it will at least # show the old certificate in the diff. 
file_args["contents"] = "" ret = _certificate_file_managed(ret, file_args) ret["result"] = None ret["comment"] = "Certificate {} will be created".format(name) ret["changes"]["Status"] = { "Old": invalid_reason, "New": "Certificate will be valid and up to date", } return ret contents = __salt__["x509.create_certificate"](text=True, **kwargs) # Check the module actually returned a cert and not an error message as a string try: __salt__["x509.read_certificate"](contents) except salt.exceptions.SaltInvocationError as e: ret["result"] = False ret[ "comment" ] = "An error occurred creating the certificate {}. The result returned from x509.create_certificate is not a valid PEM file:\n{}".format( name, str(e) ) return ret if not append_certs: append_certs = [] for append_file in append_certs: try: append_file_contents = __salt__["x509.get_pem_entry"]( append_file, pem_type="CERTIFICATE" ) contents += append_file_contents except salt.exceptions.SaltInvocationError as e: ret["result"] = False ret[ "comment" ] = "{} is not a valid certificate file, cannot append it to the certificate {}.\nThe error returned by the x509 module was:\n{}".format( append_file, name, str(e) ) return ret file_args, extra_args = _get_file_args(name, **kwargs) file_args["contents"] = contents ret = _certificate_file_managed(ret, file_args) if ret["result"]: ret["changes"]["Certificate"] = { "Old": current_cert_info, "New": __salt__["x509.read_certificate"](certificate=name), } ret["changes"]["Status"] = { "Old": invalid_reason, "New": "Certificate is valid and up to date", } return ret
0d98b58b26bb8266bbd7817b3b8533940b7b5f33
3,636,827
from typing import Iterable from typing import Dict from typing import List def _unique_field_to_col_matching( rules: Iterable[Rule], field_to_matching_cols: Dict[str, List[int]] ) -> Dict[str, int]: """ Given a potential field to column matching this function tries to determine a unique 1-to-1 matching. Returns a dictionary in which each key is the name of a field and the value is the index of the best matching column. """ # This method works by elimination - we give higher priority to fields with fewer potential # matches. We determine their best matching column and then we can eliminate that column for # other fields. sorted_rules = sorted(rules, key=lambda r: len(field_to_matching_cols[r.field_name])) unallocated_columns = set(range(len(rules))) field_to_col = {} for r in sorted_rules: col = [c for c in field_to_matching_cols[r.field_name] if c in unallocated_columns][0] field_to_col[r.field_name] = col unallocated_columns.remove(col) return field_to_col
b0f8f63eb86701605bc67140b91e2e67f368082a
3,636,828
import yaml import textwrap def minimal_config(): """Return YAML parsing result for (somatic) configuration""" return yaml.round_trip_load( textwrap.dedent( r""" static_data_config: reference: path: /path/to/ref.fa dbsnp: path: /path/to/dbsnp.vcf.gz step_config: ngs_mapping: tools: dna: ['bwa'] compute_coverage_bed: true path_target_regions: /path/to/regions.bed bwa: path_index: /path/to/bwa/index.fa targeted_seq_cnv_calling: tools: - xhmm - gcnv xhmm: path_target_interval_list_mapping: - pattern: "Agilent SureSelect Human All Exon V6.*" name: "Agilent_SureSelect_Human_All_Exon_V6" path: /path/to/Agilent/SureSelect_Human_All_Exon_V6_r2/GRCh37/Exons.bed gcnv: path_target_interval_list_mapping: - pattern: "Agilent SureSelect Human All Exon V6.*" name: "Agilent_SureSelect_Human_All_Exon_V6" path: /path/to/Agilent/SureSelect_Human_All_Exon_V6_r2/GRCh37/Exons.bed path_uniquely_mapable_bed: /path/to/uniquely/mappable/variable/GRCh37/file.bed.gz data_sets: first_batch: file: sheet.tsv search_patterns: - {'left': '*/*/*_R1.fastq.gz', 'right': '*/*/*_R2.fastq.gz'} search_paths: ['/path'] type: germline_variants naming_scheme: only_secondary_id """ ).lstrip() )
806da8976dea510997b6fea8d264ac2e7d619e75
3,636,829
def walk(n=1000, mu=0, sigma=1, alpha=0.01, s0=NaN): """ Mean reverting random walk. Returns an array of n-1 steps in the following process:: s[i] = s[i-1] + alpha*(mu-s[i-1]) + e[i] with e ~ N(0,sigma). The parameters are:: *n* walk length *s0* starting value, defaults to N(mu,sigma) *mu* target mean, defaults to 0 *sigma* volatility *alpha* in [0,1] reversion rate Use alpha=0 for a pure Gaussian random walk or alpha=1 independent samples about the mean. If *mu* is a vector, multiple streams are run in parallel. In this case *s0*, *sigma* and *alpha* can either be scalars or vectors. If *mu* is an array, the target value is non-stationary, and the parameter *n* is ignored. Note: the default starting value should be selected from a distribution whose width depends on alpha. N(mu,sigma) is too narrow. This effect is illustrated in :function:`demo`, where the following choices of sigma and alpha give approximately the same histogram:: sigma = [0.138, 0.31, 0.45, 0.85, 1] alpha = [0.01, 0.05, 0.1, 0.5, 1] """ s0, mu, sigma, alpha = [asarray(v) for v in (s0, mu, sigma, alpha)] nchains = mu.shape[0] if mu.ndim > 0 else 1 if mu.ndim < 2: if isnan(s0): s0 = mu + util.rng.randn(nchains)*sigma s = [s0*ones_like(mu)] for i in range(n-1): s.append(s[-1] + alpha*(mu-s[-1]) + sigma*util.rng.randn(nchains)) elif mu.ndim == 2: if isnan(s0): s0 = mu[0] + util.rng.randn(nchains)*sigma s = [s0*ones_like(mu[0])] for i in range(mu.shape[1]): s.append(s[-1] + alpha*(mu[i]-s[-1]) + sigma*util.rng.randn(nchains)) else: raise ValueError("mu must be scalar, vector or 2D array") return asarray(s)
c87e06b2fc046ece6acbffaf982b9258272f81a6
3,636,830
def GetCLIInfoMgr(): """ Get the vmomi type manager """ return _gCLIInfoMgr
4adf784890a6c72cacd951f675350dd40d68c0d5
3,636,831
from datetime import datetime def pretty_date(time=False): """ Get a datetime object or an int() Epoch timestamp and return a pretty string like 'an hour ago', 'Yesterday', '3 months ago', 'just now', etc """ now = datetime.now() if type(time) is int: diff = now - datetime.fromtimestamp(time) elif isinstance(time,datetime): diff = now - time elif not time: diff = now - now second_diff = diff.seconds day_diff = diff.days if day_diff < 0: return '' if day_diff == 0: if second_diff < 10: return "just now" if second_diff < 60: return str(second_diff) + " seconds ago" if second_diff < 120: return "a minute ago" if second_diff < 3600: return str( second_diff // 60 ) + " minutes ago" if second_diff < 7200: return "an hour ago" if second_diff < 86400: return str( second_diff // 3600 ) + " hours ago" if day_diff == 1: return "Yesterday" if day_diff < 7: return str(day_diff) + " days ago" if day_diff < 31: return str(day_diff // 7) + " weeks ago" if day_diff < 365: return str(day_diff // 30) + " months ago" return str(day_diff // 365) + " years ago"
28be383e0064640f3781c781db06eb3a914205dd
3,636,832
def per_cpu_times(): """Return system CPU times as a named tuple""" ret = [] for cpu_t in cext.per_cpu_times(): user, nice, system, idle = cpu_t item = scputimes(user, nice, system, idle) ret.append(item) return ret
b43152c58323fc0d74ec8297e419622485bd7505
3,636,833
def cooldown(rate, per, type=commands.BucketType.default): """See `commands.cooldown` docs""" def decorator(func): if isinstance(func, Command): func._buckets = CooldownMapping(Cooldown(rate, per, type)) else: func.__commands_cooldown__ = Cooldown(rate, per, type) return func return decorator
c15bc00a7b71c95086088f5d42c09de350c148d5
3,636,834
def construct_psi_k2(theta, y, X, kappa = 30): """ Kappa-based filter for time-varying autoregressive component, based on Platteau (2021) """ #get parameter vector T = len(y) omega = theta[0] alpha = theta[1] beta = theta[2] #Filter Volatility psi = np.zeros(T) #initialize volatility at unconditional variance psi[0] = omega/(1-alpha) #initialise the regression filter values t = 0 xylist = [X.iloc[t]*(y.iloc[t])] x2list = [X.iloc[t]**2] xysum = sum(xylist) x2sum = sum(x2list) #do the first filtering psi[t+1] = omega + (alpha )*(psi[t]) + (beta)*(np.tanh(psi[t]) - xysum/ x2sum ) #Continue filtering, as long as kappa not reached, use all available elements for t in range(1,kappa): xylist.append(X.iloc[t]*(y.iloc[t])) x2list.append(X.iloc[t]**2) xysum = sum(xylist) x2sum = sum(x2list) psi[t+1] = omega + (alpha )*(psi[t]) + (beta)*(np.tanh(psi[t]) - xysum/ x2sum ) #When kappa is reached, also drop the first instance in each iteration for t in range(kappa -1,T-1): xylist.append(X.iloc[t]*(y.iloc[t])) x2list.append(X.iloc[t]**2) xylist.pop(0) x2list.pop(0) xysum = sum(xylist) x2sum = sum(x2list) psi[t+1] = omega + (alpha )*(psi[t]) + (beta)*(np.tanh(psi[t]) - xysum/ x2sum ) #return the autoregressive component return psi, 0, 1
e020f7f437dbba96aac15a51491496f701995693
3,636,835
def calHoahaoSancai(tian_ge, ren_ge, di_ge): """ Sancai (three talents) and Wuxing (five elements) luck calculation :param tian_ge: heaven grid :param ren_ge: person grid :param di_ge: earth grid :return: (sancai, result, evaluation) """ sancai = getSancaiWuxing(tian_ge) + getSancaiWuxing(ren_ge) + getSancaiWuxing(di_ge) if sancai in g_sancai_wuxing_dict: data = g_sancai_wuxing_dict[sancai] return sancai, data['result'], data['evaluate'] else: return sancai, constants.RESULT_UNKNOWN, None
ea75055c3b2c749af9b97b8271aea2242de3c85f
3,636,836
def load_coeff_swarm_mio_internal(path): """ Load internal model coefficients and other parameters from a Swarm MIO_SHA_2* product file. """ with open(path, encoding="ascii") as file_in: data = parse_swarm_mio_file(file_in) return SparseSHCoefficientsMIO( data["nm"], data["gh"], ps_extent=(data["pmin"], data["pmax"], data["smin"], data["smax"]), is_internal=True, ), data
cbc2eac4a293ecf432c2520aef741c7499a9be70
3,636,837
import numpy as np import matplotlib.pyplot as plt def plot_trajectory_from_data(X : np.array, y : np.array, sample_n = 0, excludeY=True, ylabel=None, xlabel=None): """ Plots trajectory from data sample_n: sample index """ fig, ax = plt.subplots() dim = X.shape[2] for d in range(dim): trajectory = list(X[sample_n,:,d].squeeze()) if not excludeY: trajectory.append(y[sample_n]) ax.plot(list(range(1,len(trajectory)+1)), trajectory, alpha=0.9) if not xlabel: ax.set_xlabel("Time steps") else: ax.set_xlabel(xlabel) if not ylabel: ax.set_ylabel("State") else: ax.set_ylabel(ylabel) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.spines['bottom'].set_linewidth(3) ax.spines['left'].set_linewidth(3) return fig, ax
7bff6c0da9342501e0b230e423ca5a2201757f40
3,636,839
async def get_prices(database, match_id): """Get market prices.""" query = """ select timestamp::interval(0), extract(epoch from timestamp)::integer as timestamp_secs, round((food + (food * .3)) * 100) as buy_food, round((wood + (wood * .3)) * 100) as buy_wood, round((stone + (stone * .3)) * 100) as buy_stone, round((food - (food * .3)) * 100) as sell_food, round((wood - (wood * .3)) * 100) as sell_wood, round((stone - (stone * .3)) * 100) as sell_stone from market where match_id=:match_id order by timestamp """ results = await database.fetch_all(query, values=dict(match_id=match_id)) return list(map(dict, results))
3571006c37319135a3202622b73e7e2379ca93ee
3,636,841
def isChinese(): """ Determine whether the current system language is Chinese 确定当前系统语言是否为 中文 """ return SYSTEM_LANGUAGE == 'zh_CN'
8de1502c153189bc66774569fc00b2408d4ed694
3,636,844
def load_db(db): """ Load database as a dataframe. Extracts the zip files if necessary. The database is indexed by the user, session. """ if DEV_GENUINE == db or DEV_IMPOSTOR == db: extract_dev_db() if GENUINE == db or UNKNOWN == db: extract_test_db() return pd.read_csv(db, index_col=[0, 1])
9ebab9118d7572575fc3b82bbb02bdf1de68e6f5
3,636,845
def _pool_tags(hash, name): """Return a dict with "hidden" tags to add to the given cluster.""" return dict(__mrjob_pool_hash=hash, __mrjob_pool_name=name)
de9a9e7faa4d4f9dd3bfe05cb26790ff8ae66397
3,636,848
from pathlib import Path import pandas as pd def clusters_dictionary(): """ Read the column 'label' from 'final_dataframe.tsv' and return the clusters as a dictionary. If the column 'label' is not in 'final_dataframe.tsv', call k_means_clustering and perform the clustering. :return: a dictionary, where the key is the cluster id and the value is a list of Areas """ # Open sample df = pd.read_csv(Path("dataframes/") / 'final_dataframe.tsv', sep='\t', skiprows=0, encoding='utf-8', dtype={'Postal code': object}) if 'label' not in df.columns: _, df = k_means_clustering() cluster_dic = {} for i in list(set(df['label'].to_list())): cluster_dic[i] = df[df.label == i][['Postal code', 'Area']].values return cluster_dic
6d47865a98a4b2136fd2ec9baf346a880ee8acfb
3,636,850
import numpy as np def apply_wet_day_frequency_correction(ds, process): """ Parameters ---------- ds : xr.Dataset process : {"pre", "post"} Returns ------- xr.Dataset Notes ------- [1] A.J. Cannon, S.R. Sobie, & T.Q. Murdock, "Bias correction of GCM precipitation by quantile mapping: How well do methods preserve changes in quantiles and extremes?", Journal of Climate, vol. 28, Issue 7, pp. 6938-6959. """ threshold = 0.05 # mm/day low = 1e-16 if process == "pre": ds_corrected = ds.where(ds != 0.0, np.random.uniform(low=low, high=threshold)) elif process == "post": ds_corrected = ds.where(ds >= threshold, 0.0) else: raise ValueError("this processing option is not implemented") return ds_corrected
e974a19d537888b866cf3117524815559c28108a
3,636,852
def get_nb_build_nodes_and_entities(city, print_out=False): """ Returns number of building nodes and building entities in city Parameters ---------- city : object City object of pycity_calc print_out : bool, optional Print out results (default: False) Returns ------- res_tuple : tuple Results tuple with number of building nodes (int) and number of building entities (nb_b_nodes, nb_buildings) Annotations ----------- building node might also be PV- or wind-farm (not only building entity) """ nb_b_nodes = 0 nb_buildings = 0 for n in city.nodes(): if 'node_type' in city.nodes[n]: if city.nodes[n]['node_type'] == 'building': if 'entity' in city.nodes[n]: if city.nodes[n]['entity']._kind == 'building': nb_buildings += 1 if (city.nodes[n]['entity']._kind == 'building' or city.nodes[n][ 'entity']._kind == 'windenergyconverter' or city.nodes[n]['entity']._kind == 'pv'): nb_b_nodes += 1 if print_out: # pragma: no cover print('Number of building nodes (Buildings, Wind- and PV-Farms):') print(nb_b_nodes) print() print('Number of buildings: ', nb_buildings) print() return (nb_b_nodes, nb_buildings)
ff3b36dcd2ca7cd0be316b573f20a6dd16bd1c1d
3,636,853
def generate_pairs(agoals, props): """Forms all the pairs that are applicable to the current goals""" all_pairs = [] for i in range(0, len(agoals)): for j in range(i, len(agoals)): goal1, goal2 = agoals[i], agoals[j] all_pairs.extend(list(form_pairs(goal1, goal2, props))) if props.sort_distinct_pos: all_pairs.sort(key=lambda p: distinct_pos(set.union(*agoals).difference(p[1][0]) | p[1][1])) return all_pairs
d0c7881682f057207db22cf5fb612f1a42d10c6d
3,636,854
def construct_aircraft_data(args): """ create the set of aircraft data :param args: parser argument class :return: aircraft_name(string), aircraft_data(list) """ aircraft_name = args.aircraft_name aircraft_data = [args.passenger_number, args.overall_length, args.width, args.height, args.fuselage_width, args.fuselage_height, args.max_takeoff_weight, args.max_landing_weight, args.max_zero_fuel_weight, args.cargo_volume, args.cruise_mach, args.cruise_altitude, args.cruise_range, args.lift_by_drag, args.wing_area, args.aspect_ratio, args.rectangle_angle, args.ratio_of_thickness_and_chord, args.vertical_wing_width, args.horizontal_wing_width] return aircraft_name, aircraft_data
da77ae883d67879b9c51a511f46173eb5366aead
3,636,855
def Oplus_simple(ne): """ """ return ne
7476203cb99ee93dddcf9fda249f5532e908e40f
3,636,856
def lin_exploit(version): """ The title says it all :) """ kernel = version startno = 119 exploits_2_0 = { 'Segment Limit Privilege Escalation': {'min': '2.0.37', 'max': '2.0.38', 'cve': ' CVE-1999-1166', 'src': 'https://www.exploit-db.com/exploits/19419/'} } exploits_2_2 = { 'ptrace kmod Privilege Escalation': {'min': '2.2.0', 'max': '2.2.25', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/3/'}, 'mremap Privilege Escalation': {'min': '2.2.0', 'max': '2.2.26', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'}, 'ptrace setuid Privilege Escalation': {'min': '2.2.0', 'max': '2.2.20', 'cve': 'CVE-2001-1384', 'src': 'https://www.exploit-db.com/exploits/21124/'}, 'procfs Stream redirection to Process Memory Privilege Escalation': {'min': '2.2.0', 'max': '2.2.20', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/20979/'}, 'Privileged Process Hijacking Privilege Escalation': {'min': '2.2.0', 'max': '2.2.25', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/22362/'}, 'Sendmail Capabilities Privilege Escalation': {'min': '2.2.0', 'max': '2.2.16', 'cve': 'CVE-2000-0506', 'src': 'https://www.exploit-db.com/exploits/20001/'} } exploits_2_4 = { 'ptrace kmod Privilege Escalation': {'min': '2.4.0', 'max': '2.4.21', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/3/'}, 'do_brk Privilege Escalation': {'min': '2.4.0', 'max': '2.4.23', 'cve': 'CVE-2003-0961', 'src': 'https://www.exploit-db.com/exploits/131/'}, 'do_mremap Privilege Escalation': {'min': '2.4.0', 'max': '2.4.24', 'cve': ' CVE-2003-0985', 'src': 'https://www.exploit-db.com/exploits/145/'}, 'mremap Privilege Escalation': {'min': '2.4.0', 'max': '2.4.25', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'}, 'uselib Privilege Escalation': {'min': '2.4.0', 'max': '2.4.29-rc2', 'cve': 'CVE-2004-1235', 'src': 'https://www.exploit-db.com/exploits/895/'}, 'bluez Privilege Escalation': {'min': '2.4.6', 'max': '2.4.30-rc2', 'cve': 'CVE-2005-0750', 'src': 'https://www.exploit-db.com/exploits/926/'}, 'System Call Emulation Privilege Escalation': {'min': '2.4.0', 'max': '2.4.37.10', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/4460/'}, 'ptrace setuid Privilege Escalation': {'min': '2.4.0', 'max': '2.4.10', 'cve': 'CVE-2001-1384', 'src': 'https://www.exploit-db.com/exploits/21124/'}, 'procfs Stream redirection to Process Memory Privilege Escalation': {'min': '2.4.0', 'max': '2.4.4', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/20979/'}, 'Privileged Process Hijacking Privilege Escalation': {'min': '2.4.0', 'max': '2.4.21', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/22362/'}, 'sock_sendpage Privilege Escalation': {'min': '2.4.4', 'max': '2.4.37.4', 'cve': ' CVE-2009-2692', 'src': 'https://www.exploit-db.com/exploits/9641/'}, 'pipe.c Privilege Escalation': {'min': '2.4.1', 'max': '2.4.37', 'cve': 'CVE-2009-3547', 'src': 'https://www.exploit-db.com/exploits/9844/'}, 'Ptrace Privilege Escalation': {'min': '2.4.0', 'max': '2.4.35.3', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/30604/'} } exploits_2_6 = { 'mremap Privilege Escalation': {'min': '2.6.0', 'max': '2.6.2', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'}, 'uselib Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11', 'cve': 'CVE-2004-1235', 'src': 'https://www.exploit-db.com/exploits/895/'}, 'bluez Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11.5', 'cve': 'CVE-2005-0750', 
'src': 'https://www.exploit-db.com/exploits/926/'}, 'SYS_EPoll_Wait Privilege Escalation': {'min': '2.6.0', 'max': '2.6.12', 'cve': 'CVE-2005-0736', 'src': 'https://www.exploit-db.com/exploits/1397/'}, 'logrotate prctl Privilege Escalation': {'min': '2.6.13', 'max': '2.6.17.4', 'cve': ' CVE-2006-2451', 'src': 'https://www.exploit-db.com/exploits/2031/'}, 'proc Privilege Escalation': {'min': '2.6.13', 'max': '2.6.17.4', 'cve': ' CVE-2006-2451', 'src': 'https://www.exploit-db.com/exploits/2013/'}, 'System Call Emulation Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22.7', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/4460/'}, 'BlueTooth Stack Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11.5', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/4756/'}, 'vmsplice Privilege Escalation': {'min': '2.6.17', 'max': '2.6.24.1', 'cve': 'CVE-2008-0600', 'src': 'https://www.exploit-db.com/exploits/5092/'}, 'ftruncate()/open() Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22', 'cve': 'CVE-2008-4210', 'src': 'https://www.exploit-db.com/exploits/6851/'}, 'exit_notify() Privilege Escalation': {'min': '2.6.0', 'max': '2.6.30-rc1', 'cve': 'CVE-2009-1337', 'src': 'https://www.exploit-db.com/exploits/8369/'}, 'UDEV Privilege Escalation': {'min': '2.6.0', 'max': '2.6.40', 'cve': 'CVE-2009-1185', 'src': 'https://www.exploit-db.com/exploits/8478/'}, 'ptrace_attach() Race Condition': {'min': '2.6.0', 'max': '2.6.30-rc4', 'cve': 'CVE-2009-1527', 'src': 'https://www.exploit-db.com/exploits/8673/'}, 'Samba Share Privilege Escalation': {'min': '2.6.0', 'max': '2.6.39', 'cve': 'CVE-2004-0186', 'src': 'https://www.exploit-db.com/exploits/23674/'}, 'ReiserFS xattr Privilege Escalation': {'min': '2.6.0', 'max': '2.6.35', 'cve': 'CVE-2010-1146', 'src': 'https://www.exploit-db.com/exploits/12130/'}, 'sock_sendpage Privilege Escalation': {'min': '2.6.6', 'max': '2.6.30.5', 'cve': ' CVE-2009-2692', 'src': 'https://www.exploit-db.com/exploits/9641/'}, 'pipe.c Privilege Escalation': {'min': '2.6.0', 'max': '2.6.32-rc6', 'cve': 'CVE-2009-3547', 'src': 'https://www.exploit-db.com/exploits/33322/'}, 'Sys_Tee Privilege Escalation': {'min': '2.6.0', 'max': '2.6.17.6', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/29714/'}, 'Linux Kernel Privilege Escalation': {'min': '2.6.18', 'max': '2.6.18-20', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/10613/'}, 'Dirty COW': {'min': '2.6.22', 'max': '4.8.3', 'cve': 'CVE-2016-5195', 'src': 'https://www.exploit-db.com/exploits/40616/'}, 'compat Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36', 'cve': 'CVE-2010-3081', 'src': 'https://www.exploit-db.com/exploits/15024/'}, 'DEC Alpha Linux - Privilege Escalation': {'min': '2.6.28', 'max': '3.0', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/17391/'}, 'SELinux (RHEL 5) - Privilege Escalation': {'min': '2.6.30', 'max': '2.6.31', 'cve': 'CVE-2009-1897', 'src': 'https://www.exploit-db.com/exploits/9191/'}, 'proc Handling SUID Privilege Escalation': {'min': '2.6.0', 'max': '2.6.38', 'cve': 'CVE-2011-1020', 'src': 'https://www.exploit-db.com/exploits/41770/'}, 'PERF_EVENTS Privilege Escalation': {'min': '2.6.32', 'max': '3.8.9', 'cve': 'CVE-2013-2094', 'src': 'https://www.exploit-db.com/exploits/25444/'}, 'RDS Protocol Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc8', 'cve': 'CVE-2010-3904', 'src': 'https://www.exploit-db.com/exploits/15285/'}, 'Full-Nelson.c Privilege Escalation': {'min': '2.6.0', 'max': '2.6.37', 'cve': 'CVE-2010-4258', 'src': 
'https://www.exploit-db.com/exploits/15704/'}, 'Mempodipper Privilege Escalation': {'min': '2.6.39', 'max': '3.2.2', 'cve': 'CVE-2012-0056', 'src': 'https://www.exploit-db.com/exploits/35161/'}, 'Ext4 move extents ioctl Privilege Escalation': {'min': '2.6.0', 'max': '2.6.32-git6', 'cve': 'CVE-2009-4131', 'src': 'https://www.exploit-db.com/exploits/33395/'}, 'Ptrace Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22.7', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/30604/'}, 'udp_sendmsg Privilege Escalation': {'min': '2.6.0', 'max': '2.6.19', 'cve': 'CVE-2009-2698', 'src': 'https://www.exploit-db.com/exploits/9575/'}, 'fasync_helper() Privilege Escalation': {'min': '2.6.28', 'max': '2.6.33-rc4-git1', 'cve': 'CVE-2009-4141', 'src': 'https://www.exploit-db.com/exploits/33523/'}, 'CAP_SYS_ADMIN Privilege Escalation': {'min': '2.6.34', 'max': '2.6.40', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/15916/'}, 'CAN BCM Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc1', 'cve': 'CVE-2010-2959', 'src': 'https://www.exploit-db.com/exploits/14814/'}, 'ia32syscall Emulation Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc4-git2', 'cve': 'CVE-2010-3301', 'src': 'https://www.exploit-db.com/exploits/15023/'}, 'Half-Nelson.c Econet Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36.2', 'cve': 'CVE-2010-3848', 'src': 'https://www.exploit-db.com/exploits/17787/'}, 'ACPI custom_method Privilege Escalation': {'min': '2.6.0', 'max': '2.6.37-rc2', 'cve': 'CVE-2010-4347', 'src': 'https://www.exploit-db.com/exploits/15774/'}, 'SGID Privilege Escalation': {'min': '2.6.32.62', 'max': '3.14.8', 'cve': 'CVE-2014-4014', 'src': 'https://www.exploit-db.com/exploits/33824/'}, 'libfutex Privilege Escalation': {'min': '2.6.4', 'max': '3.14.6', 'cve': 'CVE-2014-3153', 'src': 'https://www.exploit-db.com/exploits/35370/'}, 'perf_swevent_init Privilege Escalation': {'min': '2.6.37', 'max': '3.8.9', 'cve': 'CVE-2013-2094', 'src': 'https://www.exploit-db.com/exploits/26131/'}, 'MSR Driver Privilege Escalation': {'min': '2.6', 'max': '3.7.6', 'cve': 'CVE-2013-0268', 'src': 'https://www.exploit-db.com/exploits/27297/'} } exploits_3 = { 'overlayfs Privilege Escalation': {'min': '3.0.0', 'max': '3.19.0', 'cve': 'CVE-2015-1328', 'src': 'https://www.exploit-db.com/exploits/37292/'}, 'CLONE_NEWUSER|CLONE_FS Privilege Escalation': {'min': '3.0', 'max': '3.3.6', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/38390/'}, 'SO_SNDBUFFORCE & SO_RCVBUFFORCE Local Privilege Escalation': {'min': '3.5', 'max': '4.8.14', 'cve': 'CVE-2016-9793', 'src': 'https://www.exploit-db.com/exploits/41995/'}, 'Raw Mode PTY Echo Race Condition Privilege Escalation': {'min': '3.14-rc1', 'max': '3.16', 'cve': 'CVE-2014-0196', 'src': 'https://www.exploit-db.com/exploits/33516/'}, 'sock_diag_handlers() Privilege Escalation': {'min': '3.3.0', 'max': '3.7.10', 'cve': 'CVE-2013-1763', 'src': 'https://www.exploit-db.com/exploits/24555/'}, 'b43 Wireless Driver Privilege Escalation': {'min': '3.0', 'max': '3.9.4', 'cve': 'CVE-2013-2852', 'src': 'https://www.exploit-db.com/exploits/38559/'}, 'CONFIG_X86_X32=y Privilege Escalation': {'min': '3.4', 'max': '3.13.2', 'cve': 'CVE-2014-0038', 'src': 'https://www.exploit-db.com/exploits/31347/'}, 'Double-free usb-midi SMEP Local Privilege Escalation': {'min': '3.0', 'max': '4.5', 'cve': 'CVE-2016-2384', 'src': 'https://www.exploit-db.com/exploits/41999/'}, 'Remount FUSE Privilege Escalation': {'min': '3.2', 'max': '3.16.1', 'cve': 'CVE-2014-5207', 'src': 
'https://www.exploit-db.com/exploits/34923/'}, 'ptrace/sysret Privilege Escalation': {'min': '3.0', 'max': '3.15.4', 'cve': 'CVE-2014-4699', 'src': 'https://www.exploit-db.com/exploits/34134/'}, 'open-time Capability file_ns_capable() Privilege Escalation': {'min': '3.0', 'max': '3.8.9', 'cve': 'CVE-2013-1959', 'src': 'https://www.exploit-db.com/exploits/25450/'}, 'REFCOUNT Overflow/Use-After-Free in Keyrings Privilege Escalation': {'min': '3.8.0', 'max': '4.4.1', 'cve': 'CVE-2016-0728', 'src': 'https://www.exploit-db.com/exploits/39277/'} } exploits_4 = { 'overlayfs Privilege Escalation': {'min': '4.0', 'max': '4.3.3', 'cve': 'CVE-2015-8660', 'src': 'https://www.exploit-db.com/exploits/39166/'}, 'BPF Privilege Escalation': {'min': '4.4.0', 'max': '4.5.5', 'cve': 'CVE-2016-4557', 'src': 'https://www.exploit-db.com/exploits/39772/'}, 'AF_PACKET Race Condition Privilege Escalation': {'min': '4.2.0', 'max': '4.9.0-2', 'cve': 'CVE-2016-8655', 'src': 'https://www.exploit-db.com/exploits/40871/'}, 'DCCP Double-Free Privilege Escalation': {'min': '4.4.0', 'max': '4.9.11', 'cve': 'CVE-2017-6074', 'src': 'https://www.exploit-db.com/exploits/41458/'}, 'Netfilter target_offset Out-of-Bounds Privilege Escalation': {'min': '4.4.0-21-generic', 'max': '4.4.0-31-generic', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/40049/'}, 'IP6T_SO_SET_REPLACE Privilege Escalation': {'min': '4.6.2', 'max': '4.6.3', 'cve': 'CVE-2016-4997', 'src': 'https://www.exploit-db.com/exploits/40489/'}, 'Packet Socket Local Privilege Escalation': {'min': '4.8.0', 'max': '4.10.6', 'cve': 'CVE-2017-7308', 'src': 'https://www.exploit-db.com/exploits/41994/'}, 'UDEV < 232 - Privilege Escalation': {'min': '4.8.0', 'max': '4.9.0', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/41886/'} } if kernel.startswith('2.2'): for name, exploit in exploits_2_2.items(): # iterate over exploits dict if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue elif kernel.startswith('2.4'): for name, exploit in exploits_2_4.items(): if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue elif kernel.startswith('2.6'): for name, exploit in exploits_2_6.items(): if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue elif kernel.startswith('2.0'): for name, exploit in exploits_2_0.items(): if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue elif kernel.startswith('3'): for name, exploit in exploits_3.items(): if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue elif kernel.startswith('4'): for name, exploit in exploits_4.items(): if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue else: return 'No exploits found for this kernel version'
499e21091fb508b26564d06ad119d8b8ea783443
3,636,857
async def get_device( hass: HomeAssistant, config_entry_id: str, device_category: str, device_type: str, vin: str, ): """Get a tesla Device for a Config Entry ID.""" entry_data = hass.data[TESLA_DOMAIN][config_entry_id] devices = entry_data["devices"].get(device_category, []) for device in devices: if device.type == device_type and device.vin() == vin: return device return None
8a3be6e7d1a5f69790b98d07433af1b6e6fe1b16
3,636,858
def cartesian2complex(real, imag): """ Calculate the complex number from the cartesian form: z = z' + i * z". Args: real (float|np.ndarray): The real part z' of the complex number. imag (float|np.ndarray): The imaginary part z" of the complex number. Returns: z (complex|np.ndarray): The complex number: z = z' + i * z". """ return real + 1j * imag
1fd44bc0accff8c9f26edfa84f4fcfafb2323728
3,636,859
def compare_maps(ra_id, method_id, type_id, method_comp=None, type_comp=None): """Function to compare maps / or just print off a given map""" # Get the map map_one = GPVal.objects.filter(my_anal_id=ra_id, type_id=type_id, method_id=method_id) if method_comp and type_comp: map_two = GPVal.objects.filter(my_anal_id=ra_id, type_id=type_comp, method_id=method_comp) # Now do the comparison for gpval in map_one: comp_gp = map_two.filter(gp_id=gpval.gp_id) if comp_gp: if gpval.value != 0.0 and comp_gp[0].value != 0.0: gpval.out_val = gpval.value / comp_gp[0].value else: gpval.out_val = 0.0 else: gpval.out_val = gpval.value # Now render this data out_m = "" for my_p in map_one: if method_comp and type_comp: my_mol = Chem.MolFromPDBBlock(str(my_p.pdb_info)) if my_p.out_val != 0.0: atm = my_mol.GetAtomWithIdx(0) atm.GetPDBResidueInfo().SetTempFactor(my_p.out_val) out_m += Chem.MolToPDBBlock(my_mol) else: if my_p.value != 0.0: out_m += my_p.pdb_info return out_m
57008a0ad144974a06ae0a12de8a8133924c50e9
3,636,860
def _row_reduce_list(mat, rows, cols, one, iszerofunc, simpfunc, normalize_last=True, normalize=True, zero_above=True, dotprodsimp=None): """Row reduce a flat list representation of a matrix and return a tuple (rref_matrix, pivot_cols, swaps) where ``rref_matrix`` is a flat list, ``pivot_cols`` are the pivot columns and ``swaps`` are any row swaps that were used in the process of row reduction. Parameters ========== mat : list list of matrix elements, must be ``rows`` * ``cols`` in length rows, cols : integer number of rows and columns in flat list representation one : SymPy object represents the value one, from ``Matrix.one`` iszerofunc : determines if an entry can be used as a pivot simpfunc : used to simplify elements and test if they are zero if ``iszerofunc`` returns `None` normalize_last : indicates where all row reduction should happen in a fraction-free manner and then the rows are normalized (so that the pivots are 1), or whether rows should be normalized along the way (like the naive row reduction algorithm) normalize : whether pivot rows should be normalized so that the pivot value is 1 zero_above : whether entries above the pivot should be zeroed. If ``zero_above=False``, an echelon matrix will be returned. dotprodsimp : bool, optional Specifies whether intermediate term algebraic simplification is used during matrix multiplications to control expression blowup and thus speed up calculation. """ def get_col(i): return mat[i::cols] def row_swap(i, j): mat[i*cols:(i + 1)*cols], mat[j*cols:(j + 1)*cols] = \ mat[j*cols:(j + 1)*cols], mat[i*cols:(i + 1)*cols] def cross_cancel(a, i, b, j): """Does the row op row[i] = a*row[i] - b*row[j]""" q = (j - i)*cols for p in range(i*cols, (i + 1)*cols): mat[p] = dps(a*mat[p] - b*mat[p + q]) dps = _dotprodsimp if dotprodsimp else lambda e: e piv_row, piv_col = 0, 0 pivot_cols = [] swaps = [] # use a fraction free method to zero above and below each pivot while piv_col < cols and piv_row < rows: pivot_offset, pivot_val, \ assumed_nonzero, newly_determined = _find_reasonable_pivot( get_col(piv_col)[piv_row:], iszerofunc, simpfunc) # _find_reasonable_pivot may have simplified some things # in the process. Let's not let them go to waste for (offset, val) in newly_determined: offset += piv_row mat[offset*cols + piv_col] = val if pivot_offset is None: piv_col += 1 continue pivot_cols.append(piv_col) if pivot_offset != 0: row_swap(piv_row, pivot_offset + piv_row) swaps.append((piv_row, pivot_offset + piv_row)) # if we aren't normalizing last, we normalize # before we zero the other rows if normalize_last is False: i, j = piv_row, piv_col mat[i*cols + j] = one for p in range(i*cols + j + 1, (i + 1)*cols): mat[p] = dps(mat[p] / pivot_val) # after normalizing, the pivot value is 1 pivot_val = one # zero above and below the pivot for row in range(rows): # don't zero our current row if row == piv_row: continue # don't zero above the pivot unless we're told. if zero_above is False and row < piv_row: continue # if we're already a zero, don't do anything val = mat[row*cols + piv_col] if iszerofunc(val): continue cross_cancel(pivot_val, row, val, piv_row) piv_row += 1 # normalize each row if normalize_last is True and normalize is True: for piv_i, piv_j in enumerate(pivot_cols): pivot_val = mat[piv_i*cols + piv_j] mat[piv_i*cols + piv_j] = one for p in range(piv_i*cols + piv_j + 1, (piv_i + 1)*cols): mat[p] = dps(mat[p] / pivot_val) return mat, tuple(pivot_cols), tuple(swaps)
a3791c303b483b158f766bacac73fd0ba63f5f18
3,636,861
import importlib def get_action_class(class_str): """Imports the action class. Args: class_str (str): A string action class. Returns: Action: A child class of Action. Raises: ActionImportError: If the class doesn't exist. """ (module_name, class_name) = class_str.rsplit('.', 1) try: module = importlib.import_module(module_name) module = getattr(module, class_name) except ImportError as e: raise ActionImportError(e) return module
fdbf6f793d8a864b82f491a15c2a0268b14715b6
3,636,862
def rate_comments(request): """ Render a bloom page where respondents can rate comments by others. """ return render(request, 'rate-comments.html')
f87777be409d79abc6c9649ec6dbe6df8cdb2ab4
3,636,863
def gaussian2d(size=(32, 32), sigma=0.5): """ Generate a Gaussian kernel (not normalized). :param size: k x m size of the returned kernel :param sigma: standard deviation of the returned Gaussian :return: A tensor with the Gaussian kernel """ x, y = tf.meshgrid(tf.linspace(-1.0, 1.0, size[0]), tf.linspace(-1.0, 1.0, size[1])) d_squared = x * x + y * y two_times_sigma_squared = 2.0 * (sigma ** 2.0) return tf.exp(-d_squared / two_times_sigma_squared)
4a27fffe68fb031e6f47304bece9e8cfffd7224b
3,636,864
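A quick usage sketch for gaussian2d above (assumes TensorFlow 2.x imported as tf, which the function already relies on):

import tensorflow as tf

kernel = gaussian2d(size=(5, 5), sigma=0.5)
print(kernel.shape)         # (5, 5)
print(float(kernel[2, 2]))  # 1.0 at the centre, since the kernel is not normalized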
def home(): """ List all users or add new user """ users = User.query.all() return render_template('home.html', users=users)
4fb67376f51d677c544ba745680bc9fefed0ced0
3,636,865
def vgg13_bn(**kwargs): """ VGG 13-layer model (configuration "B") with batch normalization """ model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs) return model
3d1ac037754384cf37dbb35c1589cbbeebc3d698
3,636,866
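A hypothetical usage sketch for vgg13_bn above; it assumes the surrounding module defines VGG, make_layers and cfg (as in torchvision-style code) and that the VGG constructor accepts num_classes:

import torch

model = vgg13_bn(num_classes=10)   # **kwargs are forwarded to the VGG constructor
x = torch.randn(1, 3, 224, 224)    # a single fake RGB image
logits = model(x)
print(logits.shape)                # expected: torch.Size([1, 10])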
def lr_insight_wr(): """Return 5-fold cross validation scores r2, mae, rmse""" steps = [('scaler', t.MyScaler(dont_scale='for_profit')), ('knn', t.KNNKeepDf())] pipe = Pipeline(steps) pipe.fit(X_raw) X = pipe.transform(X_raw) lr = LinearRegression() lr.fit(X, y) cv_results = cross_validate(lr, X, y, scoring=['r2', 'neg_mean_squared_error', 'neg_mean_absolute_error'], return_train_score=True) output = pd.DataFrame( {'train_r2': [cv_results['train_r2'].mean()], 'train_rmse': [np.mean( [np.sqrt(abs(i)) for i in cv_results['train_neg_mean_squared_error']])], 'train_mae': [abs(cv_results['train_neg_mean_absolute_error'].mean())], 'test_r2': [cv_results['test_r2'].mean()], 'test_rmse': [np.mean( [np.sqrt(abs(i)) for i in cv_results['test_neg_mean_squared_error']])], 'test_mae': [abs(cv_results['test_neg_mean_absolute_error'].mean())] }, index=['LR'] ) return output
6d98e5e92ba10e390e96b3494a9dedb2c118df69
3,636,867
import collections def complete_list_value(exe_context, return_type, field_asts, info, result): """ Complete a list value by completing each item in the list with the inner type """ assert isinstance(result, collections.Iterable), \ ('User Error: expected iterable, but did not find one ' + 'for field {}.{}.').format(info.parent_type, info.field_name) item_type = return_type.of_type completed_results = [] contains_promise = False index = 0 path = info.path[:] for item in result: info.path = path + [index] completed_item = complete_value_catching_error(exe_context, item_type, field_asts, info, item) if not contains_promise and is_thenable(completed_item): contains_promise = True completed_results.append(completed_item) index += 1 return Promise.all(completed_results) if contains_promise else completed_results
bc5af63592ccf6e08bf8f7da14f7852b78ec1ff0
3,636,868
def projection_ERK(rkm, dt, f, eta, deta, w0, t_final): """Explicit Projection Runge-Kutta method.""" rkm = rkm.__num__() w = np.array(w0) # current value of the unknown function t = 0 # current time ww = np.zeros([np.size(w0), 1]) # values at each time step ww[:,0] = w.copy() tt = np.zeros(1) # time points for ww tt[0] = t b = rkm.b s = len(rkm) y = np.zeros((s, np.size(w0))) # stage values F = np.zeros((s, np.size(w0))) # stage derivatives eta0 = eta(w0) while t < t_final and not np.isclose(t, t_final): if t + dt > t_final: dt = t_final - t for i in range(s): y[i,:] = w.copy() for j in range(i): y[i,:] += rkm.A[i,j]*dt*F[j,:] F[i,:] = f(y[i,:]) w = w + dt*sum([b[i]*F[i] for i in range(s)]) t += dt lamda = 0 dlam = 10 while dlam >1.e-14: dg = deta(w) dlam = -(eta(w+dg*lamda)-eta0)/(np.dot(dg,dg)+1.e-16) lamda += dlam w = w + dg*lamda tt = np.append(tt, t) ww = np.append(ww, np.reshape(w.copy(), (len(w), 1)), axis=1) return tt, ww
137e81c1d4764cde38985d15d04716138b90ccab
3,636,869
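A hedged sketch of projection_ERK above on a problem with a conserved quantity; the Runge-Kutta method object is assumed to come from nodepy (it must expose __num__, A, b and a stage count), and numpy is assumed to be imported as np in the surrounding module:

import numpy as np
from nodepy import rk   # assumption: nodepy supplies the rkm object

# rotation in the plane: the "energy" eta(w) = w[0]**2 + w[1]**2 is conserved exactly
f = lambda w: np.array([-w[1], w[0]])
eta = lambda w: w[0]**2 + w[1]**2
deta = lambda w: 2.0 * w

tt, ww = projection_ERK(rk.loadRKM('RK44'), dt=0.1, f=f, eta=eta, deta=deta,
                        w0=np.array([1.0, 0.0]), t_final=10.0)
print(eta(ww[:, -1]))   # should stay very close to eta(w0) = 1.0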
def integer(name, value): """Validate that the value represents an integer :param name: Name of the argument :param value: A value representing an integer :returns: The value as an int, or None if value is None :raises: InvalidParameterValue if the value does not represent an integer """ if value is None: return try: return int(value) except (ValueError, TypeError): raise exception.InvalidParameterValue( _('Expected an integer for %s: %s') % (name, value))
bcdb6e02944edc875e42a1e23209ec5002b205f6
3,636,870
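A small illustration of the validator's contract (the exception type is whatever the module's own exception.InvalidParameterValue is):

assert integer('timeout', '42') == 42     # numeric strings are converted
assert integer('timeout', 7) == 7
assert integer('timeout', None) is None   # None passes through untouched

try:
    integer('timeout', 'not-a-number')
except Exception as exc:                  # InvalidParameterValue in the real module
    print(exc)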
def generate_Euler_Maruyama_propagators(): """ importer function function that creates two functions: 1. first function created is a kernel propagator (K) 2. second function returns the kernel ratio calculator """ # let's make the kernel propagator first: this is just a batched ULA move kernel_propagator = ULA_move # let's make the kernel ratio calculator kernel_ratio_calculator = Euler_Maruyama_log_proposal_ratio #return both return kernel_propagator, kernel_ratio_calculator
d9bc844761e7f0d1689492ebc51159e64d64e4d9
3,636,871
def get_vtx_neighbor(vtx, faces, n=1, ordinal=False, mask=None): """ Get one vertex's n-ring neighbor vertices Parameters ---------- vtx : integer a vertex's id faces : numpy array the array of shape [n_triangles, 3] n : integer specify which ring should be got ordinal : bool True: get the n_th ring neighbor False: get the n ring neighbor mask : 1-D numpy array specify a area where the ROI is in. Return ------ neighbors : set contain neighbors of the vtx """ n_ring_neighbors = _get_vtx_neighbor(vtx, faces, mask) n_th_ring_neighbors = n_ring_neighbors.copy() for i in range(n-1): neighbors_tmp = set() for neighbor in n_th_ring_neighbors: neighbors_tmp.update(_get_vtx_neighbor(neighbor, faces, mask)) if i == 0: neighbors_tmp.discard(vtx) n_th_ring_neighbors = neighbors_tmp.difference(n_ring_neighbors) n_ring_neighbors.update(n_th_ring_neighbors) if ordinal: return n_th_ring_neighbors else: return n_ring_neighbors
58601345c35a96e4d0e58bfa2821dbc80f911c6c
3,636,872
def tolower(x: StringOrIter) -> StringOrIter: """Convert strings to lower case Args: x: A string or vector of strings Returns: Converted strings """ x = as_character(x) if is_scalar(x): return x.lower() return Array([elem.lower() for elem in x])
7b35e364aac78ce6e087aeeba9970e9981cdc7f0
3,636,875
import numpy import math def MWA_Tile_analytic(za, az, freq=100.0e6, delays=None, zenithnorm=True, power=False, dipheight=config.DIPOLE_HEIGHT, dip_sep=config.DIPOLE_SEPARATION, delay_int=config.DELAY_INT, jones=False, amps=None): """ gainXX,gainYY=MWA_Tile_analytic(za, az, freq=100.0e6, delays=None, zenithnorm=True, power=True, dipheight=0.278, dip_sep=1.1, delay_int=435.0e-12) if power=False, then gains are voltage gains - should be squared for power otherwise are power za is zenith-angle in radians az is azimuth in radians, phi=0 points north freq in Hz, height, sep in m delays should be a numpy array of size (2,16), although a (16,) list or a (16,) array will also be accepted """ theta = za phi = az # wavelength in meters lam = C / freq if (delays is None): delays = 0 if (isinstance(delays, float) or isinstance(delays, int)): delays = delays * numpy.ones((16)) if (isinstance(delays, numpy.ndarray) and len(delays) == 1): delays = delays[0] * numpy.ones((16)) if isinstance(delays, list): delays = numpy.array(delays) assert delays.shape == (2, 16) or delays.shape == (16,), "Delays %s have unexpected shape %s" % (delays, delays.shape) if len(delays.shape) > 1: delays = delays[0] if amps is None: amps = numpy.ones((16)) # direction cosines (relative to zenith) for direction az,za projection_east = numpy.sin(theta) * numpy.sin(phi) projection_north = numpy.sin(theta) * numpy.cos(phi) # projection_z = numpy.cos(theta) if dip_sep == config.DIPOLE_SEPARATION: dipole_north = DIPOLE_NORTH dipole_east = DIPOLE_EAST # dipole_z = DIPOLE_Z else: # compute dipole position within the tile using a custom dipole separation value dipole_north = dip_sep * numpy.array([1.5, 1.5, 1.5, 1.5, 0.5, 0.5, 0.5, 0.5, -0.5, -0.5, -0.5, -0.5, -1.5, -1.5, -1.5, -1.5]) dipole_east = dip_sep * numpy.array([-1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5, -1.5, -0.5, 0.5, 1.5]) # dipole_z = dip_sep * numpy.zeros(dipole_north.shape) # loop over dipoles array_factor = 0.0 for k in range(16): # relative dipole phase for a source at (theta,phi) phase = amps[k] * numpy.exp((1j) * 2 * math.pi / lam * (dipole_east[k] * projection_east + dipole_north[k] * projection_north # + dipole_z[k] * projection_z - delays[k] * C * delay_int)) array_factor += phase / 16.0 ground_plane = 2 * numpy.sin(2 * math.pi * dipheight / lam * numpy.cos(theta)) # make sure we filter out the bottom hemisphere ground_plane *= (theta <= math.pi / 2) # normalize to zenith if (zenithnorm): # print "Normalisation factor (analytic) = %.4f" % (2*numpy.sin(2*math.pi*dipheight/lam)) ground_plane /= 2 * numpy.sin(2 * math.pi * dipheight / lam) # response of the 2 tile polarizations # gains due to foreshortening dipole_ns = numpy.sqrt(1 - projection_north * projection_north) dipole_ew = numpy.sqrt(1 - projection_east * projection_east) # voltage responses of the polarizations from an unpolarized source # this is effectively the YY voltage gain gain_ns = dipole_ns * ground_plane * array_factor # this is effectively the XX voltage gain gain_ew = dipole_ew * ground_plane * array_factor if jones: # Calculate Jones matrices dipole_jones = numpy.array([[numpy.cos(theta) * numpy.sin(phi), 1 * numpy.cos(phi)], [numpy.cos(theta) * numpy.cos(phi), -numpy.sin(phi)]]) j = dipole_jones * ground_plane * array_factor # print "dipole_jones = %s" % (dipole_jones) # print "ground_plane = %s , array_factor = %s" % (ground_plane,array_factor) # Use swapaxis to place jones matrices in last 2 dimensions # instead of first 2 dims. if len(j.shape) == 4: j = numpy.swapaxes(numpy.swapaxes(j, 0, 2), 1, 3) elif len(j.shape) == 3: # 1-D j = numpy.swapaxes(numpy.swapaxes(j, 1, 2), 0, 1) else: # single value pass return j if power: return numpy.real(numpy.conj(gain_ew) * gain_ew), numpy.real(numpy.conj(gain_ns) * gain_ns) return gain_ew, gain_ns
7d1a8a2b8f02c5ae47bcc85302d670a9e3e4d413
3,636,876
import json def traindata(): """Generate Plots in the traindata page. Args: None Returns: render_template(render_template): Render template for the plots """ # read data and create visuals df_features = read_data_csv("./data/features_data.csv") table_2 = data_table( drop_cols=["Unnamed: 0", "FeatureVector", "ScaledFeatures"], num_cols=["Days", "UpPerSong", "DownPerSong", "SongsPerHour"], title="Transformed Dataset - Sample Records", ) graphs = [table_2, heat_map(df_features)] # encode plotly graphs in JSON ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)] graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder) # render web page with plotly graphs return render_template("traindata.html", ids=ids, graphJSON=graphJSON)
513312785a8ba7621693c05606e457b535b39c90
3,636,877
def import_file(file_path, title, source_mime_type, dest_mime_type): """Imports a file with conversion to the native Google document format. Expects the env var GOOGLE_APPLICATION_CREDENTIALS to be set for credentials. Args: file_path (str): Path to file to import title (str): The title of the document to create source_mime_type (str): Original mime type of file dest_mime_type (str): Mime type to convert to Returns: str: The ID of the new file in drive """ credentials, _ = auth.default() drive_service = build('drive', 'v3', credentials=credentials) file_metadata = { 'name': title, 'mimeType': dest_mime_type } media = MediaFileUpload(file_path, mimetype=source_mime_type) file = drive_service.files().create(body=file_metadata, media_body=media, fields='id').execute() return file.get('id')
386994c49895accb1433879b43d0e7a9e0b37beb
3,636,878
def test_extract_requested_slot_from_text_with_not_intent(): """Test extraction of a slot value from text with an excluded (not_intent) intent """ # noinspection PyAbstractClass class CustomFormAction(FormAction): def slot_mappings(self): return {"some_slot": self.from_text(not_intent='some_intent')} form = CustomFormAction() tracker = Tracker('default', {'requested_slot': 'some_slot'}, {'text': 'some_text', 'intent': {'name': 'some_intent', 'confidence': 1.0}}, [], False, None, {}, 'action_listen') slot_values = form.extract_requested_slot(CollectingDispatcher(), tracker, {}) # check that the value was not extracted for the excluded intent assert slot_values == {} tracker = Tracker('default', {'requested_slot': 'some_slot'}, {'text': 'some_text', 'intent': {'name': 'some_other_intent', 'confidence': 1.0}}, [], False, None, {}, 'action_listen') slot_values = form.extract_requested_slot(CollectingDispatcher(), tracker, {}) # check that the value was extracted for a different intent assert slot_values == {'some_slot': 'some_text'}
a96deace8c9d291c3b67a13c250e84a519012fa7
3,636,879
def create_generic_io_object(ioclass, filename=None, directory=None, return_path=False, clean=False): """ Create an io object in a generic way that can work with both file-based and directory-based io objects If filename is None, create a filename. If return_path is True, also return the full path to the file. If directory is not None and path is not an absolute path already, use the file from the given directory. If return_path is True, return the full path of the file along with the io object. return reader, path. Default is False. If clean is True, try to delete existing versions of the file before creating the io object. Default is False. """ filename = get_test_file_full_path(ioclass, filename=filename, directory=directory, clean=clean) try: # actually create the object if ioclass.mode == 'file': ioobj = ioclass(filename=filename) elif ioclass.mode == 'dir': ioobj = ioclass(dirname=filename) else: ioobj = None except: print(filename) raise # return the full path if requested, otherwise don't if return_path: return ioobj, filename return ioobj
1f10537695f507f9ea0c9eec6834efd7c0d6d6fe
3,636,881
def select_channels(img_RGB): """ Returns the R' and V* channels for a skin lesion image. Args: img_RGB (np.array): The RGB image of the skin lesion """ img_RGB_norm = img_RGB / 255.0 img_r_norm = img_RGB_norm[..., 0] / ( img_RGB_norm[..., 0] + img_RGB_norm[..., 1] + img_RGB_norm[..., 2] ) img_v = np.max(img_RGB, axis=2) return (img_r_norm, img_v)
7daacf660c30702b8cbbe6da5da97937d11c0c0a
3,636,882
def upper_case(string): """ Returns its argument in upper case. :param string: str :return: str """ return string.upper()
bbf3fc8b856d466ec73229145443566d85a3457a
3,636,883
import json def repositoryDefinitions(): """ Load repositoryDefinitions page """ i_d = wmc.repository.get_definition_details() p_d = json.dumps(i_d, indent=4) + " " msg = Markup(JSONtoHTML(p_d)) return render_template('repositoryDefinitions.html', data=msg)
a4325e962a81421b4a35c4919d1fa637ae267a0a
3,636,884
def available_adapter_names(): """Return a string list of the available adapters.""" return [str(adp.name) for adp in plugins.ActiveManifest().adapters]
b5674e901de02bc9e2b94cb9fa8b0885b197cb21
3,636,885
def count_sort(seq): """ perform count sort and return sorted sequence without affecting the original """ counts = defaultdict(list) for elem in seq: counts[elem].append(elem) result = [] for i in range(min(seq), max(seq)+1): result.extend(counts[i]) return result
a035592a9a258f138f0064f4f733f405ee2b75d0
3,636,888
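A short usage sketch for count_sort above (the function assumes `from collections import defaultdict` at module level):

data = [4, 2, 7, 2, 9, 4]
print(count_sort(data))   # [2, 2, 4, 4, 7, 9]
print(data)               # the input list is left untouched: [4, 2, 7, 2, 9, 4]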
def detect_overrides(cls, obj): """ For each active plugin, check if it wields a packet hook. If it does, make a note of it. Hand back all hooks for a specific packet type when done. """ res = set() for key, value in cls.__dict__.items(): if isinstance(value, classmethod): value = getattr(cls, key).__func__ if isinstance(value, (FunctionType, classmethod)): meth = getattr(obj, key) if meth.__func__ is not value: res.add(key) return res
55b7299c6239050dd94e8e4ffc3484f987c60125
3,636,889
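An illustrative check of detect_overrides above (the record assumes FunctionType is imported from types at module level):

from types import FunctionType   # needed by detect_overrides itself

class BasePlugin:
    def on_packet(self, packet): return packet
    def on_connect(self): return True

class MyPlugin(BasePlugin):
    def on_packet(self, packet): return packet.upper()

print(detect_overrides(BasePlugin, MyPlugin()))   # {'on_packet'}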
def morningCalls(): """localhost:8080/morningcalls""" session = APIRequest.WebServiceSafra() return session.listMorningCalls()
181d491ddd9549ff8cb3b15c66201ee6b9a88249
3,636,890
def resize_image(img, h, w): """ resize image """ image = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST) return image
f4e1c8f24abb44714d3894a255b32d50e72e09b5
3,636,891
from typing import Dict def _parse_pars(pars) -> Dict: """ Takes dictionary of parameters, converting values to required type and providing defaults for missing values. Args: pars: Parameters dictionary. Returns: Dictionary of converted (and optionally validated) parameters. """ # Fallback to default for missing values and perform conversion. for k in PARAM_CONVERSION: if pars.get(k) is None: pars[k] = PARAM_CONVERSION[k][1] # _logger.warning(f"No value found for parameter '{k}'. Using "f"default value {pars[k]}.") else: conversion_func = PARAM_CONVERSION[k][0] if conversion_func: try: pars[k] = conversion_func(pars[k]) except ValueError as e: _logger.error( f"Unable to convert '{k}': {pars[k]} to " f"expected type {conversion_func.__name__}.") raise e # Fallback to default for missing paths. for p in DEFAULT_TO_OBS_DIR: if pars.get(p) is None: pars[p] = pars[OBS_DIR] return pars
cda13c228f7764718ca0becceae3d721ba111eda
3,636,892
def _get_base_class_names_of_parent_and_child_from_edge(schema_graph, current_location): """Return the base class names of a location and its parent from last edge information.""" edge_direction, edge_name = _get_last_edge_direction_and_name_to_location(current_location) edge_element = schema_graph.get_edge_schema_element_or_raise(edge_name) if edge_direction == INBOUND_EDGE_DIRECTION: parent_base_class_name = edge_element.base_out_connection child_base_class_name = edge_element.base_in_connection elif edge_direction == OUTBOUND_EDGE_DIRECTION: parent_base_class_name = edge_element.base_in_connection child_base_class_name = edge_element.base_out_connection else: raise AssertionError( "Expected edge direction to be either inbound or outbound." "Found: edge {} with direction {}".format(edge_name, edge_direction) ) return parent_base_class_name, child_base_class_name
7860208cb305745f5c62aec664acd60a57715f48
3,636,893
from typing import Callable def projection( v: GridVariableVector, solve: Callable = solve_fast_diag, ) -> GridVariableVector: """Apply pressure projection to make a velocity field divergence free.""" grid = grids.consistent_grid(*v) pressure_bc = boundaries.get_pressure_bc_from_velocity(v) q0 = grids.GridArray(jnp.zeros(grid.shape), grid.cell_center, grid) q0 = grids.GridVariable(q0, pressure_bc) q = solve(v, q0) q = grids.GridVariable(q, pressure_bc) q_grad = fd.forward_difference(q) v_projected = tuple( grids.GridVariable(u.array - q_g, u.bc) for u, q_g in zip(v, q_grad)) return v_projected
e20ff776603be0faef2d63e29c58f99c255ec19d
3,636,894
def a07_curve_function(curve: CustomCurve): """Computes the embedding degree (with respect to the generator order) and its complement""" q = curve.q() if q.nbits()>300: return {"embedding_degree_complement":None,"complement_bit_length":None} l = curve.order() embedding_degree = curve.embedding_degree() embedding_degree_complement = ZZ(euler_phi(l) / embedding_degree) complement_bit_length = embedding_degree_complement.nbits() curve_results = { "embedding_degree_complement": embedding_degree_complement, "complement_bit_length": complement_bit_length, } return curve_results
1f06c9c4425e7d24241d425cfbd9a764d6589963
3,636,895
def _horizontal_metrics_from_coordinates(xcoord,ycoord): """Return horizontal scale factors computed from arrays of projection coordinates. Parameters ---------- xcoord : xarray dataarray array of x_coordinate used to build the grid metrics. either plane_x_coordinate or projection_x_coordinate assume that the order of the dimensions is ('y','x'). ycoord :xarray dataarray array of y_coordinate used to build the grid metrics. either plane_y_coordinate or projection_y_coordinate assume that the order of the dimensions is ('y','x'). Return ------ e1 : xarray dataarray Array of grid cell width corresponding to cell_x_size_at_*_location e2 : xarray dataarray Array of grid cell width corresponding to cell_y_size_at_*_location """ #- Compute the centered first order derivatives of proj. coordinate arrays dy_dj,dy_di = _horizontal_gradient(ycoord) dx_dj,dx_di = _horizontal_gradient(xcoord) #- Compute the approximate size of the cells in x and y direction e1 = sqrt( dx_di**2. + dy_di**2. ) e2 = sqrt( dx_dj**2. + dy_dj**2. ) return e1,e2
a366c37172bed098607e4c5c7194812d3d82141f
3,636,896
def ultosc( df, high, low, close, ultosc, time_period_1=7, time_period_2=14, time_period_3=28, ): """ The Ultimate Oscillator (ULTOSC) by Larry Williams is a momentum oscillator that incorporates three different time periods to improve the overbought and oversold signals. Parameters: df (pd.DataFrame): DataFrame which contain the asset information. high (string): the column name for the period highest price of the asset. low (string): the column name for the period lowest price of the asset. close (string): the column name for the closing price of the asset. ultosc (string): the column name for the ultimate oscillator values. time_period_1 (int): The first time period for the indicator. By default, 7. time_period_2 (int): The second time period for the indicator. By default, 14. time_period_3 (int): The third time period for the indicator. By default, 28. Returns: df (pd.DataFrame): Dataframe with ultimate oscillator of the asset calculated. """ df[ultosc + "previous_close"] = df[close].shift(1) df = trange(df, high, low, close, ultosc + "_true_range") df = df.dropna().reset_index(drop=True) df[ultosc + "_true_low"] = df[[low, ultosc + "previous_close"]].min(axis=1) df[ultosc + "_close-tl"] = df[close] - df[ultosc + "_true_low"] df = sma(df, ultosc + "_close-tl", ultosc + "_a1", time_period_1) df = sma(df, ultosc + "_true_range", ultosc + "_b1", time_period_1) df = sma(df, ultosc + "_close-tl", ultosc + "_a2", time_period_2) df = sma(df, ultosc + "_true_range", ultosc + "_b2", time_period_2) df = sma(df, ultosc + "_close-tl", ultosc + "_a3", time_period_3) df = sma(df, ultosc + "_true_range", ultosc + "_b3", time_period_3) a1_b1 = df[ultosc + "_a1"] / df[ultosc + "_b1"] a2_b2 = df[ultosc + "_a2"] / df[ultosc + "_b2"] a3_b3 = df[ultosc + "_a3"] / df[ultosc + "_b3"] df[ultosc] = 100 * ((4 * a1_b1) + (2 * a2_b2) + a3_b3) / 7.0 df.drop( [ ultosc + "_true_range", ultosc + "previous_close", ultosc + "_true_low", ultosc + "_close-tl", ultosc + "_a1", ultosc + "_b1", ultosc + "_a2", ultosc + "_b2", ultosc + "_a3", ultosc + "_b3", ], axis=1, inplace=True, ) df = df.dropna().reset_index(drop=True) return df
d6f69906bb09e3a7075339cd4a6554d5336f6caa
3,636,897
def mating(child_id, parent1, parent2, gt_matrix): """ Given the name of a child and two parents + the genotype matrices, mate them """ child_gen = phase_parents(parent1, parent2, gt_matrix) parent1.add_children(child_id) parent2.add_children(child_id) child = Person(child_id) child.set_parents(parent1.get_name(), parent2.get_name()) return child, child_gen
9962eb24ff175b0b2d77a787dbc50a342ed4a202
3,636,898
def _tmp( generator_reconstructed_encoded_fake_data, encoded_random_latent_vectors, real_data, encoded_real_data, generator_reconstructed_encoded_real_data, alpha=0.7, scope="anomaly_score", add_summaries=False): """anomaly score. See https://arxiv.org/pdf/1905.11034.pdf for more details """ with tf.name_scope(scope): gen_rec_loss = tf.math.reduce_sum( tf.math.pow(real_data - generator_reconstructed_encoded_fake_data, 2), axis=[-2, -1]) gen_rec_loss_predict = tf.math.reduce_sum( tf.math.pow(real_data - generator_reconstructed_encoded_real_data, 2), axis=[-1]) real_to_orig_dist = tf.math.reduce_sum( tf.math.pow(encoded_real_data - encoded_random_latent_vectors, 2), axis=[-2, -1]) # real_to_orig_dist_predict = tf.math.reduce_sum( # tf.math.pow(encoded_real_data, 2), axis=[-1]) anomaly_score = (gen_rec_loss_predict * alpha) + ((1 - alpha) * real_to_orig_dist) if add_summaries: tf.summary.scalar(name=scope + "_gen_rec_loss", data=gen_rec_loss, step=None, description=None) tf.summary.scalar(name=scope + "_orig_loss", data=real_to_orig_dist, step=None, description=None) tf.summary.scalar(name=scope, data=anomaly_score, step=None, description=None) return anomaly_score, gen_rec_loss, real_to_orig_dist, gen_rec_loss_predict,
181bd45d23e14585072917d2ad707f13f13b7d4f
3,636,899
def get_key_metrics_fig(confirmed_ser, recovered_ser, deaths_ser, metric_type): """ Return key metrics graph object figure Parameters ---------- confirmed_ser: pandas.Series Confirmed pandas series objects with index=dates, values=number of cases recovered_ser: pandas.Series Recovered pandas series objects with index=dates, values=number of cases deaths_ser: pandas.Series Deaths pandas series objects with index=dates, values=number of cases metric_type: str One of ['cumulative', 'new] """ fig = go.Figure() if metric_type == 'cumulative': mode = 'number+delta' delta_confirmed = { 'reference': confirmed_ser.values[-2], 'relative': False, 'position': "bottom", 'valueformat': ">,d", 'increasing.color': 'blue', 'increasing.symbol': '+' } delta_recovered = { 'reference': recovered_ser.values[-2], 'relative': False, 'position': "bottom", 'valueformat': ">,d", 'increasing.color': 'green', 'increasing.symbol': '+' } delta_deaths = { 'reference': deaths_ser.values[-2], 'relative': False, 'position': "bottom", 'valueformat': ">,d", 'increasing.color': 'red', 'increasing.symbol': '+' } elif metric_type == 'new': mode = 'number' delta_confirmed = None delta_recovered = None delta_deaths = None fig.add_trace(go.Indicator( mode=mode, value=confirmed_ser.values[-1], number={ "valueformat": ">,d", 'font': { 'size': 60, 'color': 'blue', } }, domain={'row': 0, 'column': 0}, title={ 'text': 'Confirmed', 'font': { 'size': 24, 'color': 'blue', } }, delta=delta_confirmed)) fig.add_trace(go.Indicator( mode=mode, value=recovered_ser.values[-1], number={ "valueformat": ">,d", 'font': { 'size': 60, 'color': 'green', } }, domain={'row': 0, 'column': 1}, title={ 'text': 'Recovered', 'font': { 'size': 24, 'color': 'green', } }, delta=delta_recovered)) fig.add_trace(go.Indicator( mode=mode, value=deaths_ser.values[-1], number={ "valueformat": ">,d", 'font': { 'size': 60, 'color': 'red', } }, domain={'row': 0, 'column': 2}, title={ 'text': 'Deaths', 'font': { 'size': 24, 'color': 'red', } }, delta=delta_deaths)) fig.update_layout( grid={'rows': 1, 'columns': 3}, autosize=True, # width=500, height=300, # margin={'t': 100, 'b': 100, 'l': 0, 'r': 0} ) return fig
a11130a21d124c2bf44a02d9c3c58bcde5a326cb
3,636,900
from typing import List def pr_curve(results: List[TrecEvalResults]) -> plt: """ Create a precision-recall graph from trec_eval results. :param results: A list of TrecEvalResults files. :return: a matplotlib plt object """ names = [r.run_id for r in results] iprec = [[r.results['iprec_at_recall_0.00'], r.results['iprec_at_recall_0.10'], r.results['iprec_at_recall_0.20'], r.results['iprec_at_recall_0.30'], r.results['iprec_at_recall_0.40'], r.results['iprec_at_recall_0.50'], r.results['iprec_at_recall_0.60'], r.results['iprec_at_recall_0.70'], r.results['iprec_at_recall_0.80'], r.results['iprec_at_recall_0.90'], r.results['iprec_at_recall_1.00']] for r in results] recall = np.arange(0, 1.1, 0.1) mpl.rc('xtick', labelsize=35) mpl.rc('ytick', labelsize=35) plt.xlabel('Recall', fontsize=35) plt.ylabel('Interpolated Precision', fontsize=35) for p in iprec: plt.plot(recall, p, linewidth=10) plt.legend(names, fontsize=35) return plt
90f1e3234304fa7966b93ebdc76235e5356002e6
3,636,902
def plotData(datalist, part = "real", progressive = True, color = None, clip = False, tcutoff = None): """Plot real or imaginary parts of a given list of functions. arguments: datalist (list of tuples, each tuple of form (xlist,ylist)): data to plot; xlist should be real numbers, ylist can be complex part (str): "real" or "imag"; determines which part of ylist we use progressive (bool): if True, alphalevel starts at 0 and increases linearly as we go through datalist color (str): a color supported by axes.plot; if None then gets set to "blue" or "red" for real/imag parts respectively; if "rainbow" then color varies from blue to green as we go through datalist clip (bool): remove first and last items before plotting""" fig = plt.figure() axes = fig.add_subplot(1,1,1) alphalevel = 0.0 if color is None: if part == "real": color = "blue" if part == "imag": color = "red" if color == "rainbow": red = 0.5 green = 0.0 blue = 1.0 # iterate over list of functions n = len(datalist) for k,data in enumerate(datalist): if tcutoff is not None: tlist,zlist = zip(*[[t,z] for t,z in zip(*data) if abs(t) <= tcutoff]) else: tlist,zlist = data if clip: tlist = tlist[1:-1] zlist = zlist[1:-1] if part == "real": ylist = [z.real for z in zlist] if part == "imag": ylist = [z.imag for z in zlist] # if "progressive" is set then vary alphalevel as we go through datalist if progressive: alphalevel += 1.0 / n alphalevel = min(alphalevel, 1.0) else: alphalevel = 1.0 if color == "rainbow": hue = float(k) / n currentcolor = colors.hsv_to_rgb([hue, 0.8, 0.8]) else: currentcolor = color # now plot the points axes.plot(tlist, ylist, color=currentcolor, alpha=alphalevel) return fig
5e328c19fe63389d7f2d55e5fd5d75d2dd5d7c24
3,636,903
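A minimal usage sketch for plotData above; it assumes the module-level imports the function relies on (matplotlib.pyplot as plt and matplotlib colors) are present:

import numpy as np

tlist = list(np.linspace(0.0, 10.0, 200))
datalist = [(tlist, [np.exp(1j * k * t) for t in tlist]) for k in range(1, 6)]

fig = plotData(datalist, part="real", color="rainbow")
fig.savefig("waves.png")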
def train_and_test(model, dataset, robustness_tests=None, base_config_dict=None, save_model=True): """ Train a recommendation model and run robustness tests. Args: model (str): Name of model to be trained. dataset (str): Dataset name; must match the dataset's folder name located in 'data_path' path. base_config_dict: Configuration dictionary. If no config passed, takes default values. save_model (bool): Determines whether or not to externally save the model after training. robustness_tests (dict): Configuration dictionary for robustness tests. Returns: """ config_dict = get_config_dict(robustness_tests, base_config_dict) config = Config(model=model, dataset=dataset, config_dict=config_dict) init_seed(config['seed'], config['reproducibility']) logger = getLogger() if len(logger.handlers) != 0: logger.removeHandler(logger.handlers[1]) init_logger(config) logger.info(config) # dataset filtering dataset = create_dataset(config) logger.info(dataset) # dataset splitting train_data, valid_data, test_data, robustness_testing_data = data_preparation(config, dataset, save=True) for robustness_test in robustness_testing_data: if robustness_testing_data[robustness_test] is not None: logger.info(set_color('Robustness Test', 'yellow') + f': {robustness_test}') # model loading and initialization model = get_model(config['model'])(config, train_data).to(config['device']) logger.info(model) # trainer loading and initialization trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, model) # model training best_valid_score, best_valid_result = trainer.fit( train_data, valid_data, saved=save_model, show_progress=config['show_progress'] ) # model evaluation test_result = trainer.evaluate(test_data, load_best_model=save_model, show_progress=config['show_progress']) logger.info(set_color('best valid ', 'yellow') + f': {best_valid_result}') logger.info(set_color('test result', 'yellow') + f': {test_result}') test_result_transformation, test_result_sparsity, \ test_result_slice, test_result_distributional_slice = None, None, None, None if robustness_testing_data['slice'] is not None: test_result_slice = trainer.evaluate(robustness_testing_data['slice'], load_best_model=save_model, show_progress=config['show_progress']) logger.info(set_color('test result for slice', 'yellow') + f': {test_result_slice}') if robustness_testing_data['distributional_slice'] is not None: test_result_distributional_slice = trainer.evaluate(robustness_testing_data['distributional_slice'], load_best_model=save_model, show_progress=config['show_progress']) logger.info(set_color('test result for distributional slice', 'yellow') + f': ' f'{test_result_distributional_slice}') if robustness_testing_data['transformation_test'] is not None: test_result_transformation = trainer.evaluate(robustness_testing_data['transformation_test'], load_best_model=save_model, show_progress=config['show_progress']) logger.info(set_color('test result for transformation on test', 'yellow') + f': {test_result_transformation}') if robustness_testing_data['transformation_train'] is not None: transformation_model = get_model(config['model'])(config, robustness_testing_data['transformation_train']).to( config['device']) logger.info(transformation_model) transformation_trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, transformation_model) best_valid_score_transformation, best_valid_result_transformation = transformation_trainer.fit( robustness_testing_data['transformation_train'], valid_data, saved=save_model, show_progress=config['show_progress']) test_result_transformation = transformation_trainer.evaluate(test_data, load_best_model=save_model, show_progress=config['show_progress']) logger.info( set_color('best valid for transformed training set', 'yellow') + f': {best_valid_result_transformation}') logger.info(set_color('test result for transformed training set', 'yellow') + f': {test_result_transformation}') if robustness_testing_data['sparsity'] is not None: sparsity_model = get_model(config['model'])(config, robustness_testing_data['sparsity']).to(config['device']) logger.info(sparsity_model) sparsity_trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, sparsity_model) best_valid_score_sparsity, best_valid_result_sparsity = sparsity_trainer.fit( robustness_testing_data['sparsity'], valid_data, saved=save_model, show_progress=config['show_progress']) test_result_sparsity = sparsity_trainer.evaluate(test_data, load_best_model=save_model, show_progress=config['show_progress']) logger.info(set_color('best valid for sparsified training set', 'yellow') + f': {best_valid_result_sparsity}') logger.info(set_color('test result for sparsified training set', 'yellow') + f': {test_result_sparsity}') logger.handlers.clear() shutdown() del logger return { 'test_result': test_result, 'distributional_test_result': test_result_distributional_slice, 'transformation_test_result': test_result_transformation, 'sparsity_test_result': test_result_sparsity, 'slice_test_result': test_result_slice }
3681bc732b2b39837c871267f4659825105d6e2b
3,636,904
def total_cost(content_cost, style_cost, alpha, beta): """Return a tensor representing the total cost.""" return alpha * content_cost + beta * style_cost
98d42bd8d62dc8cd7110b2f5eb9a9a4e4eb6bc65
3,636,905
def create_command_using_pip_action( num_bash_entries=10, uninstall_use_creation_time=False, skip=0): """Create commands using latest pip action.""" valid_pip_commands = get_valid_pip_history(num_bash_entries)[skip:] assert valid_pip_commands, 'No undoable pip commands.' last_valid_pip_command = valid_pip_commands[0] last_valid_pip_action = last_valid_pip_command.split()[1] command = '' if uninstall_use_creation_time: command = 'pip uninstall -y {}'.format(get_uninstall_candidates()) elif last_valid_pip_action == 'install': command = create_command_using_packages(get_pip_command_packages( last_valid_pip_command)) elif last_valid_pip_action == 'uninstall': command = 'pip install {}'.format(get_reinstall_candidates()) elif last_valid_pip_action == 'download': command = 'rm {}'.format(get_file_candidates()) assert command, 'No undoable pip commands.' return command
5ba22f63d33c1ae60ec3a93590965453885e5e29
3,636,906
def extract_word_pos_sequences(form, unknown_category, morpheme_splitter=None, extract_morphemes=False): """Return the unique word-based pos sequences, as well as (possibly) the morphemes, implicit in the form. :param form: a form model object :param morpheme_splitter: callable that splits a strings into its morphemes and delimiters :param str unknown_category: the string used in syntactic category strings when a morpheme-gloss pair is unknown :param morphology: the morphology model object -- needed because its extract_morphemes_from_rules_corpus attribute determines whether we return a list of morphemes. :returns: 2-tuple: (set of pos/delimiter sequences, list of morphemes as (pos, (mb, mg)) tuples). """ if not form.syntactic_category_string: return None, None morpheme_splitter = morpheme_splitter or get_morpheme_splitter() pos_sequences = set() morphemes = [] sc_words = form.syntactic_category_string.split() mb_words = form.morpheme_break.split() mg_words = form.morpheme_gloss.split() for sc_word, mb_word, mg_word in zip(sc_words, mb_words, mg_words): pos_sequence = tuple(morpheme_splitter(sc_word)) if unknown_category not in pos_sequence: pos_sequences.add(pos_sequence) if extract_morphemes: morpheme_sequence = morpheme_splitter(mb_word)[::2] gloss_sequence = morpheme_splitter(mg_word)[::2] for pos, morpheme, gloss in zip(pos_sequence[::2], morpheme_sequence, gloss_sequence): morphemes.append((pos, (morpheme, gloss))) return pos_sequences, morphemes
e08c285910c4da2f827f81ac65abc2ee3d62b1dc
3,636,907
def model_test(Py, Px_y, testDataArr, testLabelArr): """ Model evaluation @Args: Py: prior probability distribution Px_y: conditional probability distribution testDataArr: test set data testLabelArr: test set labels @Returns: accuracy @Raise: """ # error counter errorCnt = 0 # iterate over every sample in the test set for i in range(len(testDataArr)): # get the predicted value presict = NaiveBayes(Py, Px_y, testDataArr[i]) if presict != testLabelArr[i]: errorCnt += 1 # return the accuracy return 1 - (errorCnt / len(testDataArr))
f1e06725511750c79e34dc528728aab8cebc7c34
3,636,908
from typing import Optional import torch def int2c2e(shortname: str, wrapper: LibcintWrapper, other: Optional[LibcintWrapper] = None) -> torch.Tensor: """ 2-centre 2-electron integrals where `wrapper` corresponds to the first electron and `other` corresponds to the second electron. The returned indices are sorted based on `wrapper` and `other`. The available shortname: "ar12" """ # don't really care, it will be ignored rinv_pos = torch.zeros(1, dtype=wrapper.dtype, device=wrapper.device) # check and set the others otherw = _check_and_set(wrapper, other) return _Int2cFunction.apply( *wrapper.params, rinv_pos, [wrapper, otherw], IntorNameManager("int2c2e", shortname))
1f5b6c70c8373c885103d6deb7cba77ea8d0aa73
3,636,909
def send_group_membership_request(request, group_id, template='group_send_request.html'): """ Send membership request to the administrator of a private group. """ if request.method == 'POST': form = GroupMembershipRequestForm(request.POST) if form.is_valid(): group = Group.objects.get(pk=group_id) form.save(user=request.user, group=group) return redirect('group:group_list') form = GroupMembershipRequestForm() return render(request, template, {'form': form})
22324765a915e1677fb6abab41dfa214fcc05d40
3,636,910
def _dataset_type_dir(signer): """Returns the directory name of the corresponding dataset type. There is a `TFRecord` file written for each of the 25 signers. The `TFRecord` files of the first 17 signers are assigned to the train dataset, the `TFRecord` files of the next 4 signers are assigned to the validation dataset, and the `TFRecord` files of the last 4 signers are assigned to the test dataset. Arguments: signer: The index of the signer. Returns: The directory name of the corresponding dataset type. """ if signer > 20: return DatasetType.TEST.value elif signer > 16: return DatasetType.VALIDATION.value else: return DatasetType.TRAIN.value
515fd2e0871cf9549f3f724da0b45bb07f09e24b
3,636,912
def _merge_blanks(src, targ, verbose=False): """Read parallel corpus 2 lines at a time. Merge both sentences if only either source or target has blank 2nd line. If both have blank 2nd lines, then ignore. Returns tuple (src_lines, targ_lines), arrays of strings sentences. """ merges_done = [] # array of indices of rows merged sub = None # replace sentence after merge with open(src, 'rb') as src_file, open(targ, 'rb') as targ_file: src_lines = src_file.readlines() targ_lines = targ_file.readlines() print("src: %d, targ: %d" % (len(src_lines), len(targ_lines))) print("=" * 30) for i in range(0, len(src_lines) - 1): s = src_lines[i].decode().rstrip() s_next = src_lines[i + 1].decode().rstrip() t = targ_lines[i].decode().rstrip() t_next = targ_lines[i + 1].decode().rstrip() if t == '.': t = '' if t_next == '.': t_next = '' if (len(s_next) == 0) and (len(t_next) > 0): targ_lines[i] = "%s %s" % (t, t_next) # assume it has punctuation targ_lines[i + 1] = b'' src_lines[i] = s if len(s) > 0 else sub merges_done.append(i) if verbose: print("t [%d] src: %s\n targ: %s" % (i, src_lines[i], targ_lines[i])) print() elif (len(s_next) > 0) and (len(t_next) == 0): src_lines[i] = "%s %s" % (s, s_next) # assume it has punctuation src_lines[i + 1] = b'' targ_lines[i] = t if len(t) > 0 else sub merges_done.append(i) if verbose: print("s [%d] src: %s\n targ: %s" % (i, src_lines[i], targ_lines[i])) print() elif (len(s) == 0) and (len(t) == 0): # both blank -- remove merges_done.append(i) else: src_lines[i] = s if len(s) > 0 else sub targ_lines[i] = t if len(t) > 0 else sub # handle last line s_last = src_lines[-1].decode().strip() t_last = targ_lines[-1].decode().strip() if (len(s_last) == 0) and (len(t_last) == 0): merges_done.append(len(src_lines) - 1) else: src_lines[-1] = s_last targ_lines[-1] = t_last # remove empty sentences for m in reversed(merges_done): del src_lines[m] del targ_lines[m] print("merges done: %d" % len(merges_done)) return (src_lines, targ_lines)
fe5f765022b2b4de5320272701148cc9f8e691b8
3,636,913
import codecs def get_line(file_path, line_rule): """ 搜索指定文件的指定行到指定行的内容 :param file_path: 指定文件 :param line_rule: 指定行规则 :return: """ s_line = int(line_rule.split(',')[0]) e_line = int(line_rule.split(',')[1][:-1]) result = [] # with open(file_path) as file: file = codecs.open(file_path, "r", encoding='utf-8', errors='ignore') line_number = 0 for line in file: line_number += 1 if s_line <= line_number <= e_line: result.append(line) return result
a6ccda48f8083e5ff6827306f4abd7f19e8d445c
3,636,914
def _generate_odd_sequence(sequence_id: int, start_value: int, k_factor: int, max_iterations: int): """ This method generates a Collatz sequence containing only odd numbers. :param sequence_id: ID of the sequence. :param start_value: The integer value to start with. The value must be a natural number > 0. If an even number is handed over, the next odd number will be used as start value. :param k_factor: The factor by which odd numbers are multiplied in the sequence. :param max_iterations: The maximum number of iterations performed before the method exits. :return: The Collatz sequence as a pandas data frame. """ odds = commons.odd_collatz_sequence(start_value, k_factor, max_iterations=max_iterations) next_odds = odds[1:] odds.pop() collatz_frame = pd.DataFrame({"v_i": odds}) collatz_frame["sequence_id"] = sequence_id collatz_frame["sequence_len"] = len(collatz_frame) collatz_frame["n"] = collatz_frame.index + 1 collatz_frame["k_factor"] = k_factor collatz_frame["v_1"] = start_value collatz_frame["kv_i+1"] = collatz_frame["v_i"].apply( commons.next_collatz_number, args=(k_factor,)) collatz_frame["v_i+"] = next_odds collatz_frame["terminal"] = collatz_frame["v_i+"] == 1 collatz_frame["cycle"] = collatz_frame["v_i+"] == collatz_frame["v_1"] # Logs collatz_frame["v_i_log2"] = collatz_frame["v_i"].apply(log2) collatz_frame["kv_i+1_log2"] = collatz_frame["kv_i+1"].apply(log2) collatz_frame["v_i+_log2"] = collatz_frame["v_i+"].apply(log2) # Binary strings collatz_frame["v_1_bin"] = collatz_frame["v_1"].apply(commons.to_binary) collatz_frame["v_i_bin"] = collatz_frame["v_i"].apply(commons.to_binary) # Mods collatz_frame["v_i_mod4"] = collatz_frame["v_i"] % 4 collatz_frame["kv_i+1_mod4"] = collatz_frame["kv_i+1"] % 4 collatz_frame["v_i+_mod4"] = collatz_frame["v_i+"] % 4 # Alpha collatz_frame["alpha_i"] = collatz_frame["kv_i+1"].apply(commons.trailing_zeros) collatz_frame["alpha_i"] = collatz_frame["alpha_i"].astype('int64') collatz_frame["alpha_i_max"] = log2(k_factor) + collatz_frame["v_i"].apply(log2) collatz_frame["alpha_i_max"] += (1 + 1 / (k_factor * collatz_frame["v_i"])).apply(log2) # Round result here to avoid loss of precision errors collatz_frame["alpha_i_max"] = collatz_frame["alpha_i_max"].round(9) collatz_frame["alpha"] = collatz_frame["alpha_i"].cumsum() collatz_frame["alpha_cycle"] = (log2(k_factor) * collatz_frame["n"]).astype('int64') + 1 collatz_frame["alpha_max"] = log2(start_value) + (collatz_frame["n"] * log2(k_factor)) collatz_frame["alpha_max"] = collatz_frame["alpha_max"].astype('int64') + 1 # Beta collatz_frame["beta_i"] = 1 + 1 / (k_factor * collatz_frame["v_i"]) collatz_frame["beta"] = collatz_frame["beta_i"].cumprod() # Lambda collatz_frame["bin_len"] = collatz_frame["v_i_log2"].astype('int64') + 1 collatz_frame["next_bin_len"] = collatz_frame["kv_i+1_log2"].astype('int64') + 1 collatz_frame["bin_diff"] = collatz_frame["next_bin_len"] - collatz_frame["bin_len"] collatz_frame["lambda_i"] = collatz_frame["bin_diff"] collatz_frame.loc[collatz_frame["lambda_i"] < 0, "lambda_i"] = 0 collatz_frame["lambda"] = collatz_frame["lambda_i"].cumsum() collatz_frame["lambda_i_min"] = int(log2(k_factor)) collatz_frame["lambda_i_max"] = int(log2(k_factor) + 1) collatz_frame["lambda_hyp"] = (collatz_frame["n"] * log2(k_factor)) collatz_frame["lambda_min"] = collatz_frame["lambda_hyp"].astype('int64') collatz_frame["lambda_max"] = collatz_frame["lambda_hyp"].astype('int64') + 2 # Omega collatz_frame["omega_i"] = collatz_frame["lambda_i"] - collatz_frame["alpha_i"] collatz_frame["omega"] = collatz_frame["lambda"] - collatz_frame["alpha"] collatz_frame["omega_i_max"] = collatz_frame["lambda_i_max"] - 1 collatz_frame["omega_max"] = collatz_frame["lambda_max"] - collatz_frame["n"] result_frame = collatz_frame[[ "sequence_id", "sequence_len", "n", "k_factor", "v_1", "v_i", "kv_i+1", "v_i+", "v_i_log2", "v_i+_log2", "kv_i+1_log2", "v_i_mod4", "kv_i+1_mod4", "v_i+_mod4", "v_1_bin", "v_i_bin", "terminal", "cycle", "alpha_i", "alpha_i_max", "alpha", "alpha_cycle", "alpha_max", "beta_i", "beta", "bin_len", "next_bin_len", "lambda_i", "lambda_i_min", "lambda_i_max", "lambda", "lambda_min", "lambda_max", "omega_i", "omega_i_max", "omega", "omega_max"]] result_frame.columns = [ "sequence_id", "sequence_len", "n", "k", "v_1", "v_i", "kv_i+1", "v_i+", "v_i_log2", "v_i+_log2", "kv_i+1_log2", "v_i_mod4", "kv_i+1_mod4", "v_i+_mod4", "v_1_bin", "v_i_bin", "terminal", "cycle", "a_i", "a_i_max", "a", "a_cycle", "a_max", "b_i", "b", "bin_len", "next_bin_len", "l_i", "l_i_min", "l_i_max", "l", "l_min", "l_max", "o_i", "o_i_max", "o", "o_max"] return result_frame
d886631d153531fafa2cd9b15be621df9746a909
3,636,915
def _is_unique_rec_name(info_name): """ helper method to see if we should use the uniqueness recommendation on the fact comparison """ UNIQUE_INFO_SUFFIXES = [".ipv4_addresses", ".ipv6_addresses", ".mac_address"] UNIQUE_INFO_PREFIXES = ["fqdn"] if info_name.startswith("network_interfaces.lo."): return False for prefix in UNIQUE_INFO_PREFIXES: if info_name.startswith(prefix): return True for suffix in UNIQUE_INFO_SUFFIXES: if info_name.endswith(suffix): return True return False
cba744e1e5b6a9612363d2ca12d4751e1894c8ad
3,636,917
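A few illustrative checks of the recommendation rule implemented above:

assert _is_unique_rec_name('network_interfaces.eth0.ipv4_addresses') is True   # unique suffix
assert _is_unique_rec_name('fqdn') is True                                     # unique prefix
assert _is_unique_rec_name('network_interfaces.lo.ipv4_addresses') is False    # loopback is excluded
assert _is_unique_rec_name('cpu_count') is False                               # no rule matches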
def initialized(): """ Connection finished initializing? """ return __context__["netmiko_device"].get("initialized", False)
6ca85744478bdb17ac99ce827825cde1db8bae3a
3,636,918
def n_round(a, b): """safe round""" element_round = np.vectorize(np.round) return element_round(a, intify(b))
2c38e1585b71d5717ea3cc560521b8a006ceeee3
3,636,919
def _json_view_params(shape, affine, vmin, vmax, cut_slices, black_bg=False, opacity=1, draw_cross=True, annotate=True, title=None, colorbar=True, value=True): """ Create a dictionary with all the brainsprite parameters. Returns: params """ # Set color parameters if black_bg: cfont = '#FFFFFF' cbg = '#000000' else: cfont = '#000000' cbg = '#FFFFFF' # Deal with limitations of json dump regarding types if type(vmin).__module__ == 'numpy': vmin = vmin.tolist() # json does not deal with numpy array if type(vmax).__module__ == 'numpy': vmax = vmax.tolist() # json does not deal with numpy array params = {'canvas': '3Dviewer', 'sprite': 'spriteImg', 'nbSlice': {'X': shape[0], 'Y': shape[1], 'Z': shape[2]}, 'overlay': {'sprite': 'overlayImg', 'nbSlice': {'X': shape[0], 'Y': shape[1], 'Z': shape[2]}, 'opacity': opacity}, 'colorBackground': cbg, 'colorFont': cfont, 'crosshair': draw_cross, 'affine': affine.tolist(), 'flagCoordinates': annotate, 'title': title, 'flagValue': value, 'numSlice': {'X': cut_slices[0] - 1, 'Y': cut_slices[1] - 1, 'Z': cut_slices[2] - 1}} if colorbar: params['colorMap'] = {'img': 'colorMap', 'min': vmin, 'max': vmax} return params
50ea71a5a99facf4c472f0c18984d84e23b8e301
3,636,920
from typing import List import datetime def get_timestamps_from_df_data(df) -> List[datetime.datetime]: """Get a list of timestamps from rows of a DataFrame containing raw data. """ timestamps = [] for index, row in df.iterrows(): year = int(row["dteday"][:4]) month = int(row["dteday"][5:7]) day = int(row["dteday"][-2:]) hour = int(row["hr"]) timestamp = datetime.datetime(year, month, day, hour) timestamps.append(timestamp) return timestamps
21f985ebf28d6f5819635a13294e8db0544a292b
3,636,921
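A small usage sketch for get_timestamps_from_df_data above, with a toy frame carrying the dteday and hr columns the function reads:

import pandas as pd

df = pd.DataFrame({'dteday': ['2012-01-01', '2012-01-01'], 'hr': [0, 1]})
print(get_timestamps_from_df_data(df))
# [datetime.datetime(2012, 1, 1, 0, 0), datetime.datetime(2012, 1, 1, 1, 0)]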
def select(var_name, attr_name=None): """ Return attribute(s) of a variable given the variable name and an optional field name, or list of attribute name(s) :param var_name: Name of the variable we're interested in. :param attr_name: A string representing the name of the attribute whose value we want to fetch. This can also be a list of strings in case of multiple attributes. If None, all attributes of the variable are returned. :return: A dictionary of attribute => value mappings if multiple attributes were requested (i.e. attr_name is a list), or a string value if a single attribute name was requested (i.e. attr_name is a string) """ single = isinstance(attr_name, str) if attr_name is not None: if single: params = {attr_name: attr_name} else: params = dict([(f, f) for f in attr_name]) else: params = None endpoint = 'variable/%s' % var_name data = _get(endpoint, params) return data[attr_name] if single else data
22b65439ff4dc831c2fb334595b0f0cd2e764b67
3,636,923
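Hypothetical calls to select above (the variable and attribute names here are made up; _get is the module's own HTTP helper):

units = select('temperature', 'units')                    # single attribute -> plain value
meta = select('temperature', ['units', 'description'])    # several attributes -> dict of name -> value
everything = select('temperature')                        # no attr_name -> all attributes as a dict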
import re def _parseWinBuildTimings(logfile): """Variant of _parseBuildTimings for Windows builds.""" res = {'Compile': re.compile(r'\d+>Time Elapsed (\d+):(\d+):([0-9.]+)'), 'Test running': re.compile(r'.*?\.+.*?([0-9.]+) sec')} times = dict([(k, 0.0) for k in res]) for line in logfile: for key, regexp in res.iteritems(): m = regexp.match(line) if m: multiplier = 1 for time_part in reversed(m.groups()): times[key] += float(time_part) * multiplier multiplier *= 60 break times['Total'] = sum(times.values()) return times
0473c426d29bb7fe44ff3384f81962f121c11afa
3,636,924
def get_malid(anime: AnimeThemeAnime) -> int: """ Return the MyAnimeList id of the anime, taken from its MyAnimeList resource entry. """ for resource in anime['resources']: if resource["site"] == "MyAnimeList": return resource['external_id']
a745f95e73e8e061d98100e314faf5a662d69693
3,636,926
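An illustrative call to get_malid above, with a dict shaped like an AnimeThemeAnime payload (the resource entries are made up):

anime = {'resources': [{'site': 'AniList', 'external_id': 101},
                       {'site': 'MyAnimeList', 'external_id': 5114}]}
print(get_malid(anime))   # 5114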