Columns: content (string, lengths 35 to 762k) · sha1 (string, length 40) · id (int64, 0 to 3.66M)
import re

def is_mismatch_before_n_flank_of_read(md, n):
    """
    Returns True if there is a mismatch within the first n nucleotides of a
    read, or within the last n nucleotides of a read.
    :param md: string
    :param n: int
    :return is_mismatch: boolean
    """
    is_mismatch = False
    flank_mm_regex = r"^(\d+).*[ACGT](\d+)$"
    flank_mm = re.findall(flank_mm_regex, md)
    if flank_mm:
        flank_mm = flank_mm[0]
        if flank_mm[1]:
            if int(flank_mm[1]) < n:
                is_mismatch = True
        if flank_mm[0]:
            if int(flank_mm[0]) < n:
                is_mismatch = True
    return is_mismatch
1e41c67e29687d93855ed212e2d9f683ef8a88d7
3,644,836
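A quick sanity check of the flank test above (hypothetical inputs, assuming standard SAM MD-tag strings):

assert is_mismatch_before_n_flank_of_read("3A96", 5) is True    # mismatch 3 bases from the start
assert is_mismatch_before_n_flank_of_read("50C49", 5) is False  # mismatch in the middle of the read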
from typing import Dict

def get_county() -> Dict:
    """Main method for populating county data"""
    api = SocrataApi('https://data.marincounty.org/')
    notes = ('This data only accounts for Marin residents and does not '
             'include inmates at San Quentin State Prison. '
             'The tests timeseries only includes the number of tests '
             'performed and not how many were positive or negative. '
             'Demographic breakdowns for testing are not available.')
    return {
        'name': 'Marin',
        'update_time': get_latest_update(api).isoformat(),
        # The county's data dashboard is at:
        # https://coronavirus.marinhhs.org/surveillance
        # Which links to the data portal category with the data sets we
        # actually use at:
        # https://data.marincounty.org/browse?q=covid
        'source_url': 'https://coronavirus.marinhhs.org/surveillance',
        'meta_from_source': '',
        'meta_from_baypd': notes,
        'series': {
            'cases': get_timeseries_cases(api),
            'deaths': get_timeseries_deaths(api),
            'tests': get_timeseries_tests(api),
        },
        'case_totals': get_case_totals(api),
        'death_totals': get_death_totals(api),
        # Marin does not currently provide demographic breakdowns for
        # testing, so no test totals right now.
    }
62fd267141e3cdcb3f5b81b78be2aafb1322335b
3,644,837
import traceback

from rest_framework import status
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response

def address_book(request):
    """
    This endpoint returns contact details of all people, paginated
    10 items at a time.
    """
    try:
        paginator = PageNumberPagination()
        paginator.page_size = 10
        persons = Person.objects.all()
        paginated_persons = paginator.paginate_queryset(persons, request)
        serializer = PersonDetailSerializer(paginated_persons, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
    except Exception:
        print(traceback.format_exc())
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
88ec5613a7433128a2d06665319a6e3fd83f870f
3,644,839
def decrement_items(inventory, items):
    """
    :param inventory: dict - inventory dictionary.
    :param items: list - list of items to decrement from the inventory.
    :return: dict - updated inventory dictionary with items decremented.
    """
    return add_or_decrement_items(inventory, items, 'minus')
253339e3a8f9ff49e69372dc99d8b8f626a3b98b
3,644,840
import torch as th

def global_ave_pool(x):
    """Global average pooling of convolutional layers over the spatial
    dimensions. Results in a 2D tensor with dimensions
    (batch_size, number of channels).
    """
    return th.mean(x, dim=[2, 3])
3f681e39041762ee2ca8bc52c542952eebd9b97c
3,644,841
import operator

import numpy as np

def get_output(interpreter, top_k=1, score_threshold=0.0):
    """Returns no more than top_k classes with score >= score_threshold."""
    scores = output_tensor(interpreter)
    classes = [
        Class(i, scores[i])
        for i in np.argpartition(scores, -top_k)[-top_k:]
        if scores[i] >= score_threshold
    ]
    return sorted(classes, key=operator.itemgetter(1), reverse=True)
69c4e956cee796384fa74d12338f3fb2cc90ba31
3,644,843
from sklearn.feature_extraction.text import CountVectorizer

def bag_of_words_features(data, binary=False):
    """Return features using bag of words"""
    vectorizer = CountVectorizer(
        ngram_range=(1, 3), min_df=3, stop_words="english", binary=binary
    )
    return vectorizer.fit_transform(data["joined_lemmas"])
55ed963df31c2db79eaab58b585ad264a257c241
3,644,844
import time

def duration(func):
    """Timing decorator"""
    def wrapper(*args, **kwargs):
        start = time.time()
        f = func(*args, **kwargs)
        print("Scan finished, took " + str(int(time.time() - start)) + " seconds!")
        return f
    return wrapper
c55a941574a92cbe70c9b265eaa39563b91ab45a
3,644,845
def enumerate_assignments(max_context_number):
    """
    Enumerate all possible assignments of contexts to clusters for a fixed
    number of contexts. Has the hard assumption that the first context
    belongs to cluster #1, to remove redundant assignments that differ only
    in labeling.
    :param max_context_number: int
    :return: list of lists, each a function that takes in a context id
        number and returns a cluster id number
    """
    cluster_assignments = [{}]  # context 0 is always in cluster 1
    for contextNumber in range(0, max_context_number):
        cluster_assignments = augment_assignments(cluster_assignments, contextNumber)
    return cluster_assignments
881723e2ca6a663821979a9029e03bb4f35195dc
3,644,846
import tensorflow as tf

def KL_monte_carlo(z, mean, sigma=None, log_sigma=None):
    """Computes the KL divergence at a point, given by z.
    Implemented based on https://www.tensorflow.org/tutorials/generative/cvae
    This is the part "log(p(z)) - log(q(z|x))" where z is sampled from q(z|x).

    Parameters
    ----------
    z : (B, N)
    mean : (B, N)
    sigma : (B, N) | None
    log_sigma : (B, N) | None

    Returns
    -------
    KL : (B,)
    """
    if log_sigma is None:
        log_sigma = tf.math.log(sigma)
    zeros = tf.zeros_like(z)
    log_p_z = log_multivar_gaussian(z, mean=zeros, log_sigma=zeros)
    log_q_z_x = log_multivar_gaussian(z, mean=mean, log_sigma=log_sigma)
    return log_q_z_x - log_p_z
6d509607b3d4d6c248544330af06f2ef92fc3739
3,644,847
import numpy as np

def get_order_discrete(p, x, x_val, n_full=None):
    """
    Calculate the order of the discrete features according to the alt/null ratio.

    Args:
        p ((n,) ndarray): The p-values.
        x ((n,) ndarray): The covariates. The data is assumed to have been
            preprocessed.
        x_val ((n_val,) ndarray): All possible values for x, sorted in
            ascending order.
        n_full (int): Total number of hypotheses before filtering.

    Returns:
        x_order ((d,) ndarray): the order (of x_val) from smallest alt/null
            ratio to the largest.
    """
    n_val = x_val.shape[0]
    # Separate the null and the alt proportion.
    _, t_BH = bh_test(p, alpha=0.1, n_full=n_full)
    x_null, x_alt = x[p > 0.75], x[p < t_BH]
    # Calculate the alt/null ratio.
    cts_null = np.zeros([n_val], dtype=int)
    cts_alt = np.zeros([n_val], dtype=int)
    for i, val in enumerate(x_val):
        cts_null[i] = np.sum(x_null == val) + 1
        cts_alt[i] = np.sum(x_alt == val) + 1
    p_null = cts_null / np.sum(cts_null)
    p_alt = cts_alt / np.sum(cts_alt)
    p_ratio = p_alt / p_null
    # Calculate the order of x_val based on the ratio.
    x_order = p_ratio.argsort()
    return x_order
de8f05d7a882c2917e618bf315a45969f55dbd16
3,644,848
def _read_txt(file_path: str) -> str:
    """
    Read specified file path's text.

    Parameters
    ----------
    file_path : str
        Target file path to read.

    Returns
    -------
    txt : str
        Read txt.
    """
    with open(file_path) as f:
        txt: str = f.read()
    return txt
5f0657ee223ca9f8d96bb612e35304a405d2339e
3,644,849
import thefuzz.process
from thefuzz import fuzz

def dedupe(entries):
    """
    Uses fuzzy matching to remove duplicate entries.
    """
    return thefuzz.process.dedupe(entries, THRESHOLD, fuzz.token_set_ratio)
d5d56f2acc25a107b5f78eefc4adc71676712f98
3,644,851
import binascii

def generate_openssl_rsa_refkey(key_pub_raw,  # pylint: disable=too-many-locals, too-many-branches, too-many-arguments, too-many-statements
                                keyid_int, refkey_file, key_size,
                                encode_format="", password="nxp", cert=""):
    """ Generate rsa reference key using openssl
    :param key_pub_raw: Retrieved public key
    :param keyid_int: Key index
    :param refkey_file: File name to store reference key
    :param key_size: RSA key size
    :param encode_format: Encode format to store file
    :param password: Password for encryption of pkcs12 reference key
    :param cert: Input certificate
    :return: Status
    """
    # generate rsa key pair
    key_openssl = rsa.generate_private_key(public_exponent=65537,
                                           key_size=key_size,
                                           backend=default_backend())
    key_prv_bytes = key_openssl.private_bytes(
        encoding=Encoding.DER,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption())
    key_openssl_hex = binascii.hexlify(key_prv_bytes)
    key_openssl_list = list()
    for k in range(0, len(key_openssl_hex), 2):
        key_openssl_list.append(key_openssl_hex[k:k + 2])

    # convert the retrieved public key to hex format
    key_pub_list = list(key_pub_raw)

    # trim the header of public key
    if key_size == 1024:
        key_pub_no_header_list = key_pub_list[25:]
    elif key_size in [2048, 3072, 4096]:
        key_pub_no_header_list = key_pub_list[28:]
    else:
        log.error("key size: %s is not supported. Should be one of "
                  "1024, 2048, 3072, 4096", str(key_size))
        return apis.kStatus_SSS_Fail

    key_pub_str_list = list()
    for key_pub_no_header_item in key_pub_no_header_list:
        key_pub_no_header_item = format(key_pub_no_header_item, 'x')
        if len(key_pub_no_header_item) == 1:
            key_pub_no_header_item = "0" + key_pub_no_header_item
        key_pub_str_list.append(key_pub_no_header_item)

    openssl_index = 7

    # Public Key section
    retrieved_pub_len = get_length(key_pub_str_list)
    openssl_pub_len = get_length(key_openssl_list[openssl_index:])
    key_openssl_list = replace_bytes(key_openssl_list, openssl_pub_len, openssl_index,
                                     key_pub_str_list, retrieved_pub_len)
    openssl_index += retrieved_pub_len

    # publicExponent section
    openssl_index += get_length(key_openssl_list[openssl_index:])

    # Private key Exponent section
    openssl_index += get_length(key_openssl_list[openssl_index:])

    # prime1 section
    magic_prime1_data = ['02', '01', '01']
    openssl_prime1_len = get_length(key_openssl_list[openssl_index:])
    key_openssl_list = replace_bytes(key_openssl_list, openssl_prime1_len, openssl_index,
                                     magic_prime1_data, len(magic_prime1_data))
    openssl_index += len(magic_prime1_data)

    # convert keyID to hex format and add TLV
    keyid_str = format("%08x" % keyid_int)
    key_id_list = ['02']
    if len(keyid_str) < 31:
        key_id_len = int(len(keyid_str) / 2)
        key_id_len_hex = format("%x" % key_id_len)
        if len(key_id_len_hex) == 1:
            key_id_len_hex = "0" + key_id_len_hex
        key_id_list.append(key_id_len_hex)
    for i in range(0, len(keyid_str), 2):
        key_id_list.append(keyid_str[i:i + 2])

    # prime 2 section
    openssl_prime2_len = get_length(key_openssl_list[openssl_index:])
    key_openssl_list = replace_bytes(key_openssl_list, openssl_prime2_len, openssl_index,
                                     key_id_list, len(key_id_list))
    openssl_index += len(key_id_list)

    # exponent1 section
    openssl_index += get_length(key_openssl_list[openssl_index:])

    # exponent2 section
    openssl_index += get_length(key_openssl_list[openssl_index:])

    # coefficient section
    magic_mod_p = ['02', '04', 'a5', 'a6', 'b5', 'b6']
    openssl_coefficient_len = get_length(key_openssl_list[openssl_index:])
    key_openssl_list = replace_bytes(key_openssl_list, openssl_coefficient_len, openssl_index,
                                     magic_mod_p, len(magic_mod_p))

    # Recalculate total length of the key
    key_openssl_len = len(key_openssl_list) - 4
    key_openssl_len_str = format("%04x" % key_openssl_len)
    total_len_list = []
    for i in range(0, len(key_openssl_len_str), 2):
        total_len_list.append(key_openssl_len_str[i:i + 2])
    key_openssl_list[2] = total_len_list[0]
    key_openssl_list[3] = total_len_list[1]

    # convert key to der or pem format
    key_der_hex = ""
    for key_openssl_item in key_openssl_list:
        if isinstance(key_openssl_item, bytes):
            key_der_hex += bytes.decode(key_openssl_item)
        else:
            key_der_hex += key_openssl_item
    key_der = binascii.unhexlify(key_der_hex)
    key_pem_obj = openssl.backend.load_der_private_key(key_der, None)
    key_pem = key_pem_obj.private_bytes(Encoding.PEM,
                                        PrivateFormat.TraditionalOpenSSL,
                                        NoEncryption())
    status = write_refkey_to_file(refkey_file, password, key_pem, key_der,
                                  cert, encode_format)
    return status
ca3acdcf4fe615378f2f7088d015a7acbc58b7ff
3,644,852
from sqlalchemy import select

async def fetch_ongoing_alerts(
    requester=Security(get_current_access, scopes=[AccessType.admin, AccessType.user]),
    session=Depends(get_session)
):
    """
    Retrieves the list of ongoing alerts and their information
    """
    if await is_admin_access(requester.id):
        query = (
            alerts.select().where(
                alerts.c.event_id.in_(
                    select([events.c.id])
                    .where(events.c.end_ts.is_(None))
                )))
        return await crud.base.database.fetch_all(query=query)
    else:
        retrieved_alerts = (session.query(models.Alerts)
                            .join(models.Events)
                            .filter(models.Events.end_ts.is_(None))
                            .join(models.Devices)
                            .join(models.Accesses)
                            .filter(models.Accesses.group_id == requester.group_id))
        retrieved_alerts = [x.__dict__ for x in retrieved_alerts.all()]
        return retrieved_alerts
721deaac7cca5f6589417f07d66a83111a062134
3,644,853
def breweryBeers(id):
    """Finds the beers that belong to the brewery with the id provided

    id: string
    return: json object list or empty json list
    """
    try:
        # [:-1] because the id has a '-' added to the end to indicate
        # that it is for this method; remove the last character
        return BreweryDb.brewery(id[:-1] + "/beers")['data']
    except Exception:
        return id[:-1] + "/beers"
f2d8824ad49ffeeec68077cb5e0ed143f4603d4e
3,644,854
import pandas as pd

def min_max_date(rdb, patient):
    """ Returns min and max date for selected patient """
    sql = """SELECT min_date,max_date FROM patient WHERE "Name"='{}'""".format(patient)
    try:
        df = pd.read_sql(sql, rdb)
        min_date, max_date = df['min_date'].iloc[0].date(), df['max_date'].iloc[0].date()
    except Exception:
        min_date, max_date = '', ''
    return min_date, max_date
7f08f42bd7dd9742bef300f5f7009807e47b7f23
3,644,855
from math import sqrt

def integrate(f, a, b, N, method):
    """
    @param f: function to integrate
    @param a: initial point
    @param b: end point
    @param N: number of intervals for precision
    @param method: trapeze, rectangle, Simpson, Gauss2
    @return: integral from a to b of f(x)
    """
    h = (b - a) / N
    Lhf = 0
    if method == "trapeze":
        for i in range(N):
            xi = a + i * h
            Lhf += f(xi) + f(xi + h)
        Lhf *= h / 2
    elif method == "rectangle":
        for i in range(N):
            xi = a + i * h
            Lhf += f(xi + h / 2)  # midpoint rule
        Lhf *= h
    elif method == "Simpson":
        for i in range(N):
            xi = a + i * h
            Lhf += f(xi) + 4 * f(xi + h / 2) + f(xi + h)
        Lhf *= h / 6
    elif method == "Gauss2":
        for i in range(N):
            xi = a + i * h
            # two-point Gauss-Legendre nodes on each interval
            Lhf += f(xi + h * (1 / 2) * (1 - 1 / sqrt(3))) + f(xi + h * (1 / 2) * (1 + 1 / sqrt(3)))
        Lhf *= h / 2
    return Lhf
e716733160fd46943de3518e573215b3cf058113
3,644,856
def sum_naturals(n):
    """Sum the first N natural numbers.

    >>> sum_naturals(5)
    15
    """
    total, k = 0, 1
    while k <= n:
        total, k = total + k, k + 1
    return total
0ef1ff7e8f0f2df522c73d6d4affc890ba4ad2fa
3,644,857
def load_data(data_map, config, log):
    """Collect data locally and write to CSV.

    :param data_map: transform DataFrame map
    :param config: configurations
    :param log: logger object
    :return: None
    """
    for key, df in data_map.items():
        (df
         .coalesce(1)
         .write
         .csv(f'{config["output"]}/{key}', mode='overwrite', header=True))
    return None
2b690c4f5970df7f9e98ce22970ce3eb892f15bc
3,644,858
import logging

def _filter_credential_warning(record) -> bool:
    """Rewrite out credential not found message."""
    if (
        not record.name.startswith("azure.identity")
        or record.levelno != logging.WARNING
    ):
        return True
    message = record.getMessage()
    if ".get_token" in message:
        if message.startswith("EnvironmentCredential"):
            print("Attempting to sign-in with environment variable credentials...")
        if message.startswith("AzureCliCredential"):
            print("Attempting to sign-in with Azure CLI credentials...")
        if message.startswith("ManagedIdentityCredential"):
            print("Attempting to sign-in with Managed Instance credentials...")
            print("Falling back to interactive logon.")
    return not message
bc9d2a96ccadfbdb297af86bbdf0f80ab8d2dafa
3,644,860
import importlib.util

def import_module_from_path(mod_name, mod_path):
    """Import module with name `mod_name` from file path `mod_path`"""
    spec = importlib.util.spec_from_file_location(mod_name, mod_path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod
18891db514b4f1e41bce6de69f5b66fbf51d06e5
3,644,861
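Usage sketch for the loader above; the module path and run() attribute are hypothetical:

plugin = import_module_from_path("plugin", "/path/to/plugin.py")  # hypothetical path
plugin.run()  # hypothetical attribute of the loaded module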
import numpy as np

def preprocessing(text, checkpoint_dir, minocc):
    """
    This time, we cannot leave the file as it is. We have to modify it first.
    - replace "\n" by " \n " -> newline is a word
    - insert space between punctuation and last word of sentence
    - create vocab, but only for those words that occur more than once
    - replace all words that occur too seldomly with "<unk>"
    returns the list of integers we will use as the dataset as well as
    char2idx and idx2char
    """
    splitted = prepare_text(text)
    print("Total number of words:", len(splitted))
    occurences = dict()
    for word in splitted:
        if word in occurences:
            occurences[word] += 1
        else:
            occurences[word] = 1
    vocab = ["<unk>"]
    for word in occurences:
        if occurences[word] > minocc:
            vocab.append(word)
    # removing words that appear minocc times or fewer
    splitted = remove_unknowns(vocab, splitted)
    print(splitted[0:250])
    print("Number of unique relevant words:", len(vocab))
    char2idx = {u: i for i, u in enumerate(vocab)}
    idx2char = np.array(vocab)
    pickle_rick(checkpoint_dir, char2idx, 'char2idx')
    pickle_rick(checkpoint_dir, idx2char, 'idx2char')
    pickle_rick(checkpoint_dir, splitted, 'dataset')
    return splitted, char2idx, idx2char
f3dd597ac144d1c52ca2a65852ef59f2cee63d8b
3,644,862
def dwave_chimera_graph(
    m,
    n=None,
    t=4,
    draw_inter_weight=draw_inter_weight,
    draw_intra_weight=draw_intra_weight,
    draw_other_weight=draw_inter_weight,
    seed=0,
):
    """
    Generate DWave Chimera graph as described in [1] using dwave_networkx.

    Parameters
    ----------
    m: int
        Number of cells per column
    n: int
        Number of cells per row
    t: int
        Number of nodes on each side of a bipartite cell subgraph
    draw_inter_weight: function (seed) -> number
        Function to call for weights of inter-cell edges
    draw_intra_weight: function (seed) -> number
        Function to call for weights of intra-cell edges
    draw_other_weight: function (seed) -> number
        Function to call for weights of all other edges
    seed: integer, random_state, or None
        Indicator of random number generation state

    Returns
    -------
    graph: nx.Graph
        The generated Chimera graph

    References
    ----------
    ..[1] https://docs.ocean.dwavesys.com/en/latest/concepts/topology.html
    """
    if not n:
        n = m
    g = dwave.chimera_graph(m, n, t)
    _initialize_weights_chimera(
        chimera_graph=g,
        size=m,
        draw_inter_weight=lambda: draw_inter_weight(seed),
        draw_intra_weight=lambda: draw_intra_weight(seed),
        draw_other_weight=lambda: draw_other_weight(seed),
    )
    return g
cec6232d1f3413b6cedd74d909e8d9fa03d9b43f
3,644,863
def extract_first_value_in_quotes(line, quote_mark):
    """
    Extracts first value in quotes (single or double) from a string.
    Line is left-stripped from whitespaces before extraction.

    :param line: string
    :param quote_mark: type of quotation mark: ' or "
    :return: Dict:
        'value': extracted value;
        'remainder': the remainder after extraction;
        'error': empty string if success or 'syntax' otherwise
    """
    line = line.lstrip()
    result = {'value': '', 'remainder': line, 'error': 'syntax'}
    if len(line) < 2:
        return result
    if line[0] != quote_mark:
        return result
    next_qm_pos = line.find(quote_mark, 1)
    if next_qm_pos == -1:
        return result
    result['value'] = line[1:next_qm_pos]
    result['remainder'] = line[next_qm_pos + 1:]
    result['error'] = ''
    return result
4f614cbbb3a1a04ece0b4da63ea18afb32c1c86b
3,644,864
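The return shape of the extractor above, illustrated on two hypothetical inputs:

extract_first_value_in_quotes('"hello" world', '"')
# -> {'value': 'hello', 'remainder': ' world', 'error': ''}
extract_first_value_in_quotes('no quotes here', "'")
# -> {'value': '', 'remainder': 'no quotes here', 'error': 'syntax'}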
def dynamic(graph):
    """Returns shortest tour using dynamic programming approach.

    The idea is to store lengths of smaller sub-paths and re-use them to
    compute larger sub-paths.
    """
    adjacency_M = graph.adjacency_matrix()
    tour = _dynamic(adjacency_M, start_node=0)
    return tour
06d1adcadc6456aa29a7c0d176329f9d1569bf58
3,644,865
import yaml

def read_login_file():
    """
    Parse the credentials file into username and password.

    Returns
    -------
    dict
    """
    with open('.robinhood_login', 'r') as login_file:
        credentials = yaml.safe_load(login_file)
    return credentials
16ef8a74c9523ac0809e80995069c3bbc0e8f8c0
3,644,866
from itertools import chain

def flatten(ls):
    """ Flatten list of list """
    return list(chain.from_iterable(ls))
afab4515644ce340a73f5a5cf9f97e59fa8c4d7e
3,644,867
import numpy as np
import photutils

def gaussian_kernel(size, size_y=None):
    """ Gaussian kernel. """
    size = int(size)
    if not size_y:
        size_y = size
    else:
        size_y = int(size_y)
    x, y = np.mgrid[-size:size + 1, -size_y:size_y + 1]
    g = np.exp(-(x ** 2 / float(size) + y ** 2 / float(size_y)))
    fwhm = size
    fwhm_aper = photutils.CircularAperture((frame_center(g)), fwhm / 2.)
    fwhm_aper_phot = photutils.aperture_photometry(g, fwhm_aper)
    g_norm = g / np.array(fwhm_aper_phot['aperture_sum'])
    return g_norm / g_norm.max()
6752c4fc9355507d3b411515b8c687dc02b81d2b
3,644,868
from typing import Any

def parse_property_value(prop_tag: int, raw_values: list, mem_id: int = 0) -> Any:
    """ Parse property raw values

    :param prop_tag: The property tag, see 'PropertyTag' enum
    :param raw_values: The property values
    :param mem_id: External memory ID (default: 0)
    """
    if prop_tag not in PROPERTIES:
        return None
    cls = PROPERTIES[prop_tag]['class']  # type: ignore
    kwargs = PROPERTIES[prop_tag]['kwargs']  # type: ignore
    kwargs['mem_id'] = mem_id  # type: ignore
    return cls(prop_tag, raw_values, **kwargs)
fc8d54a3f8b8ca762acdc5f6123749236e4eaeb3
3,644,869
from typing import Iterator, List, Optional, Tuple

def scan_stanzas_string(
    s: str,
    *,
    separator_regex: Optional[RgxType] = None,
    skip_leading_newlines: bool = False,
) -> Iterator[List[Tuple[str, str]]]:
    """
    .. versionadded:: 0.4.0

    Scan a string for zero or more stanzas of RFC 822-style header fields
    and return a generator of lists of ``(name, value)`` pairs, where each
    list represents a stanza of header fields in the input.

    The stanzas are terminated by blank lines.  Consecutive blank lines
    between stanzas are treated as a single blank line.  Blank lines at the
    end of the input are discarded without creating a new stanza.

    .. deprecated:: 0.5.0
        Use `scan_stanzas()` instead

    :param s: a string which will be broken into lines on CR, LF, and CR LF
        boundaries and passed to `scan_stanzas()`
    :param kwargs: Passed to the `Scanner` constructor
    :rtype: generator of lists of pairs of strings
    :raises ScannerError: if the header section is malformed
    """
    return scan_stanzas(  # pragma: no cover
        s,
        separator_regex=separator_regex,
        skip_leading_newlines=skip_leading_newlines,
    )
f68694ce344b738f23b689b74d92f7ab4c20b237
3,644,870
def format_dependency(dependency: str) -> str:
    """Format the dependency for the table."""
    return "[coverage]" if dependency == "coverage" else f"[{dependency}]"
981a38074dbfb1f332cc49bce2c6d408aad3e9e2
3,644,871
import os.path as op

def _addSuffixToFilename(suffix, fname):
    """Add suffix to filename, whilst preserving original extension, e.g.:
    'file.ext1.ext2' + '_suffix' -> 'file_suffix.ext1.ext2'
    """
    head = op.split(fname)[0]
    fname, ext = _splitExts(fname)
    return op.join(head, fname + suffix + ext)
2fc0a16f6f8b8be1f27fd7ff32673ed79f84fccb
3,644,872
import re

def parse_into_tree(abbr, doc_type='html'):
    """
    Transform an abbreviation into a tree of elements.
    @param abbr: Abbreviation
    @type abbr: str
    @param doc_type: Document type (xsl, html)
    @type doc_type: str
    @return: Tag
    """
    root = Tag('', 1, doc_type)
    parent = root
    last = None
    token = re.compile(r'([\+>])?([a-z][a-z0-9:\!\-]*)(#[\w\-\$]+)?((?:\.[\w\-\$]+)*)(?:\*(\d+))?', re.IGNORECASE)

    def expando_replace(m):
        ex = m.group(1)
        if 'expandos' in zen_settings[doc_type] and ex in zen_settings[doc_type]['expandos']:
            return zen_settings[doc_type]['expandos'][ex]
        else:
            return ex

    # expand the expandable elements
    abbr = re.sub(r'([a-z][a-z0-9]*)\+$', expando_replace, abbr)

    def token_expander(operator, tag_name, id_attr, class_name, multiplier):
        multiplier = multiplier and int(multiplier) or 1
        current = is_snippet(tag_name, doc_type) and Snippet(tag_name, multiplier, doc_type) or Tag(tag_name, multiplier, doc_type)
        if id_attr:
            current.add_attribute('id', id_attr[1:])
        if class_name:
            current.add_attribute('class', class_name[1:].replace('.', ' '))
        # move deeper into the tree
        if operator == '>' and token_expander.last:
            token_expander.parent = token_expander.last
        token_expander.parent.add_child(current)
        token_expander.last = current
        return ''

    token_expander.parent = root
    token_expander.last = None
    abbr = re.sub(token, lambda m: token_expander(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5)), abbr)
    # if abbr is now an empty string, the whole abbreviation was successfully
    # converted into a tree; otherwise the abbreviation was invalid
    return not abbr and root or None
8bb0ecaa9b2a2e9ce41882b8f140442f28f3c922
3,644,873
def banner():
    """Verify banner in HTML file match expected."""
    def match(path, expected_url=None, expected_base=None):
        """Assert equals and return file contents.

        :param py.path.local path: Path to file to read.
        :param str expected_url: Expected URL in <a href="" /> link.
        :param str expected_base: Expected base message.

        :return: File contents.
        :rtype: str
        """
        contents = path.read()
        actual = RE_BANNER.findall(contents)
        if not expected_url and not expected_base:
            assert not actual
        else:
            assert actual == [(expected_url, expected_base)]
        return contents
    return match
54777fe767075561cbb20c3e7ab88ca209fa8c87
3,644,875
import operator

from tqdm import tqdm

def rerank(x2ys, x2cnt, x2xs, width, n_trans):
    """Re-rank word translations by computing CPE scores.

    See paper for details about the CPE method."""
    x2ys_cpe = dict()
    for x, ys in tqdm(x2ys.items()):
        cntx = x2cnt[x]
        y_scores = []
        for y, cnty in sorted(ys.items(), key=operator.itemgetter(1), reverse=True)[:width]:
            ts = cnty / float(cntx)  # translation score: initial value
            if x in x2xs:
                for x2, cntx2 in x2xs[x].items():  # collocates
                    p_x_x2 = cntx2 / float(cntx)
                    p_x2_y2 = 0
                    if x2 in x2ys:
                        p_x2_y2 = x2ys[x2].get(y, 0) / float(x2cnt[x2])
                    ts -= (p_x_x2 * p_x2_y2)
            y_scores.append((y, ts))
        _ys_ = sorted(y_scores, key=lambda y_score: y_score[1], reverse=True)[:n_trans]
        _ys_ = [each[0] for each in _ys_]
        x2ys_cpe[x] = _ys_
    return x2ys_cpe
57d9c5012341acf89e92ffd6df29688af5d6965f
3,644,876
def ParallelTempering(num_sweeps=10000, num_replicas=10,
                      max_iter=None, max_time=None, convergence=3):
    """Parallel tempering workflow generator.

    Args:
        num_sweeps (int, optional):
            Number of sweeps in the fixed temperature sampling.
        num_replicas (int, optional):
            Number of replicas (parallel states / workflow branches).
        max_iter (int/None, optional):
            Maximum number of iterations of the update/swaps loop.
        max_time (int/None, optional):
            Maximum wall clock runtime (in seconds) allowed in the
            update/swaps loop.
        convergence (int/None, optional):
            Number of times best energy of the coldest replica has to
            repeat before we terminate.

    Returns:
        Workflow (:class:`~hybrid.core.Runnable` instance).
    """
    # expand single input state into `num_replicas` replica states
    preprocess = SpawnParallelTemperingReplicas(num_replicas=num_replicas)

    # fixed temperature sampling on all replicas in parallel
    update = hybrid.Map(FixedTemperatureSampler(num_sweeps=num_sweeps))

    # replica exchange step: do the top-down sweep over adjacent pairs
    # (good hot samples sink to bottom)
    swap = SwapReplicasDownsweep()

    # loop termination key function
    def key(states):
        if states is not None:
            return states[-1].samples.first.energy

    # replicas update/swap until Loop termination criteria reached
    loop = hybrid.Loop(
        update | swap, max_iter=max_iter, max_time=max_time,
        convergence=convergence, key=key)

    # collapse all replicas (although the bottom one should be the best)
    postprocess = hybrid.MergeSamples(aggregate=True)

    workflow = preprocess | loop | postprocess
    return workflow
48b62b2814f67b66823fc1c35024eaab6cde7591
3,644,877
def get_document_info(file):
    """
    Scrape document information using ChemDataExtractor Scrapers

    :param file: file path to target article
    :type file: str
    :return: list of dicts containing the document information
    """
    if file.endswith('.html'):
        file_type = 'html'
    elif file.endswith('.xml'):
        file_type = 'xml'
    else:
        return
    print("file type", file_type)
    f = open(file, 'rb').read()
    sel = Selector.from_text(f)
    # Determine publisher, use the RSC scraper by default
    publisher = detect_publisher(f)
    if publisher == 'acs':
        document_info = AcsHtmlDocument(sel)
    elif publisher == 'rsc':
        document_info = RscHtmlDocument(sel)
    elif publisher == 'elsevier' and file_type == 'html':
        document_info = ElsevierHtmlDocument(sel)
    elif publisher == 'elsevier' and file_type == 'xml':
        document_info = ElsevierXmlDocument(sel)
    elif publisher == 'springer' and file_type == 'html':
        document_info = SpringerHtmlDocument(sel)
    else:
        print('Unknown journal for file ' + file + ', using RSC HTML formatting by default')
        document_info = RscHtmlDocument(sel)
    return document_info
5d5697ce9a7916920c938a3cff17fdeda8b5f81b
3,644,878
def qlog(q):
    """
    Compute logarithm of a unit quaternion (unit norm is important here).

    Let q = [a, qv], where a is the scalar part and qv is the vector part,
    and qv = sin(phi/2)*nv, where nv is a unit vector. Then

        ln(q) = ln(||q||) + qv / ||qv|| * arccos(a / ||q||)

    Therefore for a unit quaternion, the scalar part of ln(q) is zero and
    the vector part of ln(q) is 1/2 * phi * nv, i.e. half of the rotation
    vector rv = phi * nv, because a = cos(phi/2) in an attitude quaternion
    (see quatRotVec()).

    Reference: https://en.wikipedia.org/wiki/Quaternion

    NOTE 1: due to existing implementation in C++, this function returns
    just the vector part of ln(q)
    NOTE 2: According to the Wiki description, ln(q)_v should be a half of
    the rotation vector. However the previous implementation computed the
    full rotation vector. So, using the rotation vector for now until
    cleared up.
    """
    rv = quatRotVec(q)
    return rv
80e01568cc5fe2ab2c7d11bdd642906374992985
3,644,879
from datetime import datetime

def trx():
    """Response from ADN about current transaction APPROVED/DECLINED and
    showing receipt of transaction"""
    trx = web.trxs[-1]
    trx.shoppingCartUuid = request.args.get('shoppingCartUuid', default="", type=str)
    trx.mediaType = request.args.get('mediaType', default="", type=str)
    trx.correlationId = request.args.get('correlationId', default="", type=str)
    trx.trxId = request.args.get('payId', default="", type=str)
    trx.maskedMediaId = request.args.get('maskedMediaId', default="", type=str)
    trx.status = request.args.get('status', default="", type=str)
    trx.author_time = datetime.now().strftime("%d.%m.%Y %H:%M:%S")
    web.logger.info(f"ShoppingCart {trx.shoppingCartUuid} Transaction {trx.trxId} "
                    f"{trx.mediaType} {trx.maskedMediaId} {trx.status}")
    return render_template('trx.html', trx=trx)
4ffa01c2d6682a6320870ac158f564c37aa5a32e
3,644,880
def get_counts_by_domain(df):
    """
    Parameters:
        df (pandas.Dataframe) - form of `get_counts_df` output

    Returns:
        pandas.Dataframe
    """
    columns = ['study', 'study_label', 'domain_code', 'domain_label']
    df2 = df.groupby(columns, as_index=False)[["count", "subjects"]].max()
    return df2
544aaa734858209c36c84d87bb6beb05761a5194
3,644,881
import numpy as np

def batch_cosine_similarity(x1, x2):
    """
    https://en.wikipedia.org/wiki/Cosine_similarity
    """
    mul = np.multiply(x1, x2)
    s = np.sum(mul, axis=1)
    return s
6ed5e4ca426cc61d25dd272f92ba9220186bfd8e
3,644,882
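Note that the function above returns raw row-wise dot products, which equal cosine similarity only when the rows of x1 and x2 are already L2-normalized (as speaker embeddings often are). A minimal sketch of the fully normalized version, for inputs that are not pre-normalized:

import numpy as np

def batch_cosine_similarity_full(x1, x2):
    # Dot product per row divided by the product of the row norms.
    num = np.sum(x1 * x2, axis=1)
    den = np.linalg.norm(x1, axis=1) * np.linalg.norm(x2, axis=1)
    return num / den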
def plot(ax, x, y):
    """Plot """
    return ax._plot(x, y)
90cc2616d21e3c1239524437f653f85602c1984b
3,644,883
import os

def concatenatePDFs(filelist, pdfname, pdftk='pdftk', gs='gs',
                    cleanup=False, quiet=False):
    """
    Takes a list or a string list of PDF filenames (space-delimited), and
    an output name, and concatenates them. It first tries pdftk (better
    quality), and if that fails, it tries ghostscript (more commonly
    installed).
    Todd Hunter
    """
    if type(filelist) == list:
        filelist = ' '.join(filelist)
    cmd = '%s %s cat output %s' % (pdftk, filelist, pdfname)
    if not quiet:
        print("Running command = %s" % (cmd))
    mystatus = os.system(cmd)
    if mystatus != 0:
        print("status = ", mystatus)
        cmd = '%s -q -sPAPERSIZE=letter -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=%s %s' % (gs, pdfname, filelist)
        print("Running command = %s" % (cmd))
        mystatus = os.system(cmd)
        if mystatus != 0:
            gs = '/opt/local/bin/gs'
            cmd = '%s -q -sPAPERSIZE=letter -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=%s %s' % (gs, pdfname, filelist)
            print("Running command = %s" % (cmd))
            mystatus = os.system(cmd)
            if mystatus != 0:
                print("Both pdftk and gs are missing, no PDF created.")
                cleanup = False
    if cleanup:
        os.system('rm %s' % filelist)
    return mystatus
3e138e84db9650af3afbbab4d904dc3a4cb581c9
3,644,884
def get_module_offset(process_id: int, process_name: str) -> Address:
    """Returns an Address with the base offset of the process.

    Args:
        process_id (int): PID
        process_name (str): Name of the process. Case does not matter.

    Returns:
        Address: Address with the base offset of the process.
    """
    flag = TH32CS_SNAPMODULE | TH32CS_SNAPMODULE32
    snap = CreateToolhelp32Snapshot(flag, process_id)
    me32 = MODULEENTRY32()
    me32.dwSize = sizeof(MODULEENTRY32)
    Module32First(snap, byref(me32))
    while True:
        name = me32.szModule.decode("ascii")
        if process_name.lower() in name.lower():
            base_addr = me32.modBaseAddr
            addr = Address(addressof(base_addr.contents))
            CloseHandle(snap)
            return addr
        if not Module32Next(snap, byref(me32)):
            break
    CloseHandle(snap)
09e0775213e4a32f1ea786ad9d1184e7f4dbd7cf
3,644,885
from typing import Sequence

def sequence_to_header(sequence: Sequence[Bytes]) -> Header:
    """
    Build a Header object from a sequence of bytes. The sequence should
    contain exactly 15 byte sequences.

    Parameters
    ----------
    sequence :
        The sequence of bytes which is supposed to form the Header object.

    Returns
    -------
    header : `Header`
        The obtained `Header` object.
    """
    ensure(len(sequence) == 15)
    ensure(len(sequence[12]) <= 32)
    return Header(
        parent_hash=Hash32(sequence[0]),
        ommers_hash=Hash32(sequence[1]),
        coinbase=Address(sequence[2]),
        state_root=Root(sequence[3]),
        transactions_root=Root(sequence[4]),
        receipt_root=Root(sequence[5]),
        bloom=Bloom(sequence[6]),
        difficulty=Uint.from_be_bytes(sequence[7]),
        number=Uint.from_be_bytes(sequence[8]),
        gas_limit=Uint.from_be_bytes(sequence[9]),
        gas_used=Uint.from_be_bytes(sequence[10]),
        timestamp=U256.from_be_bytes(sequence[11]),
        extra_data=sequence[12],
        mix_digest=Hash32(sequence[13]),
        nonce=Bytes8(sequence[14]),
    )
b1c4040b216162777e33bbbab0f7774b8b02af91
3,644,886
def makeASdef(isd_id, as_id_tail, label, public_ip, is_core=False, is_ap=False):
    """ Helper for readable ASdef declaration """
    return ASdef(isd_id, _expand_as_id(as_id_tail), label, public_ip, is_core, is_ap)
19bc51a648ac558f524f29744e1574a245e50cf2
3,644,887
def EnableTrt(mod, params=None, trt_version=None):
    """Converts the "main" function in the module into one that can be
    executed using TensorRT. If any of the operators are not supported by
    the TensorRT conversion, the unmodified program will be returned
    instead.

    Parameters
    ----------
    mod: Module
        The original module.

    params : dict of str to NDArray
        Input parameters to the graph that do not change during inference
        time. Used for constant folding.

    trt_version : Optional[Tuple[int]]
        Which version of TensorRT to target for partitioning as a tuple of
        (major, minor, patch). If not specified, will attempt to get using
        GetTrtVersion.

    Returns
    -------
    mod: Module
        The modified module which will use the TensorRT runtime if
        compatible.
    """
    if not trt_version:
        trt_version = GetTrtVersion()
        # If TVM wasn't built against TRT, default to target TRT 6. Since
        # the actual conversion to TRT is done at runtime, building against
        # TRT is not required for compilation.
        if not trt_version:
            trt_version = (6, 0, 1)
    assert isinstance(trt_version, (list, tuple))
    assert len(trt_version) == 3

    # Apply passes required for TRT
    mod = relay.transform.RemoveUnusedFunctions()(mod)
    mod = relay.transform.InferType()(mod)
    mod = relay.transform.ConvertLayout('NCHW')(mod)
    mod = PreprocessForTrt(mod)
    if params:
        # Bind params so that we can use FoldConstant.
        mod['main'] = _bind_params(mod['main'], params)
    mod = relay.transform.FoldConstant()(mod)
    return _transform.EnableTrt(*trt_version)(mod)
c3cac75de48e2c2a9af30ce427bc57d86a56dbc4
3,644,889
import cupy

def _setup_cuda_fft_resample(n_jobs, W, new_len):
    """Set up CUDA FFT resampling.

    Parameters
    ----------
    n_jobs : int | str
        If n_jobs == 'cuda', the function will attempt to set up for CUDA
        FFT resampling.
    W : array
        The filtering function to be used during resampling.
        If n_jobs='cuda', this function will be shortened (since CUDA
        assumes FFTs of real signals are half the length of the signal)
        and turned into a gpuarray.
    new_len : int
        The size of the array following resampling.

    Returns
    -------
    n_jobs : int
        Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
        original n_jobs is passed.
    cuda_dict : dict
        Dictionary with the following CUDA-related variables:
            use_cuda : bool
                Whether CUDA should be used.
            fft_plan : instance of FFTPlan
                FFT plan to use in calculating the FFT.
            ifft_plan : instance of FFTPlan
                FFT plan to use in calculating the IFFT.
            x_fft : instance of gpuarray
                Empty allocated GPU space for storing the result of the
                frequency-domain multiplication.
            x : instance of gpuarray
                Empty allocated GPU space for the data to resample.

    Notes
    -----
    This function is designed to be used with fft_resample().
    """
    cuda_dict = dict(use_cuda=False, rfft=rfft, irfft=irfft)
    rfft_len_x = len(W) // 2 + 1
    # fold the window onto itself (should be symmetric) and truncate
    W = W.copy()
    W[1:rfft_len_x] = (W[1:rfft_len_x] + W[::-1][:rfft_len_x - 1]) / 2.
    W = W[:rfft_len_x]
    if n_jobs == 'cuda':
        n_jobs = 1
        init_cuda()
        if _cuda_capable:
            try:
                # do the IFFT normalization now so we don't have to later
                W = cupy.array(W)
                logger.info('Using CUDA for FFT resampling')
            except Exception:
                logger.info('CUDA not used, could not instantiate memory '
                            '(arrays may be too large), falling back to '
                            'n_jobs=1')
            else:
                cuda_dict.update(use_cuda=True,
                                 rfft=_cuda_upload_rfft,
                                 irfft=_cuda_irfft_get)
        else:
            logger.info('CUDA not used, CUDA could not be initialized, '
                        'falling back to n_jobs=1')
    cuda_dict['W'] = W
    return n_jobs, cuda_dict
34a949250239b5334650b89d6566b81460079591
3,644,890
import nltk

def sentensize(text):
    """Break a text into sentences.

    Args:
        text (str): A text containing sentence(s).

    Returns:
        list of str: A list of sentences.
    """
    return nltk.tokenize.sent_tokenize(text)
ae16aff476842c8e0fc2fa2506b68ad60dc603f0
3,644,891
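Usage sketch; sent_tokenize needs the NLTK "punkt" models downloaded once:

import nltk

nltk.download("punkt", quiet=True)  # one-time model download
sentensize("First sentence. Second one!")
# -> ['First sentence.', 'Second one!']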
import paddle

def tokenize(texts, context_length=77):
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context
        length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens,
    shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]
    sot_token = tokenizer.encoder["<|startoftext|>"]
    eot_token = tokenizer.encoder["<|endoftext|>"]
    all_tokens = [[sot_token] + tokenizer.encode(text) + [eot_token] for text in texts]
    result = paddle.zeros((len(all_tokens), context_length), dtype='int64')
    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            raise RuntimeError(
                f"Input {texts[i]} is too long for context length {context_length}")
        result[i, :len(tokens)] = paddle.to_tensor(tokens)
    return result
1fe73425cb30f0f6fbce6caa740f118ee9591347
3,644,892
import tensorflow as tf

def _int64_feature_list(values):
    """Wrapper for inserting an int64 FeatureList into a SequenceExample
    proto, e.g., a sentence as a list of ints
    """
    return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
edf4605c1dd9ad45d3a2508122b85213657f56cb
3,644,893
def read_relative_pose(object_frame_data: dict) -> tf.Transform:
    """
    Read the pose of an object relative to the camera, from the frame data.
    For reasons (known only to the developer), these poses are in OpenCV
    convention. So x is right, y is down, z is forward.
    Scale is still 1cm, so we divide by 100 again.
    see https://github.com/jskinn/Dataset_Synthesizer/blob/local-devel/Source/Plugins/NVSceneCapturer/Source/NVSceneCapturer/Private/NVSceneFeatureExtractor_DataExport.cpp#L143

    :param object_frame_data: The frame data dict from the matching object
        in the objects array
    :return: The relative pose of the object, as a Transform
    """
    tx, ty, tz = object_frame_data['location']
    qx, qy, qz, qw = object_frame_data['quaternion_xyzw']
    return tf.Transform(
        location=(tz / 100, -tx / 100, -ty / 100),
        rotation=(qw, qz, -qx, -qy),
        w_first=True
    )
dae13aa0a10db2133f87c399ec90113ef157a210
3,644,894
from sqlmodel import Session, select

def upsert_task(task_uuid: str, task: Task) -> Task:
    """Upsert a task.

    It is used to create a task in the database if it does not already
    exist, else it is used to update the existing one.

    Args:
        task_uuid: The uuid of the task to upsert.
        task: The task data.

    Returns:
        The upserted task.
    """
    with Session(engine) as session:
        # check if the task exists
        statement = select(Task).where(Task.uuid == task_uuid)
        result = session.exec(statement).first()
        # if not, create it
        if result is None:
            result = task
        # sync the data
        for key, value in task.dict(exclude_unset=True).items():
            setattr(result, key, value)
        # persist the data to the database
        session.add(result)
        session.commit()
        session.refresh(result)
    return result
7fbf296377fb1e4e59b7c9884c6191ff2b0a273b
3,644,895
def shuffle_entries(x, entry_cls, config=None, value_type=sgf2n, reverse=False, perm_size=None):
    """ Shuffle a list of ORAM entries.

    Randomly permutes the first "perm_size" entries, leaving the rest
    (empty entry padding) in the same position. """
    n = len(x)
    l = len(x[0])
    if n & (n - 1) != 0:
        raise CompilerError('Entries must be padded to power of two length.')
    if perm_size is None:
        perm_size = n
    xarrays = [Array(n, value_type.reg_type) for i in range(l)]
    for i in range(n):
        for j, value in enumerate(x[i]):
            if isinstance(value, MemValue):
                xarrays[j][i] = value.read()
            else:
                xarrays[j][i] = value
    if config is None:
        config = config_shuffle(perm_size, value_type)
    for xi in xarrays:
        shuffle(xi, config, value_type, reverse)
    for i in range(n):
        x[i] = entry_cls(xarrays[j][i] for j in range(l))
    return config
827506de7e572b1df1b210ccfb990db5839b5273
3,644,896
import json

def entities(request):
    """Get entities for the specified project, locale and paths."""
    try:
        project = request.GET['project']
        locale = request.GET['locale']
        paths = json.loads(request.GET['paths'])
    except MultiValueDictKeyError as e:
        log.error(str(e))
        return HttpResponse("error")

    try:
        project = Project.objects.get(slug=project)
    except Project.DoesNotExist as e:
        log.error(str(e))
        return HttpResponse("error")

    try:
        locale = Locale.objects.get(code__iexact=locale)
    except Locale.DoesNotExist as e:
        log.error(str(e))
        return HttpResponse("error")

    search = None
    if request.GET.get('keyword', None):
        search = request.GET

    entities = Entity.for_project_locale(project, locale, paths, search)
    return JsonResponse(entities, safe=False)
686f9298302d30e89ad0d34ed4c0c96d22fd455d
3,644,898
import json

def info(request, token):
    """
    Return the HireFire json data needed to scale worker dynos
    """
    if not settings.HIREFIRE_TOKEN:
        return HttpResponseBadRequest(
            "Hirefire not configured. Set the HIREFIRE_TOKEN environment "
            "variable on the app to use Hirefire for dyno scaling"
        )
    if token != settings.HIREFIRE_TOKEN:
        raise PermissionDenied("Invalid token")

    current_tasks = 0
    queues = []
    for index, config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        connection = queue.connection
        # Only look at the default queue
        if queue.name != "default":
            continue
        queue_data = {
            "name": queue.name,
            "jobs": queue.count,
            "index": index,
            "connection_kwargs": connection.connection_pool.connection_kwargs,
        }
        connection = get_connection(queue.name)
        all_workers = Worker.all(connection=connection)
        queue_workers = [worker for worker in all_workers if queue in worker.queues]
        queue_data["workers"] = len(queue_workers)
        finished_job_registry = FinishedJobRegistry(queue.name, connection)
        started_job_registry = StartedJobRegistry(queue.name, connection)
        deferred_job_registry = DeferredJobRegistry(queue.name, connection)
        queue_data["finished_jobs"] = len(finished_job_registry)
        queue_data["started_jobs"] = len(started_job_registry)
        queue_data["deferred_jobs"] = len(deferred_job_registry)
        current_tasks += queue_data["jobs"]
        current_tasks += queue_data["started_jobs"]
        queues.append(queue_data)

    payload = [{"quantity": current_tasks, "name": "worker"}]
    payload = json.dumps(payload)
    return HttpResponse(payload, content_type="application/json")
7164d7f19b14ef601480484d6182f4b62cc250bf
3,644,899
import re

def get_domain_from_url(url):
    """get domain from url"""
    domain = ''
    # url is http://a.b.com/ads/asds
    if re.search(r'://.*?/', url):
        try:
            domain = url.split('//', 1)[1].split('/', 1)[0]
        except IndexError as e:
            LOGGER.warn('Get domain error,%s,%s' % (url, e))
    # http://a.b.com?a=adsd
    elif re.search(r'://.*?\?', url):
        try:
            domain = url.split('//', 1)[1].split('?', 1)[0]
        except IndexError as e:
            LOGGER.warn('Get domain error,%s,%s' % (url, e))
    elif re.search(r'://.*?', url):
        try:
            domain = url.split('//', 1)[1].split('/', 1)[0]
        except IndexError as e:
            LOGGER.warn('Get domain error,%s,%s' % (url, e))
    # url is a.b.com/a/b/c, a.b.com, /a/b/c,
    elif re.search(r'/', url):
        value = url.split('/', 1)[0]
        if value == '':
            pass
        elif value == '.':
            pass
        elif '.' not in value:
            pass
        elif domain == '..':
            pass
    return domain
6b364a74c86337108d21539c4a5678af2e6ea48a
3,644,900
import json

import webob

def render_response(body=None, status=None, headers=None):
    """Generate a WSGI response message."""
    headers = [] if headers is None else list(headers)
    if body is None:
        body = ''
        status = status or (204, 'No Content')
    else:
        body = json.dumps(body)
        headers.append(('Content-Type', 'application/json'))
        status = status or (200, 'OK')
    resp = webob.Response(body=body,
                          status='%s %s' % status,
                          headerlist=headers)
    return resp
b31128db57ca99a840d4adce6f3116f629d8a0b8
3,644,901
def nashpobench_benchmark(params):
    """
    The underlying tabulated blackbox does not have an `elapsed_time_attr`,
    but only a `time_this_resource_attr`.
    """
    config_space = dict(
        CONFIGURATION_SPACE,
        epochs=params['max_resource_level'],
        dataset_name=params['dataset_name'])
    return {
        'script': None,
        'metric': METRIC_VALID_LOSS,
        'mode': 'min',
        'resource_attr': RESOURCE_ATTR,
        'elapsed_time_attr': METRIC_ELAPSED_TIME,
        'max_resource_attr': 'epochs',
        'config_space': config_space,
        'cost_model': None,
        'supports_simulated': True,
        'blackbox_name': BLACKBOX_NAME,
    }
74e1e619cc8c4a3201e41820f5f641c651a5f283
3,644,903
def horizontal_plate_natual_convection_2(Gr, Pr):
    """Hot side downward, or cold side upward.

    Valid for 1e5 < Ra < 1e10.
    """
    Ra = Gr * Pr
    return 0.27 * Ra**0.25
bc44118e871e977a7ecb6a877f7232b837d1bf0e
3,644,904
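Usage sketch converting the Nusselt number above to a convective coefficient via h = Nu*k/L; the property values are illustrative assumptions, not part of the original snippet:

Gr, Pr = 5e7, 0.7  # Ra = Gr*Pr = 3.5e7, within the stated validity range
Nu = horizontal_plate_natual_convection_2(Gr, Pr)
k, L = 0.026, 0.5  # assumed air conductivity (W/m/K) and characteristic length (m)
h = Nu * k / L     # convective heat-transfer coefficient, W/(m^2 K)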
import typing

import pandas as pd

def translate_value_data(
    new_values: list,
    options: dict,
    parent_value: str,
    translate_dict: typing.Optional[dict],
    values: list,
):
    """Translates value data if necessary and checks if it falls within the
    Castor optiongroup"""
    for value in values:
        if pd.isnull(parent_value):
            if translate_dict:
                value = translate_dict.get(str(value), "Error: no translation provided")
            new_values.append(options.get(str(value), "Error: non-existent option"))
        else:
            if translate_dict:
                value = translate_dict.get(str(value), parent_value)
            new_values.append(options.get(str(value), parent_value))
    return new_values
ccfc64e54fae868877c6852ebeeadae11bb1221b
3,644,906
def makeVocabFromText(filelist=None, max_size=10 * 10000, least_freq=2,
                      trunc_len=100, filter_len=0, print_log=None,
                      vocab_file=None, encoding_format='utf-8', lowercase=True):
    """
    The core of this function is building a word2count dict and writing it
    to a .txt file, then using Vocab to read it.
    """
    if print_log:
        print_log("%s: the max vocab size = %d, least_freq is %d, truncate length = %d"
                  % (filelist[0], max_size, least_freq, trunc_len))
    else:
        print("%s: the max vocab size = %d, least_freq is %d, truncate length = %d"
              % (filelist[0], max_size, least_freq, trunc_len))
    # tokenize each sentence and add its words to the word2count dict
    word2count = {}
    for filename in filelist:
        with open(filename, 'r', encoding=encoding_format) as f:
            for sent in f:
                tokens = sent.strip().split()
                if 0 < filter_len < len(tokens):
                    continue
                if trunc_len > 0:
                    tokens = tokens[:trunc_len]
                for word in tokens:
                    word = word if not lowercase else word.lower()
                    if word not in word2count:
                        word2count[word] = 1
                    else:
                        word2count[word] += 1
    return makeVocabFormDict(word2count=word2count, max_size=max_size,
                             least_freq=least_freq, vocab_file=vocab_file,
                             encoding_format=encoding_format,
                             filename=filelist[0])
2a3c0c42ee5c541d19bbe695c12f977fd29dfeaf
3,644,907
import numpy as np
import scipy.io as sio

def import_supplemental(file_path):
    """Get data from a supplemental file"""
    data = sio.loadmat(file_path)
    data['move'] = np.squeeze(data['move'])
    data['rep'] = np.squeeze(data['rep'])
    data['emg_time'] = np.squeeze(data['emg_time'])
    return data
4544a0ee292cb4e323c31545009c4d1e17ca98e1
3,644,908
def _unpickle_injected_object(base_class, mixin_class, class_name=None):
    """
    Callable for the pickler to unpickle objects of a dynamically created
    class based on the InjectableMixin. It creates the base object from the
    original base class and re-injects the mixin class when unpickling an
    object.

    :param type base_class: The base class of the pickled object before
        adding the mixin via injection.
    :param type mixin_class: The :class:`InjectableMixin` subclass that was
        injected into the pickled object.
    :param str class_name: The class name of the pickled object's
        dynamically created class.
    :return: The initial unpickled object (before the pickler restores the
        object's state).
    """
    obj = base_class.__new__(base_class, ())
    return mixin_class.inject_into_object(obj, class_name)
1821509506ad31dcdb21f07a2b83c544ff3c3eb3
3,644,909
import re
from pathlib import Path

def parse_endfblib(libdir):
    """Parse ENDF/B library

    Parameters:
    -----------
    libdir : str
        directory with ENDFB file structure"""
    filepaths = []
    nuclidnames = []
    endf_dir = Path(libdir)
    neutron_files = tuple((endf_dir / "neutrons").glob("*endf"))
    for n in neutron_files:
        filepaths.append(n.absolute())
        nuclidnames.append(n.name.split('_')[1]
                           + re.split("^0*", n.name.split('_')[2][:-5])[-1])
    return nuclidnames, filepaths
3587b849132e4b2eeb6ad184bf58755340473bd9
3,644,910
def build_val_col_list(tableName):
    """Build and return a schema to use for the sample data."""
    statement = ("( SELECT column_name, data_type, "
                 "case when data_type='NUMBER' THEN NVL(DATA_PRECISION,38) + DATA_SCALE "
                 "ELSE DATA_LENGTH END AS ORACLE_LENGTH "
                 "FROM dba_tab_columns WHERE table_name = '" + tableName + "' "
                 "order by column_id asc )")
    buildColTypeList = spark.read.format("jdbc") \
        .option("url", "jdbc:oracle:thin:system/oracle@//0.0.0.0:1521/xe") \
        .option("dbtable", statement) \
        .option("user", "system") \
        .option("password", "oracle") \
        .option("driver", "oracle.jdbc.driver.OracleDriver") \
        .load()
    xList = buildColTypeList.collect()
    return xList
d6602078a458fa3f36de3558c8044749caf7f4d5
3,644,912
from datetime import datetime

def save_image(user, filename, image_tif, process, latency, size, hist):
    """ Function that saves image to Mongo database

    Args:
        user: username
        filename: desired file name in database
        image_tif: tiff image in byte format
        process: processing algorithm applied to image
        latency: time to process image
        size: image size
        hist: histogram values of image

    Returns:
        outstr: Confirmation that image has been saved
    """
    time = datetime.now()
    Image_Dict = {
        "File": filename,
        "Image": image_tif,
        "Process": process,
        "Timestamp": time,
        "Latency": latency,
        "Size": size,
        "Histogram": hist,
    }
    Image_List = user.ImageFile
    Image_List.append(Image_Dict)
    user.filenames.append(filename)
    user.save()
    outstr = "Image saved successfully"
    return outstr
ea416fcdc09c71aef56250a8e0b7f558e8e8a884
3,644,913
import numpy as np

def run_simulation_with_params(
        sim_params, replicate, repeats=10, should_perform_gwas=True):
    """Runs simulation with given params and returns result object."""
    try:
        simulation_result = run_simulation(
            simulation_params=sim_params)
    except Exception as e:
        print(sim_params)
        raise e

    result = {
        'num_snps_considered': sim_params.num_snps_considered,
        'num_samples': sim_params.num_samples,
        'num_snps_with_effect': sim_params.num_snps_with_effect,
        'replicate': replicate,
        'total_fitness_effect': np.prod(simulation_result['snp_effect']),
        'mage_cycles': sim_params.mage_cycles,
        'population_size': sim_params.population_size
    }

    # Apply linear modeling.
    lm_result = run_linear_modeling(
        simulation_result['wgs_samples'],
        simulation_result['wgs_samples_doubling_times'],
        repeats=repeats)
    lm_eval_results = evaluate_modeling_result(
        simulation_result, lm_result)
    lm_eval_results_df = lm_eval_results['results_df']
    result.update({
        'lm_pearson_r': lm_eval_results['pearson_r'],
        'lm_pearson_p': lm_eval_results['p_value'],
    })
    result.update(
        calculate_modeling_metrics(
            lm_eval_results_df, 'linear_model_coef', results_prefix='lm_'))

    # Maybe perform GWAS.
    if should_perform_gwas:
        gwas_results_df = run_gwas(
            simulation_result['wgs_samples'],
            simulation_result['wgs_samples_doubling_times'])
        gwas_eval_results = evaluate_gwas_result(
            gwas_results_df, lm_eval_results_df)
        gwas_eval_results_df = gwas_eval_results['results_df']
        result.update({
            'gwas_pearson_r': gwas_eval_results['pearson_r'],
            'gwas_pearson_p': gwas_eval_results['p_value'],
        })
        result.update(
            calculate_modeling_metrics(
                gwas_eval_results_df, 'gwas_coef', results_prefix='gwas_'))

    # Perform enrichment analysis on final timepoint.
    enrichment_result_df = run_enrichment_analysis(simulation_result)
    result.update(
        calculate_enrichment_metrics(
            enrichment_result_df))
    return result
a7a1383708c1b6e69c975488b03704698f9b1066
3,644,914
import colorsys

def hsl_to_rgb(hsl):
    """Convert hsl colorspace values to RGB."""
    # Convert hsl to 0-1 ranges.
    h = hsl[0] / 359.
    s = hsl[1] / 100.
    l = hsl[2] / 100.
    # colorsys expects HLS argument order and returns numbers between 0 and 1
    tmp = colorsys.hls_to_rgb(h, l, s)
    # convert to 0 to 255
    r = int(round(tmp[0] * 255))
    g = int(round(tmp[1] * 255))
    b = int(round(tmp[2] * 255))
    return (r, g, b)
4417ce8468e71b7139b57fe270809c7030b2c3df
3,644,915
import itertools
import typing as ty

async def test_filterfalse_matches_itertools_filterfalse(
    arange: ty.Type[ty.AsyncIterator[int]], stop: int
):
    """Ensure that our async filterfalse implementation follows the
    standard implementation.
    """
    async def _pair(x):
        return (x % 2) == 0

    target = list(itertools.filterfalse(lambda x: (x % 2) == 0, range(stop)))
    result = [x async for x in none.collection.a.filterfalse(_pair, arange(stop))]
    assert result == target
59fd932f3906eb411e21207d920f752f7f78df44
3,644,917
def extract_buffer_info(mod, param_dict):
    """
    Read a tvm.IRModule that contains a Relay-to-TIR compiled IRModule and
    extract the buffer information: the shape and constant data (if any).

    Parameters
    ----------
    mod : tvm.IRModule
        The NPU TIR IRModule.
    param_dict : dict
        A dictionary containing param idx --> const numpy.NDArray

    Returns
    -------
    dict
        a dictionary of buffer names --> BufferInfo
    """
    buffer_info = dict()
    # There should only be a single function
    assert len(mod.functions.items()) == 1
    primfunc = mod.functions.items()[0][1]
    for idx, const_data in param_dict.items():
        param = primfunc.params[idx]
        buffer_info[primfunc.buffer_map[param].data] = BufferInfo(
            const_data, const_data.shape, const_data.dtype, BufferType.constant
        )
    for param in primfunc.params:
        if primfunc.buffer_map[param].data not in buffer_info.keys():
            buffer_info[primfunc.buffer_map[param].data] = BufferInfo(
                None,
                primfunc.buffer_map[param].shape,
                primfunc.buffer_map[param].dtype,
                BufferType.input_or_output,
            )

    def populate_allocate_buffer_info(stmt):
        if isinstance(stmt, tvm.tir.stmt.Allocate):
            allocate = stmt
            buffer_info[allocate.buffer_var] = BufferInfo(
                None,
                allocate.extents,
                allocate.dtype,
                BufferType.scratch,
            )

    tvm.tir.stmt_functor.post_order_visit(primfunc.body, populate_allocate_buffer_info)
    return buffer_info
291f091d06aa768ceb28f2738823f5eeb336c47e
3,644,918
def find_external_nodes(digraph):
    """Return a set of external nodes in a directed graph.

    External nodes are nodes that are referenced as a dependency but not
    defined as a key in the graph dictionary.
    """
    external_nodes = set()
    for ni in digraph:
        for nj in digraph[ni]:
            if nj not in digraph:
                external_nodes.add(nj)
    return external_nodes
de63af1b649e450214907dd704bde782820d393d
3,644,919
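For example, with a dependency dict where 'c' is referenced but never defined as a key:

find_external_nodes({'a': ['b', 'c'], 'b': ['c']})
# -> {'c'}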
import six

def strip(val):
    """
    Strip val, which may be str or iterable of str.

    For str input, returns stripped string, and for iterable input, returns
    list of str values without empty str (after strip) values.
    """
    if isinstance(val, six.string_types):
        return val.strip()
    try:
        return list(filter(None, map(strip, val)))
    except TypeError:
        return val
893986e69f6d64167f45daf30dacb72f4b7f2bff
3,644,920
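Behaviour on the two input kinds:

strip('  hi  ')              # -> 'hi'
strip([' a ', '   ', 'b '])  # -> ['a', 'b']  (whitespace-only entries are dropped)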
import iris

def construct_area_cube(var_name, area_data, global_atts, dim_coords):
    """Construct the new area cube"""
    dim_coords_list = []
    for i, coord in enumerate(dim_coords):
        dim_coords_list.append((coord, i))
    if var_name == 'areacello':
        long_name = 'Grid-Cell Area for Ocean Variables'
    else:
        long_name = 'Grid-Cell Area for Atmospheric Grid Variables'
    area_cube = iris.cube.Cube(area_data,
                               standard_name='cell_area',
                               long_name=long_name,
                               var_name=var_name,
                               units='m2',
                               attributes=global_atts,
                               dim_coords_and_dims=dim_coords_list)
    return area_cube
07c01610f800202ccbdebf834648840b77d47fb7
3,644,922
import numpy as np
import pandas as pd

def _switch_obs_2_time_dim(ds):
    """Create a single time variable that is the midpoint of the ObsPack
    averaging interval, and make it the xarray coordinate.
    """
    # Get the midpoint of the average pulled from the model:
    midpoint = pd.to_datetime(ds.averaging_interval_start.data) + \
        np.asarray(ds.averaging_interval.data) / 2
    # Make the time midpoint a new variable in the dataset.
    t = midpoint.to_series().reset_index(drop=True)
    ds['time'] = ("obs", t)
    # Tell xarray that we want time to be a coordinate.
    ds = ds.set_coords('time')
    # And tell it to replace obs # with time as the preferred dimension.
    ds = ds.swap_dims({"obs": "time"})
    return ds
6fa53b3f1a0472f45fa59c11b5d869786b5a9f4f
3,644,923
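A small sketch, with invented values, of the obs-indexed dataset this helper expects:

import numpy as np
import pandas as pd
import xarray as xr

ds = xr.Dataset({
    "averaging_interval_start": ("obs", pd.to_datetime(
        ["2020-01-01 00:00", "2020-01-01 01:00"])),
    "averaging_interval": ("obs", pd.to_timedelta(["1h", "1h"])),
    "co2": ("obs", np.array([410.0, 411.2])),
})

ds = _switch_obs_2_time_dim(ds)
print(ds.time.values)  # interval midpoints: 00:30 and 01:30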
def bitfield_v(val, fields, col=15): """ return a string of bit field components formatted vertically val: the value to be split into bit fields fields: a tuple of (name, output_function, (bit_hi, bit_lo)) tuples """ fmt = '%%-%ds: %%s' % col s = [] for (name, func, field) in fields: s.append(fmt % (name, func(bits(val, field)))) return '\n'.join(s)
139b9328190f61a1cd649826bfde806e565d4201
3,644,924
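An illustrative driver for bitfield_v above. The `bits` helper it relies on is not shown in this snippet, so a plausible implementation (extracting an inclusive [hi:lo] range) is sketched here as an assumption:

def bits(val, field):
    # Hypothetical helper: extract bits [hi:lo] (inclusive) from val.
    hi, lo = field
    return (val >> lo) & ((1 << (hi - lo + 1)) - 1)


fields = (
    ('opcode', hex, (7, 4)),
    ('flags', bin, (3, 0)),
)
print(bitfield_v(0xA5, fields))
# opcode         : 0xa
# flags          : 0b101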
from typing import Tuple from typing import Iterable def split_housenumber_line(line: str) -> Tuple[str, bool, bool, str, Tuple[int, str], str, Tuple[int, str], Iterable[str], Tuple[int, str]]: """ Augment TSV Overpass house numbers result lines to aid sorting. It prepends two bools to indicate whether an entry is missing either a house number, a house name or a conscription number. Entries lacking either a house number or all of the above IDs come first. The following fields are interpreted numerically: oid, house number, conscription number. """ field = line.split('\t') oid = get_array_nth(field, 0) street = get_array_nth(field, 1) housenumber = get_array_nth(field, 2) postcode = get_array_nth(field, 3) housename = get_array_nth(field, 4) cons = get_array_nth(field, 5) tail = field[6:] if len(field) > 6 else [] have_housenumber = housenumber != '' have_houseid = have_housenumber or housename != '' or cons != '' return (postcode, have_houseid, have_housenumber, street, split_house_number(housenumber), housename, split_house_number(cons), tail, split_house_number(oid))
c3d93d459c9b004d199725b11e1b92340e6154b9
3,644,925
import math def tau_polinomyal_coefficients(z): """ Coefficients (z-dependent) for the log(tau) formula from Raiteri C.M., Villata M. & Navarro J.F., 1996, A&A 315, 105-115 """ log_z = math.log10(z) log_z_2 = log_z ** 2 a0 = 10.13 + 0.07547 * log_z - 0.008084 * log_z_2 a1 = -4.424 - 0.7939 * log_z - 0.1187 * log_z_2 a2 = 1.262 + 0.3385 * log_z + 0.05417 * log_z_2 return [a0, a1, a2]
ebef7d773eeb400ef87553fc5838ee2cb97d0669
3,644,926
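A worked example, assuming (per Raiteri et al. 1996) that the coefficients feed the quadratic log10(tau/yr) = a0 + a1*log10(M/Msun) + a2*log10(M/Msun)^2:

import math


def log_tau(mass, z):
    # Stellar lifetime in log10(years) under the assumed quadratic form.
    a0, a1, a2 = tau_polinomyal_coefficients(z)
    log_m = math.log10(mass)
    return a0 + a1 * log_m + a2 * log_m ** 2


# For a 1 Msun star, log_m = 0, so log tau reduces to a0.
print(log_tau(1.0, 0.02))  # ~9.98, i.e. tau ~ 1e10 yr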
from typing import Optional import this def register( # lgtm[py/similar-function] fn: callbacks.ResourceHandlerFn, *, id: Optional[str] = None, errors: Optional[errors_.ErrorsMode] = None, timeout: Optional[float] = None, retries: Optional[int] = None, backoff: Optional[float] = None, cooldown: Optional[float] = None, # deprecated, use `backoff` registry: Optional[registries.ResourceChangingRegistry] = None, labels: Optional[bodies.Labels] = None, annotations: Optional[bodies.Annotations] = None, when: Optional[callbacks.WhenHandlerFn] = None, ) -> callbacks.ResourceHandlerFn: """ Register a function as a sub-handler of the currently executed handler. Example:: @kopf.on.create('zalando.org', 'v1', 'kopfexamples') def create_it(spec, **kwargs): for task in spec.get('tasks', []): def create_single_task(task=task, **_): pass kopf.register(id=task, fn=create_single_task) This is efficiently an equivalent for:: @kopf.on.create('zalando.org', 'v1', 'kopfexamples') def create_it(spec, **kwargs): for task in spec.get('tasks', []): @kopf.on.this(id=task) def create_single_task(task=task, **_): pass """ decorator = this( id=id, registry=registry, errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown, labels=labels, annotations=annotations, when=when, ) return decorator(fn)
d2e539c97a4946f819616d0f596e68e190a68c78
3,644,927
import pandas as pd


def pd_read_csv_using_metadata(filepath_or_buffer, table_metadata, ignore_partitions=False, *args, **kwargs):
    """
    Use pandas to read a csv imposing the datatypes specified in the table_metadata

    Passes through kwargs to pandas.read_csv

    If ignore_partitions=True, assume that partitions are not columns in the dataset
    """
    if ignore_partitions:
        table_metadata = _remove_paritions_from_table_metadata(table_metadata)

    dtype = _pd_dtype_dict_from_metadata(table_metadata, ignore_partitions)
    parse_dates = _pd_date_parse_list_from_metadatadata(table_metadata)

    return pd.read_csv(filepath_or_buffer, *args, dtype=dtype, parse_dates=parse_dates, **kwargs)
bddc8da985c7e252effe566c640bca25acd01d6a
3,644,928
def read_parfile_dirs_props(filename):
    """Reads BRUKER parfile-dirs.prop file in order to get the correct
    mapping of the topspin parameters.

    Args:
        filename: input Bruker parfile-dirs.prop file

    Returns:
        A dict mapping parameter classes to their respective directories.
        E.g. {'PY_DIRS': ['py/user', 'py']}
    """
    with open(filename) as fh:
        dirs = fh.read()
    par_dc = {}
    dirs = dirs.replace('\\\n', '').replace(';', ' ')
    for line in dirs.split('\n'):
        if len(line) > 0 and line[0] != '#':
            key, values = line.split('=')
            par_dc[key] = values.split()
    if verbose_level > 1:
        print('Dictionary for BRUKER search paths:')
        for key in par_dc:
            print(key, par_dc[key])
    return par_dc
ca54dc948923826bb81af94e41be42caadfe6004
3,644,929
def get_all_playlist_items(playlist_id, yt_client): """ Get a list of video ids of videos currently in playlist """ return yt_client.get_playlist_items(playlist_id)
c7a8cc806b552b1853eba1d8223aa00225d5539e
3,644,930
def _get_last_measurement(object_id: int):
    """ Get the last measurement of object with given ID.

    Args:
        object_id (int): Object ID whose last measurement to look for.

    Returns:
        (GamMeasurement): The last measurement of the object, or None if it
            doesn't exist.
    """
    # .first() returns None on an empty result set, matching the documented
    # behaviour (.get() would raise GamMeasurement.DoesNotExist instead).
    last_mea = (GamMeasurement.select()
                .where(GamMeasurement.mea_object == object_id)
                .order_by(GamMeasurement.mea_id.desc())
                .first())
    return last_mea
a5ee460f57912bb885ae0cb534f6195c92983aad
3,644,931
def get_library_isotopes(acelib_path):
    """
    Returns the isotopes in the cross section library

    Parameters
    ----------
    acelib_path : str
        Path to the cross section library
        (i.e. '/home/luke/xsdata/endfb7/sss_endfb7u.xsdata')

    Returns
    -------
    lib_isos_list : list
        list of isotopes in the cross section library
    """
    lib_isos_list = []

    with open(acelib_path, 'r') as f:
        lines = f.readlines()
        for line in lines:
            if not line.strip():
                continue  # skip blank lines, which would break line.split()[0]
            iso = line.split()[0]
            lib_isos_list.append(iso)
    return lib_isos_list
d93d319b84c02b8156c5bad0998f5943a5bbe8ae
3,644,932
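A quick sketch against a throwaway xsdata-style file; the file contents are invented:

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".xsdata", delete=False) as f:
    f.write("92235.09c 92235 0 1 ...\n92238.09c 92238 0 1 ...\n")
    path = f.name

print(get_library_isotopes(path))  # ['92235.09c', '92238.09c']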
from typing import Mapping def read_wires(data: str) -> Mapping[int, Wire]: """Read the wiring information from data.""" wires = {} for line in data.splitlines(): wire_name, wire = get_wire(line) wires[wire_name] = wire return wires
87c8b82bceab0252204ababf842ca0b00ab6a059
3,644,933
from math import pi, sin


def back_ease_out(p):
    """Modeled after the overshooting cubic y = 1-((1-x)^3-(1-x)*sin((1-x)*pi))"""
    f = 1 - p
    return 1 - (f * f * f - f * sin(f * pi))
9946b8929211df4624ecc201ce026b981ffb3d0c
3,644,934
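A few sampled values showing the overshoot past 1.0 that gives this easing its "back" feel:

for p in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(p, round(back_ease_out(p), 4))
# back_ease_out(0.75) is ~1.1612: the curve overshoots 1.0 before settling back.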
def configure_estimator_params(init_args, train_args): """Validates the initialization and training arguments and constructs a `params` dictionary for creating a TensorFlow Estimator object.""" params = {} init_val = ArgumentsValidator(init_args, "Initialization arguments") with init_val: params["rm_dir_on_init"] = init_val.get("rm_dir", ATYPE_BOOL, True) params["use_ortho_weights"] = init_val.get("use_ortho_weights", ATYPE_BOOL, True) params["max_lsuv_iters"] = init_val.get("max_lsuv_iters", [ATYPE_NONE, ATYPE_INT], True) params["lsuv_tolerance"] = init_val.get("lsuv_tolerance", ATYPE_FLOAT, True) params["init_alpha"] = init_val.get("init_alpha", ATYPE_FLOAT, True) train_val = ArgumentsValidator(train_args, "Training arguments") with train_val: params["save_time"] = train_val.get("save_time", ATYPE_FLOAT, True) params["val_throttle_time"] = train_val.get("val_throttle_time", ATYPE_FLOAT, True) params["learning_rate"] = train_val.get("learning_rate", ATYPE_FLOAT, True) params["sgd_momentum"] = train_val.get("sgd_momentum", [ATYPE_NONE, ATYPE_FLOAT], True) params["sgd_use_nesterov"] = train_val.get("sgd_use_nesterov", ATYPE_BOOL, True) params["use_rmsprop"] = train_val.get("use_rmsprop", ATYPE_BOOL, True) params["rmsprop_decay"] = train_val.get("rmsprop_decay", ATYPE_FLOAT, True) params["rmsprop_momentum"] = train_val.get("rmsprop_momentum", ATYPE_FLOAT, True) params["rmsprop_epsilon"] = train_val.get("rmsprop_epsilon", ATYPE_FLOAT, True) params["reg_weight_decay"] = train_val.get("reg_weight_decay", [ATYPE_NONE, ATYPE_FLOAT], True) params["cost_type"] = train_val.get("cost_type", ATYPE_STRING, True).lower() params["max_grad_norm"] = train_val.get("max_grad_norm", [ATYPE_NONE, ATYPE_FLOAT], True) params["parallel_grad_gate"] = train_val.get("parallel_grad_gate", ATYPE_BOOL, True) return params
f132eaa4077dd197faed72d6805f15255b7dd680
3,644,935
def bit_lshift(bin_name, bit_offset, bit_size, shift, policy=None):
    """Creates a bit_lshift_operation to be used with operate or operate_ordered.

    Server left shifts bitmap starting at bit_offset for bit_size by shift bits.
    No value is returned.

    Args:
        bin_name (str): The name of the bin containing the bitmap.
        bit_offset (int): The offset where the bits will start being shifted.
        bit_size (int): The number of bits that will be shifted by shift places.
        shift (int): How many bits to shift by.
        policy (dict, optional): The bit_policy policy dictionary.
            See :ref:`aerospike_bit_policies`. default: None

    Returns:
        A dictionary usable in operate or operate_ordered. The format of the dictionary
        should be considered an internal detail, and subject to change.
    """
    return {
        OP_KEY: aerospike.OP_BIT_LSHIFT,
        BIN_KEY: bin_name,
        BIT_OFFSET_KEY: bit_offset,
        BIT_SIZE_KEY: bit_size,
        VALUE_KEY: shift,
        POLICY_KEY: policy
    }
3e8224a3f48eade9ee01a43819b4c6aa88ef308e
3,644,936
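A hedged usage sketch: the returned operation dict is passed to the client's operate call; the host, key and bin names here are invented:

import aerospike

client = aerospike.client({"hosts": [("127.0.0.1", 3000)]}).connect()
key = ("test", "demo", "bitmap1")

ops = [bit_lshift("bits_bin", bit_offset=0, bit_size=8, shift=3)]
_, _, bins = client.operate(key, ops)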
def compute_ccas(sigma_xx, sigma_xy, sigma_yx, sigma_yy, epsilon, verbose=True): """Main cca computation function, takes in variances and crossvariances. This function takes in the covariances and cross covariances of X, Y, preprocesses them (removing small magnitudes) and outputs the raw results of the cca computation, including cca directions in a rotated space, and the cca correlation coefficient values. Args: sigma_xx: 2d numpy array, (num_neurons_x, num_neurons_x) variance matrix for x sigma_xy: 2d numpy array, (num_neurons_x, num_neurons_y) crossvariance matrix for x,y sigma_yx: 2d numpy array, (num_neurons_y, num_neurons_x) crossvariance matrix for x,y (conj) transpose of sigma_xy sigma_yy: 2d numpy array, (num_neurons_y, num_neurons_y) variance matrix for y epsilon: small float to help with stabilizing computations verbose: boolean on whether to print intermediate outputs Returns: [ux, sx, vx]: [numpy 2d array, numpy 1d array, numpy 2d array] ux and vx are (conj) transposes of each other, being the canonical directions in the X subspace. sx is the set of canonical correlation coefficients- how well corresponding directions in vx, Vy correlate with each other. [uy, sy, vy]: Same as above, but for Y space invsqrt_xx: Inverse square root of sigma_xx to transform canonical directions back to original space invsqrt_yy: Same as above but for sigma_yy x_idxs: The indexes of the input sigma_xx that were pruned by remove_small y_idxs: Same as above but for sigma_yy """ (sigma_xx, sigma_xy, sigma_yx, sigma_yy, x_idxs, y_idxs) = remove_small(sigma_xx, sigma_xy, sigma_yx, sigma_yy, epsilon) numx = sigma_xx.shape[0] numy = sigma_yy.shape[0] if numx == 0 or numy == 0: return ([0, 0, 0], [0, 0, 0], np.zeros_like(sigma_xx), np.zeros_like(sigma_yy), x_idxs, y_idxs) if verbose: print("adding eps to diagonal and taking inverse") sigma_xx += epsilon * np.eye(numx) sigma_yy += epsilon * np.eye(numy) inv_xx = np.linalg.pinv(sigma_xx) inv_yy = np.linalg.pinv(sigma_yy) if verbose: print("taking square root") invsqrt_xx = positivedef_matrix_sqrt(inv_xx) invsqrt_yy = positivedef_matrix_sqrt(inv_yy) if verbose: print("dot products...") arr = np.dot(invsqrt_xx, np.dot(sigma_xy, invsqrt_yy)) if verbose: print("trying to take final svd") u, s, v = np.linalg.svd(arr) if verbose: print("computed everything!") return [u, np.abs(s), v], invsqrt_xx, invsqrt_yy, x_idxs, y_idxs
67827220cdbdd41250a8a40f140c8c21e0625df7
3,644,937
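A usage sketch assuming the (cross-)covariances come from two activation matrices of shape (neurons, datapoints), as in the SVCCA reference code:

import numpy as np

acts1 = np.random.randn(20, 1000)  # 20 neurons, 1000 datapoints
acts2 = np.random.randn(15, 1000)

covariance = np.cov(acts1, acts2)  # (35, 35) block covariance matrix
sigma_xx = covariance[:20, :20]
sigma_xy = covariance[:20, 20:]
sigma_yx = covariance[20:, :20]
sigma_yy = covariance[20:, 20:]

[u, s, v], invsqrt_xx, invsqrt_yy, x_idxs, y_idxs = compute_ccas(
    sigma_xx, sigma_xy, sigma_yx, sigma_yy, epsilon=1e-6, verbose=False)
print(s[:5])  # leading canonical correlation coefficients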
import numpy as np


def generate_samples(
    segment_mask: np.ndarray, num_of_samples: int = 64, p: float = 0.5
) -> np.ndarray:
    """Generate samples by randomly selecting a subset of the segments.

    Parameters
    ----------
    segment_mask : np.ndarray
        The mask generated by `create_segments()`: An array of shape
        (image_width, image_height).
    num_of_samples : int
        The number of samples to generate.
    p : float
        The probability for each segment to be drawn as 1 (kept) in a
        sample; a segment is removed with probability 1 - p.

    Returns
    -------
    samples : np.ndarray
        A two-dimensional array of size (num_of_samples, num_of_segments).
    """
    num_of_segments = int(np.max(segment_mask) + 1)
    return np.random.binomial(n=1, p=p, size=(num_of_samples, num_of_segments))
99ee42abf95bd338714e42beee42610e3ac2f09d
3,644,938
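A small driver with a toy four-segment mask:

import numpy as np

# A 2x2 "image" whose pixels belong to segments 0..3.
segment_mask = np.array([[0, 1],
                         [2, 3]])
samples = generate_samples(segment_mask, num_of_samples=5, p=0.5)
print(samples.shape)  # (5, 4); each row is a 0/1 segment inclusion vector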
import numpy as np


def get_mix_bandpassed(bp_list, comp, param_dict_file=None, bandpass_shifts=None,
                       ccor_cen_nus=None, ccor_beams=None, ccor_exps=None,
                       normalize_cib=True, param_dict_override=None,
                       bandpass_exps=None, nus_ghz=None, btrans=None,
                       dust_beta_param_name='beta_CIB',
                       radio_beta_param_name='beta_radio',
                       override_lbeam_bnus=None):
    """
    Get mixing factors for a given component that have "color corrections" that
    account for a non-delta-function bandpass and for possible variation of the
    beam within the bandpass. If the latter is provided, the resulting output is
    of shape [Nfreqs,nells], otherwise the output is of shape [Nfreqs,].

    Parameters
    ----------
    bp_list : list of strings
        a list of strings of length Nfreqs where each string is the filename for
        a file containing a specification of the bandpass for that frequency
        channel. For each file, the first column is frequency in GHz and the
        second column is the transmission whose overall normalization does not
        matter.

    comp : string
        a string specifying the component whose mixing is requested. Currently,
        the following are supported (1) CMB or kSZ (considered identical, and
        always returns ones) (2) tSZ (3) mu (4) rSZ (5) CIB (6) radio

    param_dict_file : string, optional
        filename of a YAML file used to create a dictionary of SED parameters
        and values (only needed for some SEDs). If None, defaults to parameters
        specified in input/fg_SEDs_default_params.yml.

    bandpass_shifts : list of floats, optional
        A list of floats of length [Nfreqs,] specifying how much in GHz to shift
        the entire bandpass. Each value can be positive (shift right) or
        negative (shift left). If None, no shift is applied and the bandpass
        specified in the files is used as is.

    ccor_cen_nus : list of floats, optional
        If not None, this indicates that the dependence of the beam on frequency
        within the bandpass should be taken into account. ccor_cen_nus will then
        be interpreted as a [Nfreqs,] length list of the "central frequencies"
        of each bandpass in GHz. The provided beams in ccor_beams for each
        channel are then scaled by (nu/nu_central)**ccor_exp where ccor_exp
        defaults to -1.

    ccor_beams : list of array_like, optional
        Only used if ccor_cen_nus is not None. In that mode, ccor_beams is
        interpreted as an [Nfreqs,] length list where each element is a 1d numpy
        array specifying the beam transmission starting from ell=0 and
        normalized to one at ell=0. The provided beams for each channel are then
        scaled by (nu/nu_central)**ccor_exp where ccor_exp defaults to -1 and
        nu_central is specified through ccor_cen_nus. If any list element is
        None, no scale dependent color correction is applied for that frequency
        channel. See get_scaled_beams for more information.

    ccor_exps : list of floats, optional
        Only used if ccor_cen_nus is not None. Defaults to -1 for each frequency
        channel. This controls how the beam specified in ccor_beams for the
        central frequencies specified in ccor_cen_nus is scaled to other
        frequencies.
    """
    if bandpass_shifts is not None and np.any(np.array(bandpass_shifts) != 0):
        print("WARNING: shifted bandpasses provided.")
    assert (comp is not None)
    assert (bp_list is not None)
    N_freqs = len(bp_list)
    if ccor_cen_nus is not None:
        assert len(ccor_cen_nus) == N_freqs
        assert len(ccor_beams) == N_freqs
        lmaxs = []
        for i in range(N_freqs):
            if ccor_beams[i] is not None:
                assert ccor_beams[i].ndim == 1
                lmaxs.append(ccor_beams[i].size)
        if len(lmaxs) == 0:
            ccor_cen_nus = None
            shape = N_freqs
        else:
            lmax = max(lmaxs)
            shape = (N_freqs, lmax)
        if ccor_exps is None:
            ccor_exps = [-1] * N_freqs
    elif override_lbeam_bnus is not None:
        lbeam, bnus = override_lbeam_bnus
        lmax = lbeam.size
        shape = (N_freqs, lmax)
    else:
        shape = N_freqs

    if (comp == 'CIB' or comp == 'rSZ' or comp == 'radio'):
        if param_dict_file is None:
            p = default_dict
        else:
            p = read_param_dict_from_yaml(param_dict_file)

    if (comp == 'CMB' or comp == 'kSZ'):
        # CMB (or kSZ)
        # this is unity by definition, since we're working in Delta T units [uK_CMB];
        # output ILC map will thus also be in uK_CMB
        output = np.ones(shape)
        for i in range(N_freqs):
            # this case is appropriate for HI or other maps that contain no CMB-relevant
            # signals (and also no CIB); they're assumed to be denoted by None in bp_list
            if bp_list[i] is None:
                output[i] = 0.
        return output
    else:
        output = np.zeros(shape)
        for i, bp in enumerate(bp_list):
            if bp_list[i] is not None:
                if nus_ghz is None:
                    nu_ghz, trans = np.loadtxt(bp, usecols=(0, 1), unpack=True)
                else:
                    nu_ghz = nus_ghz
                    trans = btrans
                if bandpass_shifts is not None:
                    nu_ghz = nu_ghz + bandpass_shifts[i]
                if bandpass_exps is not None:
                    trans = trans * nu_ghz**bandpass_exps[i]

                lbeam = 1
                bnus = 1
                # It turns out scaling the beam is actually the slowest part of the
                # calculation so we allow pre-calculated ones to be provided
                if override_lbeam_bnus is not None:
                    lbeam, bnus = override_lbeam_bnus
                else:
                    if ccor_cen_nus is not None:
                        if ccor_beams[i] is not None:
                            lbeam = ccor_beams[i]
                            ells = np.arange(lbeam.size)
                            cen_nu_ghz = ccor_cen_nus[i]
                            bnus = get_scaled_beams(ells, lbeam, cen_nu_ghz, nu_ghz,
                                                    ccor_exp=ccor_exps[i]).swapaxes(0, 1)
                            assert np.all(np.isfinite(bnus))

                if (comp == 'tSZ' or comp == 'mu' or comp == 'rSZ'):
                    # Thermal SZ (y-type distortion) or mu-type distortion or relativistic tSZ
                    # following Sec. 3.2 of https://arxiv.org/pdf/1303.5070.pdf
                    # -- N.B. IMPORTANT TYPO IN THEIR EQ. 35 -- see
                    # https://www.aanda.org/articles/aa/pdf/2014/11/aa21531-13.pdf
                    mixs = get_mix(nu_ghz, comp, param_dict_file=param_dict_file,
                                   param_dict_override=param_dict_override,
                                   dust_beta_param_name=dust_beta_param_name,
                                   radio_beta_param_name=radio_beta_param_name)
                    # this is the response at each frequency channel in uK_CMB
                    # for a signal with y=1 (or mu=1)
                    val = np.trapz(trans * dBnudT(nu_ghz) * bnus * mixs, nu_ghz) / \
                        np.trapz(trans * dBnudT(nu_ghz), nu_ghz) / lbeam
                elif (comp == 'CIB'):
                    # following Sec. 3.2 of https://arxiv.org/pdf/1303.5070.pdf
                    # -- N.B. IMPORTANT TYPO IN THEIR EQ. 35 -- see
                    # https://www.aanda.org/articles/aa/pdf/2014/11/aa21531-13.pdf
                    # CIB SED parameter choices in dict file: Tdust_CIB [K], beta_CIB, nu0_CIB [GHz]
                    # N.B. overall amplitude is not meaningful here; output ILC map
                    # (if you tried to preserve this component) would not be in sensible units
                    mixs = get_mix(nu_ghz, 'CIB_Jysr', param_dict_file=param_dict_file,
                                   param_dict_override=param_dict_override,
                                   dust_beta_param_name=dust_beta_param_name,
                                   radio_beta_param_name=radio_beta_param_name)
                    vnorm = np.trapz(trans * dBnudT(nu_ghz), nu_ghz)
                    val = (np.trapz(trans * mixs * bnus, nu_ghz) / vnorm) / lbeam
                    # N.B. this expression follows from Eqs. 32 and 35 of
                    # https://www.aanda.org/articles/aa/pdf/2014/11/aa21531-13.pdf ,
                    # and then noting that one also needs to first rescale the CIB emission
                    # in Jy/sr from nu0_CIB to the "nominal frequency" nu_c that appears in
                    # those equations (i.e., multiply by get_mix(nu_c, 'CIB_Jysr')).
                    # The resulting cancellation leaves this simple expression which has
                    # no dependence on nu_c.
                elif (comp == 'radio'):
                    # same logic/formalism as used for CIB component immediately above this
                    # radio SED parameter choices in dict file: beta_radio, nu0_radio [GHz]
                    mixs = get_mix(nu_ghz, 'radio_Jysr', param_dict_file=param_dict_file,
                                   param_dict_override=param_dict_override,
                                   dust_beta_param_name=dust_beta_param_name,
                                   radio_beta_param_name=radio_beta_param_name)
                    val = (np.trapz(trans * mixs * bnus, nu_ghz) /
                           np.trapz(trans * dBnudT(nu_ghz), nu_ghz)) / lbeam
                else:
                    print("unknown component specified")
                    raise NotImplementedError

                if (ccor_cen_nus is not None) and (ccor_beams[i] is not None):
                    val[lbeam == 0] = 0
                output[i] = val
                assert np.all(np.isfinite(val))
            elif bp_list[i] is None:
                # this case is appropriate for HI or other maps that contain no CMB-relevant
                # signals (and also no CIB); they're assumed to be denoted by None in bp_list
                output[i] = 0.

        if (comp == 'CIB' or comp == 'radio') and normalize_cib:
            # overall amplitude not meaningful, so divide by max to get numbers of order
            # unity; output gives the relative conversion between CIB (or radio) at
            # different frequencies, for maps in uK_CMB
            omax = output.max(axis=0)
            ret = output / omax
            if (ccor_cen_nus is not None):
                ret[:, omax == 0] = 0
        else:
            ret = output
        assert np.all(np.isfinite(ret))
        return ret
d4693e41c755dd1067c371bfa740ce1436dfc85a
3,644,939
import numpy as np


def partition(data, label_name, ratio):
    """ Partitions data set according to a provided ratio.

    params:
        data - The data set in a pandas data frame
        label_name - the name of the column in the data set that contains the labels
        ratio - the training/total data ratio

    returns:
        training_data - The data set to train on
        training_labels - Indexed labels for training set
        testing_data - The data set to test on
        testing_labels - Indexed labels for testing set
    """
    data = data.loc[np.random.permutation(data.index)]
    partition_idx = int(data.shape[0] * ratio)
    train, test = np.split(data, [partition_idx])

    def splitDataLabels(data):
        """Separates labels from data."""
        labels = data[label_name].to_frame()
        data = data.drop(columns=[label_name])
        return data, labels

    train_data, train_label = splitDataLabels(train)
    test_data, test_label = splitDataLabels(test)
    return train_data, train_label, test_data, test_label
6f00c8df9e5fb42f4e3fb01744215214e732f441
3,644,940
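A quick usage sketch with a toy DataFrame:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "x": np.arange(10),
    "y": np.arange(10) * 2.0,
    "label": np.random.randint(0, 2, size=10),
})
train_x, train_y, test_x, test_y = partition(df, "label", ratio=0.8)
print(train_x.shape, test_x.shape)  # (8, 2) (2, 2)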
def get_piesocket_api_key(): """ Retrieves user's Piesocket API key. Returns: (str) Piesocket API key. Raises: (ImproperlyConfigured) if the Piesocket API key isn't specified in settings. """ return get_setting_or_raise( setting="PIESOCKET_API_KEY", setting_str="PieSocket API Key" )
657bba650a914ed1a15d54b9d0000f37b99568d0
3,644,942
import numpy as np


def downsample(myarr, factor, estimator=np.mean):
    """
    Downsample a 2D array by averaging over *factor* pixels in each axis.
    Crops the upper edge if the shape is not a multiple of factor.

    This code is pure numpy and should be fast.

    keywords:
        estimator - defaults to mean.  You can downsample by summing or
            something else if you want a different estimator
            (e.g., downsampling error: you want to sum & divide by sqrt(n))
    """
    ys, xs = myarr.shape
    crarr = myarr[:ys - (ys % int(factor)), :xs - (xs % int(factor))]
    dsarr = estimator(np.concatenate([[crarr[i::factor, j::factor]
                                       for i in range(factor)]
                                      for j in range(factor)]), axis=0)
    return dsarr
45b6422cb7f9b01512bc4860229164b043201675
3,644,943
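A worked example: a 5x5 ramp downsampled by a factor of 2, which crops the odd fifth row and column:

import numpy as np

arr = np.arange(25, dtype=float).reshape(5, 5)
print(downsample(arr, 2))
# [[ 3.  5.]
#  [13. 15.]]  e.g. the top-left cell is the mean of arr[:2, :2] = 3.0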
import Quartz


def getActiveWindow():
    """Returns the title of the currently active window as a string.

    (Temporary: this will eventually return a Window object; for now only the
    owner name and window name are returned.)
    """
    # Source: https://stackoverflow.com/questions/5286274/front-most-window-using-cgwindowlistcopywindowinfo
    windows = Quartz.CGWindowListCopyWindowInfo(
        Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly,
        Quartz.kCGNullWindowID)
    for win in windows:
        if win['kCGWindowLayer'] == 0:
            return '%s %s' % (win[Quartz.kCGWindowOwnerName], win.get(Quartz.kCGWindowName, ''))
    raise Exception('Could not find an active window.')
ca1c810525f0a49cd9f4b53d0d621cb39b3b733e
3,644,944
def _derivative_log(x): """Chain rule on natural log = (1/x)*(dx/dr)""" return _protected_inverse(x[0])[:, :, np.newaxis, np.newaxis]*x[1]
5f4bf5416575126cd93adaee6ccfca942ad6218f
3,644,945
def svn_wc_merge_props(*args): """ svn_wc_merge_props(svn_wc_notify_state_t state, char path, svn_wc_adm_access_t adm_access, apr_hash_t baseprops, apr_array_header_t propchanges, svn_boolean_t base_merge, svn_boolean_t dry_run, apr_pool_t pool) -> svn_error_t """ return _wc.svn_wc_merge_props(*args)
54187e010f71798bee90eb179a10da11bf410fce
3,644,946
def is_paused(): """ Return True if is_paused is set in the global settings table of the database. """ try: is_paused_val = Settings.objects.get().is_paused except ObjectDoesNotExist: is_paused_val = False return is_paused_val
59b99d4a4842e14205376d7923d3e5c8b52c30a6
3,644,947
import itertools

import numpy as np


def get_accurate(clustering_res_df, cluster_number, error=False):
    """
    :param clustering_res_df: a pandas DataFrame about clustering result
    :param cluster_number: the number of the clusters
        (the first column is the index, the second column is the right
        information, the third column is the clustering information)
    :param error: if error=True, then return the error rate, else, return the accuracy rate
    :return: the clustering accuracy
    """
    if clustering_res_df.shape[1] != 3:
        raise Exception("Shape Error: the input DataFrame's column number is not 3")
    real_dict = {}
    clustering_dict = {}
    for i in range(cluster_number):
        real_df = clustering_res_df.loc[clustering_res_df['ClusterInfo'] == i]
        clustering_df = clustering_res_df.loc[clustering_res_df['ClusterExp'] == i]
        real_dict[i] = real_df['IndexNum'].tolist()
        clustering_dict[i] = clustering_df['IndexNum'].tolist()
    accuracy_matrix = np.zeros((cluster_number, cluster_number))
    for i in range(cluster_number):
        for j in range(cluster_number):
            accuracy_matrix[i][j] = len(set(real_dict[i]).intersection(set(clustering_dict[j])))
    # for test
    # print("The accuracy matrix is: \n", accuracy_matrix)

    # Try every assignment of predicted clusters to true clusters (O(k!)).
    case_iterator = itertools.permutations(range(cluster_number), cluster_number)
    accurate = 0
    for item in case_iterator:
        acc = sum([accuracy_matrix[i][item[i]] for i in range(cluster_number)])
        if acc > accurate:
            accurate = acc
    if not error:
        return accurate / clustering_res_df.shape[0]
    else:
        return 1 - accurate / clustering_res_df.shape[0]
7ba71bcd82e70d9344994f9b6a2133676d58f683
3,644,949
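A toy driver matching the column names used in the function body (IndexNum, ClusterInfo, ClusterExp):

import pandas as pd

df = pd.DataFrame({
    "IndexNum": [0, 1, 2, 3, 4, 5],
    "ClusterInfo": [0, 0, 0, 1, 1, 1],  # ground truth
    "ClusterExp":  [1, 1, 0, 0, 0, 0],  # predicted
})
print(get_accurate(df, cluster_number=2))  # 0.8333... after the best label permutation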