Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
from typing import List


def connect_with_interior_or_edge_bulk(
    polygon: Polygon, polygon_array: GeometryArray
) -> List[bool]:
    """
    Return boolean array with True iff polys overlap in interior/edge, but not corner.

    Args:
        polygon (Polygon): A shapely Polygon
        polygon_array (GeometryArray): The other shapely Polygons in a geopandas geometry array

    Returns:
        List[bool]: Boolean array with value True, iff `polygon` and the polygon in
            `polygon_array` at the given location overlap in their interior/edge.
    """
    patterns = polygon_array.relate(polygon)
    return [
        de9im_match(pattern, EDGE_ONLY_PATTERN) or de9im_match(pattern, OVERLAP_PATTERN)
        for pattern in patterns
    ]
852a43d1782ae85dbb2d2adb70feb59ace7a6a44
3,643,445
import pickle


def get_history(kmodel=None):
    """
    Returns a python dict with
        key = metric_id
        val = [metric each epoch]
    """
    # get kmodel object from input str if the input is a string
    if isinstance(kmodel, str):
        try:
            kmodel = KModel.objects.get(id=kmodel)
        except ObjectDoesNotExist:
            # object with name doesn't exist
            return None
        except ValidationError:
            # input string isn't a valid uuid
            return None
    elif isinstance(kmodel, KModel):
        # awesome! proceed
        pass
    else:
        raise ValueError("call get_history with either a str uuid for model or a db model instance")

    # get the history object and load history
    if kmodel.artifacts.filter(descriptor="history").exists():
        artifact_path = kmodel.artifacts.get(descriptor="history").path
        return pickle.load(open(artifact_path, "rb"))
    else:
        return None
da2886f565ca2f96e49a38b458368de2e4216c01
3,643,446
def get_neighbor_v4_by_search(search=None):
    """Return a list of NeighborV4's by dict."""
    try:
        objects = NeighborV4.objects.filter()
        search_dict = search if search else dict()
        object_map = build_query_to_datatable_v3(objects, search_dict)
    except FieldError as e:
        raise api_rest_exceptions.ValidationAPIException(str(e))
    except Exception as e:
        raise api_rest_exceptions.NetworkAPIException(str(e))
    else:
        return object_map
6893c32014d6b2a8871825744a1953024fe3a289
3,643,447
def load_clean_data():
    """Function that loads the tuberculosis file and preprocesses/cleans the dataframe."""
    df = pd.read_csv('tb.csv')
    # drop columns 'fu' and 'mu' since they only contain missing values and would
    # mess up the following processing steps
    df = df.drop(columns=['fu', 'mu'])
    # record the initial row and column counts
    initial_rows = len(df.index)
    initial_col = len(df.columns)
    # melt the gender-age columns of the df
    df = pd.melt(df, id_vars=['country', 'year'], var_name='variable', value_name='value')
    melted_row = len(df.index)
    # assert that (initial col-number - id_var_no) * rows = length of rows afterwards
    assert (initial_col - 2) * initial_rows == melted_row
    # the column 'variable' needs to be split into two columns 'gender' and 'age';
    # then delete column 'variable'
    df['gender'] = df.variable.str[0]
    df['age'] = df.variable.str[1:3]
    df = df.drop(columns='variable')
    # transform age into an integer
    df['age'] = pd.to_numeric(df['age'], errors='coerce')
    # transform gender into category in order to save memory
    df['gender'] = df['gender'].astype('category')
    return df
    # print(df.info())
    # print(df.head())
    # print(df.loc[df['country'] == 'AD'])
    # the transformation seems to be correct. The columns age and gender have no
    # missing values (which would have been suspicious)
2430bb61705f95c77f68eabbcda535e7d0f443ea
3,643,448
def is_anagram_passphrase(phrase):
    """
    Checks whether a phrase contains no words that are anagrams of other words.

    >>> is_anagram_passphrase(["abcde", "fghij"])
    True
    >>> is_anagram_passphrase(["abcde", "xyz", "ecdab"])
    False
    >>> is_anagram_passphrase(["a", "ab", "abc", "abd", "abf", "abj"])
    True
    >>> is_anagram_passphrase(["iiii", "oiii", "ooii", "oooi", "oooo"])
    True
    >>> is_anagram_passphrase(["oiii", "ioii", "iioi", "iiio"])
    False
    """
    return not any(
        any(
            first_word == "".join(permutated_word)
            for permutated_word in permutations(second_word)
        )
        for first_word, second_word in combinations(phrase, 2)
    )
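A note on the snippet above: testing every permutation of second_word is factorial in word length. A minimal sketch of an equivalent check (a hypothetical helper, not part of the original) that compares sorted character sequences instead:

def is_anagram_passphrase_fast(phrase):
    # Two words are anagrams iff their sorted character sequences match,
    # so the phrase is valid iff no sorted form repeats.
    sorted_words = ["".join(sorted(word)) for word in phrase]
    return len(sorted_words) == len(set(sorted_words))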
aa7a95cda82317a41d8c4f2765a4706896135f45
3,643,449
def _client_ip(client):
    """Compatibility layer for Flask<0.12."""
    return getattr(client, 'environ_base', {}).get('REMOTE_ADDR')
1bd110563c5e7165ec795d16e0f0d7be6d053db1
3,643,450
def extractRecords(getRecordsResponse):
    """Returns a list of etrees of the individual records of a getRecords response"""
    recs = getRecordsResponse.xpath(
        '/csw:GetRecordsResponse/csw:SearchResults//csw:Record',
        namespaces={'csw': ns_csw})
    return recs
3de69fc99f77c4d06346aa82121cc936e16a06b4
3,643,452
from typing import Set


def tagify(tail=u'', head=u'', sep=u'.'):
    """
    Returns namespaced event tag string.
    Tag generated by joining with sep the head and tail in that order.
    head and tail may be a string or a list, tuple, or Set of strings.
    If head is a list, tuple or Set, then join with sep all elements of head
    individually; else join in whole as string prefix.
    If tail is a list, tuple or Set, then join with sep all elements of tail
    individually; else join in whole as string suffix.
    If either head or tail is empty then do not exhibit in tag.
    """
    if isinstance(head, (list, tuple, Set)):  # list like so expand
        parts = list(head)
    else:  # string like so put in list
        parts = [head]

    if isinstance(tail, (list, tuple, Set)):  # list like so extend parts
        parts.extend(tail)
    else:  # string like so append
        parts.append(tail)

    return sep.join([part for part in parts if part])
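A quick usage sketch for tagify (illustrative values only; the output follows from the join logic above):

print(tagify(tail="close", head=["hello", "world"]))  # hello.world.close
print(tagify(tail=["a", "b"], head=""))               # a.b (the empty head is dropped)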
ddebdc0c4224db428a4338fd1e4c61137ac2d5c5
3,643,453
def get_fn_data(src_db, fn_table, year=None):
    """Get the data and fields from the query in the src database for the
    fish net table specified by fn_table. Returns list of dictionaries -
    each element represents a single row returned by the query.

    Arguments:
    - `src_db`: full path to the source database.
    - `fn_table`: the name of the stored query that returns the data for
      the specified fish net table
    """
    if year:
        sql = "execute get_{} @yr='{}'".format(fn_table, year)
    else:
        sql = "execute get_{}".format(fn_table)

    constring = "DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={}"
    with pyodbc.connect(constring.format(src_db)) as src_conn:
        src_cur = src_conn.cursor()
        rs = src_cur.execute(sql)
        data = rs.fetchall()
        flds = [x[0].lower() for x in src_cur.description]

    records = []
    for record in data:
        records.append({k: v for k, v in zip(flds, record)})

    return records
60d48e0b7727ccd25e4b91bf59f1f505ddbc3127
3,643,454
import collections


def convert_example_to_feature(example, tokenizer, max_seq_length=512,
                               doc_stride=384, max_query_length=125, is_training=True,
                               cls_token_at_end=False, cls_token='[CLS]', sep_token='[SEP]',
                               pad_token=0, sequence_a_segment_id=0, sequence_b_segment_id=1,
                               cls_token_segment_id=0, pad_token_segment_id=0,
                               mask_padding_with_zero=True, sequence_a_is_doc=False):
    """Convert a single QuacExample to features (model input)"""
    query_tokens = tokenizer.tokenize(example.question_text)

    if len(query_tokens) > max_query_length:
        query_tokens = query_tokens[-max_query_length:]

    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    for (i, token) in enumerate(example.doc_tokens):
        orig_to_tok_index.append(len(all_doc_tokens))
        sub_tokens = tokenizer.tokenize(token)
        for sub_token in sub_tokens:
            tok_to_orig_index.append(i)
            all_doc_tokens.append(sub_token)

    tok_start_position = None
    tok_end_position = None
    if is_training and example.is_impossible:
        tok_start_position = -1
        tok_end_position = -1
    if is_training and not example.is_impossible:
        tok_start_position = orig_to_tok_index[example.start_position]
        if example.end_position < len(example.doc_tokens) - 1:
            tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
        else:
            tok_end_position = len(all_doc_tokens) - 1
        (tok_start_position, tok_end_position) = _improve_answer_span(
            all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
            example.orig_answer_text)

    # The -3 accounts for [CLS], [SEP] and [SEP]
    max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
    assert max_tokens_for_doc >= 384, max_tokens_for_doc

    # We can have documents that are longer than the maximum sequence length.
    # To deal with this we do a sliding window approach, where we take chunks
    # of up to our max length with a stride of `doc_stride`.
    # We set the doc_stride to 384, which is the max length of evidence text,
    # meaning that each evidence has exactly one _DocSpan.
    _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
        "DocSpan", ["start", "length"])
    doc_spans = []
    start_offset = 0
    while start_offset < len(all_doc_tokens):
        length = len(all_doc_tokens) - start_offset
        if length > max_tokens_for_doc:
            length = max_tokens_for_doc
        doc_spans.append(_DocSpan(start=start_offset, length=length))
        if start_offset + length == len(all_doc_tokens):
            break
        start_offset += min(length, doc_stride)
    assert len(doc_spans) == 1, (max_tokens_for_doc, example)
    # if len(doc_spans) > 1:
    #     print(len(doc_spans), example)
    #     doc_spans = [doc_spans[0]]

    for (doc_span_index, doc_span) in enumerate(doc_spans):
        tokens = []
        token_to_orig_map = {}
        token_is_max_context = {}
        segment_ids = []

        # p_mask: mask with 1 for tokens that cannot be in the answer
        # (0 for tokens which can be in an answer).
        # Original TF implem also keeps the classification token (set to 0) (not sure why...)
        p_mask = []

        # CLS token at the beginning
        if not cls_token_at_end:
            tokens.append(cls_token)
            segment_ids.append(cls_token_segment_id)
            p_mask.append(0)
            cls_index = 0

        # XLNet: P SEP Q SEP CLS
        # Others: CLS Q SEP P SEP
        if not sequence_a_is_doc:
            # Query
            tokens += query_tokens
            segment_ids += [sequence_a_segment_id] * len(query_tokens)
            p_mask += [1] * len(query_tokens)

            # SEP token
            tokens.append(sep_token)
            segment_ids.append(sequence_a_segment_id)
            p_mask.append(1)

        # Paragraph
        for i in range(doc_span.length):
            split_token_index = doc_span.start + i
            token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]

            is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                   split_token_index)
            token_is_max_context[len(tokens)] = is_max_context
            tokens.append(all_doc_tokens[split_token_index])
            if not sequence_a_is_doc:
                segment_ids.append(sequence_b_segment_id)
            else:
                segment_ids.append(sequence_a_segment_id)
            p_mask.append(0)
        paragraph_len = doc_span.length

        if sequence_a_is_doc:
            # SEP token
            tokens.append(sep_token)
            segment_ids.append(sequence_a_segment_id)
            p_mask.append(1)

            tokens += query_tokens
            segment_ids += [sequence_b_segment_id] * len(query_tokens)
            p_mask += [1] * len(query_tokens)

        # SEP token
        tokens.append(sep_token)
        segment_ids.append(sequence_b_segment_id)
        p_mask.append(1)

        # CLS token at the end
        if cls_token_at_end:
            tokens.append(cls_token)
            segment_ids.append(cls_token_segment_id)
            p_mask.append(0)
            cls_index = len(tokens) - 1  # Index of classification token

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(pad_token)
            input_mask.append(0 if mask_padding_with_zero else 1)
            segment_ids.append(pad_token_segment_id)
            p_mask.append(1)

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        span_is_impossible = example.is_impossible
        start_position = None
        end_position = None
        if is_training and not span_is_impossible:
            # For training, if our document chunk does not contain an annotation
            # we throw it out, since there is nothing to predict.
            doc_start = doc_span.start
            doc_end = doc_span.start + doc_span.length - 1
            out_of_span = False
            if not (tok_start_position >= doc_start and
                    tok_end_position <= doc_end):
                out_of_span = True
            if out_of_span:
                start_position = 0
                end_position = 0
                span_is_impossible = True
            else:
                if sequence_a_is_doc:
                    doc_offset = 0
                else:
                    doc_offset = len(query_tokens) + 2
                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset

        if is_training and span_is_impossible:
            start_position = cls_index
            end_position = cls_index

        if False:
            logger.info("*** Example ***")
            logger.info("unique_id: %s" % (example.example_id))
            logger.info("example_id: %s" % (example.example_id))
            logger.info("qid of the example: %s" % (example.qas_id))
            logger.info("doc_span_index: %s" % (doc_span_index))
            logger.info("tokens: %s" % " ".join(tokens))
            logger.info("token_to_orig_map: %s" % " ".join([
                "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
            logger.info("token_is_max_context: %s" % " ".join([
                "%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
            ]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info(
                "input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            if is_training and span_is_impossible:
                logger.info("impossible example")
            if is_training and not span_is_impossible:
                answer_text = " ".join(tokens[start_position:(end_position + 1)])
                logger.info("start_position: %d" % (start_position))
                logger.info("end_position: %d" % (end_position))
                logger.info("retrieval_label: %d" % (example.retrieval_label))
                logger.info(
                    "answer: %s" % (answer_text))

        feature = InputFeatures(
            unique_id=example.example_id,
            example_id=example.example_id,
            doc_span_index=doc_span_index,
            tokens=tokens,
            token_to_orig_map=token_to_orig_map,
            token_is_max_context=token_is_max_context,
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
            cls_index=cls_index,
            p_mask=p_mask,
            paragraph_len=paragraph_len,
            start_position=start_position,
            end_position=end_position,
            is_impossible=span_is_impossible,
            retrieval_label=example.retrieval_label)

    return feature
1db90a014da443e143411276cbbf0e29a9872b7f
3,643,455
def make_pickle(golfed=False):
    """Returns the pickle-quine. If "golfed" is true, we return the
    minimized version; if false we return the one that's easier to
    understand.
    """
    part_1 = b''.join(PART_1)
    part_2 = b''.join(GOLFED_PART_2 if golfed else PART_2)

    # We tack the length onto part 1:
    length = len(part_1) + 1 + len(part_2)
    part_1 = part_1 + b'%c' % length

    # Now glue everything together.
    the_string = part_1 + part_2
    return part_1 + the_string + part_2
79fdc182ef3487090a8acd61c44b46d4b7cc5493
3,643,456
def classify_design_space(action: str) -> int:
    """
    The returned index corresponds to the list stored in "count":
    [sketching, 3D features, mating, visualizing, browsing, other organizing]

    Formulas for each design space action:
        sketching = "Add or modify a sketch" + "Copy paste sketch"
        3D features = "Commit add or edit of part studio feature" +
                      "Delete part studio feature" - "Add or modify a sketch"
        mating = "Add assembly feature" + "Delete assembly feature" +
                 "Add assembly instance" + "Delete assembly instance"
        visualizing = "Start assembly drag" + "Animate action called"
        browsing = Opening a tab + Creating a tab + Deleting a tab + Renaming a tab
        other organizing = "Create version" + "Cancel Operation" + "Undo Redo Operation" +
                           "Merge branch" + "Branch workspace" + "Update version"

    :param action: the action to be classified
    :return: the index of the action type that this action is accounted for;
             if the action does not belong to any category, return -1
             Note: "Add or modify a sketch" is special (+1 for sketching and
             -1 for 3D features), return -10
    """
    # Creating a sketch is special as it affects both the sketching and the 3D features counts
    if action == "Add or modify a sketch":
        return -10
    # Sketching
    elif action == "Copy paste sketch":
        return 0
    # 3D features
    elif action in ["Commit add or edit of part studio feature",
                    "Delete part studio feature"]:
        return 1
    # Mating
    elif action in ["Add assembly feature", "Delete assembly feature",
                    "Add assembly instance", "Delete assembly instance"]:
        return 2
    # Visualizing
    elif action in ["Start assembly drag", "Animate action called"]:
        return 3
    # Browsing
    elif "Tab" in action and ("opened" in action or "created" in action or
                              "deleted" in action or "renamed" in action):
        return 4
    # Other organizing
    elif action in ["Create version", "Cancel Operation", "Undo Redo Operation",
                    "Merge branch", "Branch workspace", "Update version"]:
        return 5
    # Not classified (Optional: print out the unclassified actions)
    else:
        return -1
22dc68aa23258691b0d4b9f1b27a9e8451b275d9
3,643,457
import hashlib


def get_sha256_hash(plaintext):
    """
    Hashes an object using SHA256. Usually used to generate hash of
    chat ID for lookup

    Parameters
    ----------
    plaintext: int or str
        Item to hash

    Returns
    -------
    str
        Hash of the item
    """
    hasher = hashlib.sha256()
    string_to_hash = str(plaintext)
    hasher.update(string_to_hash.encode('utf-8'))
    hash = hasher.hexdigest()
    return hash
79735973b8ad73823662cc428513ef393952b681
3,643,458
def get_bit_coords(dtype_size):
    """Get coordinates for bits assuming float dtypes."""
    if dtype_size == 16:
        coords = (
            ["±"]
            + [f"e{int(i)}" for i in range(1, 6)]
            + [f"m{int(i-5)}" for i in range(6, 16)]
        )
    elif dtype_size == 32:
        coords = (
            ["±"]
            + [f"e{int(i)}" for i in range(1, 9)]
            + [f"m{int(i-8)}" for i in range(9, 32)]
        )
    elif dtype_size == 64:
        coords = (
            ["±"]
            + [f"e{int(i)}" for i in range(1, 12)]
            + [f"m{int(i-11)}" for i in range(12, 64)]
        )
    else:
        raise ValueError(f"dtype of size {dtype_size} neither known nor implemented.")
    return coords
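For reference, the 16-bit branch above matches the IEEE 754 half-precision layout (1 sign bit, 5 exponent bits, 10 mantissa bits):

coords = get_bit_coords(16)
print(coords[0], coords[1:6])
# ± ['e1', 'e2', 'e3', 'e4', 'e5'] followed by 'm1' through 'm10'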
6400017e47506613cf15162425843ce2b19eed3e
3,643,459
from klpyastro.utils import obstable


def create_record(user_inputs):
    """
    Create an ObsRecord from the information gathered from the users.

    :param user_inputs: Dictionary with all the values (as strings)
        required to fully populate an ObsRecord object.
    :type user_inputs: dict
    :rtype: ObsRecord object
    """
    record = obstable.ObsRecord()
    record.targetname = user_inputs['targetname']
    record.rootname = user_inputs['rootname']
    record.band = user_inputs['band']
    record.grism = user_inputs['grism']
    record.datatype = user_inputs['datatype']
    record.applyto = user_inputs['applyto']
    record.filerange = user_inputs['filerange']
    record.exptime = float(user_inputs['exptime'])
    record.lnrs = int(user_inputs['lnrs'])
    record.rdmode = user_inputs['rdmode']
    return record
8fc1a31a24ac7663b405074410d1c025fbcd7d62
3,643,460
import itertools


def best_wild_hand(hand):
    """best_hand, but with jokers"""
    non_jokers = list(filter(lambda x: x[0] != '?', hand))
    jokers = filter(lambda x: x[0] == '?', hand)
    jokers_variations = itertools.product(
        *[joker_variations(joker) for joker in jokers]
    )
    best_hands = []
    for variations in jokers_variations:
        full_hand = itertools.chain(variations, non_jokers)
        best_hands.append(best_hand(full_hand))
    return max((hand_rank(h), h) for h in best_hands)[1]
86cb58dba0338c481ce516657118cdc20260ebf3
3,643,461
def GetTestMetadata(test_metadata_file=FAAS_ROOT + "/synthetic_workload_invoker/test_metadata.out"):
    """
    Returns the test start time from the output log of SWI.
    """
    test_start_time = None
    with open(test_metadata_file) as f:
        lines = f.readlines()
        test_start_time = lines[0]
        config_file = lines[1]
        invoked_actions = int(lines[2][:-1])
        print('Invocations by Workload Invoker: ' + str(invoked_actions))
    try:
        return int(test_start_time[:-1]), config_file[:-1]
    except:
        logger.error("Error reading the test metadata!")
        return None, None
668e214452bb100885a8631b5d900eb7ca90e43b
3,643,462
import torch


def gen_geo(num_nodes, theta, lambd, source, target, cutoff, seed=None):
    """Generates a random graph with threshold theta consisting of 'num_nodes'
    and paths with maximum length 'cutoff' between 'source' and 'target'.

    Parameters
    ----------
    num_nodes : int
        Number of nodes.
    theta : float
        Threshold of graph.
    lambd : float
        Weights of graph are generated randomly from exp(lambd) distribution.
    source : int
        Origin of path. Must be in range(0, num_nodes).
    target : int
        Destination of path. Must be in range(0, num_nodes).
    cutoff : int
        Maximum path length.
    seed : int
        Set random seed if not None.

    Returns
    -------
    object of type graph
        Generated graph.
    """
    file_name = './saved_items/graph_N' + str(num_nodes) + '_cutoff' + str(cutoff)
    if seed is not None:
        np.random.seed(seed)
        rand.seed(seed)
        torch.manual_seed(seed)
    weights = {node: rand.expovariate(lambd) for node in range(num_nodes)}
    graph = geo_thresh(num_nodes, theta, weight=weights)
    for (ni, nj) in graph.edges():
        graph.edges[ni, nj]['weight'] = weights[ni] + weights[nj]
    plt.figure(figsize=(10, 5))
    nx.draw(graph, with_labels=True, font_weight='bold')
    plt.savefig('./figures/graph_N' + str(num_nodes) + str(".png"), dpi=500)
    plt.show()
    save_obj(graph, file_name)
    paths = nx.all_simple_paths(graph, source=source, target=target, cutoff=cutoff)
    paths = list(paths)
    save_obj(paths, file_name + '_paths')
    print('Paths length: ', len(paths))
    return graph
5d3363aab4e13dd8690277453f603fe707c00d41
3,643,463
import re


def FilterExceptions(image_name, errors):
    """Filter out the Application Verifier errors that have exceptions."""
    exceptions = _EXCEPTIONS.get(image_name, [])

    def _HasNoException(error):
        # Iterate over all the exceptions.
        for (severity, layer, stopcode, regexp) in exceptions:
            # And see if they match, first by type.
            if (error.severity == severity and
                    error.layer == layer and
                    error.stopcode == stopcode):
                # And then by regexpr match to the trace symbols.
                for trace in error.trace:
                    if trace.symbol and re.match(regexp, trace.symbol):
                        return False
        return True

    # Materialize the filter so len() and append() also work on Python 3,
    # where filter() returns a lazy iterator.
    filtered_errors = list(filter(_HasNoException, errors))
    error_count = len(filtered_errors)
    filtered_count = len(errors) - error_count

    if error_count:
        suffix = '' if error_count == 1 else 's'
        filtered_errors.append(
            'Error: Encountered %d AppVerifier exception%s for %s.' %
            (error_count, suffix, image_name))

    if filtered_count:
        suffix1 = '' if filtered_count == 1 else 's'
        suffix2 = '' if len(exceptions) == 1 else 's'
        filtered_errors.append(
            'Warning: Filtered %d AppVerifier exception%s for %s using %d rule%s.' %
            (filtered_count, suffix1, image_name, len(exceptions), suffix2))

    return (error_count, filtered_errors)
37b5febe4da731a426c2cd3ef9d6aeb1f28a802c
3,643,464
from typing import Optional


def dim(text: str, reset_style: Optional[bool] = True) -> str:
    """Return text in dim"""
    return set_mode("dim", False) + text + (reset() if reset_style else "")
cb180649913760b71b2857b61e264b6a17207433
3,643,465
def jaccard(list1, list2):
    """Calculates Jaccard distance from two networks\n
    | Arguments:
    | :-
    | list1 (list or networkx graph): list containing objects to compare
    | list2 (list or networkx graph): list containing objects to compare\n
    | Returns:
    | :-
    | Returns Jaccard distance between list1 and list2
    """
    intersection = len(list(set(list1).intersection(list2)))
    union = (len(list1) + len(list2)) - intersection
    return 1 - float(intersection) / union
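A worked example of the jaccard function above, assuming duplicate-free input lists (the union is computed from raw list lengths):

# intersection = {2, 3} -> 2; union = 3 + 3 - 2 = 4; distance = 1 - 2/4
print(jaccard([1, 2, 3], [2, 3, 4]))  # 0.5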
1056c3d5a592bea9a575c24e947a91968b931000
3,643,467
def default_argument_preprocessor(args):
    """Return unmodified args and an empty dict for extras"""
    extras = {}
    return args, extras
2031dde70dbe54beb933e744e711a0bf8ecaed99
3,643,468
import random


def early_anomaly(case: pd.DataFrame) -> pd.DataFrame:
    """
    A sequence of 2 or fewer events executed too early, which is then skipped later in the case

    Parameters
    -----------------------
    case: pd.DataFrame,
        Case to apply anomaly

    Returns
    -----------------------
    Case with the applied early anomaly
    """
    case = case.reset_index(drop=True)
    timestamps = case['timestamp']

    sequence_size = random.choice([1, 2])
    if sequence_size == 1:
        original_position = random.choice(range(1, len(case)))
        activities = case.iloc[[original_position]]
        case = case.drop(original_position)
        if original_position == 1:
            anomaly_position = 0
        else:
            anomaly_position = random.choice(range(0, original_position - 1))
        description = (activities['activity'].values[0] +
                       ' was originally executed at position ' + str(original_position + 1) +
                       ' and changed to position ' + str(anomaly_position + 1))
    else:
        original_position = random.choice(range(1, len(case) - 1))
        activities = case.iloc[original_position:original_position + 2]
        case = case.drop([original_position, original_position + 1])
        if original_position == 1:
            anomaly_position = 0
        else:
            anomaly_position = random.choice(range(0, original_position - 1))
        description = (activities['activity'].values[0] + ' and ' +
                       activities['activity'].values[1] +
                       ' were originally executed at positions ' + str(original_position + 1) +
                       ' and ' + str(original_position + 2) +
                       ' and changed to positions ' + str(anomaly_position + 1) +
                       ' and ' + str(anomaly_position + 2))

    case = pd.concat([case.iloc[:anomaly_position], activities, case.iloc[anomaly_position:]],
                     sort=False).reset_index(drop=True)
    case['timestamp'] = timestamps
    case['label'] = 'early'
    case['description'] = description

    return case
0c5f0b0fb3336331737bd9f80712176476110ac9
3,643,470
def parse_cmd(script, *args):
    """Returns a one line version of a bat script"""
    if args:
        raise Exception('Args for cmd not implemented')
    # http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/cmd.mspx?mfr=true
    oneline_cmd = '&&'.join(script.split('\n'))
    oneline_cmd = 'cmd.exe /c "%s"' % oneline_cmd
    return oneline_cmd
b3355b20af2ca1ab2e996643ae0918a2d387760f
3,643,472
def expected_inheritance(variant_obj):
    """Gather information from common gene information."""
    manual_models = set()
    for gene in variant_obj.get('genes', []):
        manual_models.update(gene.get('manual_inheritance', []))
    return list(manual_models)
29bf223249e29942803cef8468dbd8bd04979e81
3,643,473
from collections import defaultdict


def player_stats_game(data) -> defaultdict:
    """Individual Game stat parser.

    Directs parsing to the proper player parser (goalie or skater).
    Receives the player_id branch. Url.GAME

    Args:
        data (dict): dict representing JSON object.

    Returns:
        defaultdict: Parsed Data.
    """
    # if the stats dict is empty it means they're scratched
    if not data['stats']:
        return None

    if data['position']['abbreviation'] == 'G':
        return goalie_stats_game(data['stats']['goalieStats'])
    else:
        return skater_stats_game(data['stats']['skaterStats'])
e39e4e9fb4a3d06421639e9466d29724318484ef
3,643,474
def about(request):
    """View function for about page"""
    return render(
        request,
        'about.html',
    )
5bf7a52de1218718041ec7a05a749c623e19074e
3,643,475
def getTimeDeltaFromDbStr(timeStr: str) -> dt.timedelta:
    """Convert db time string in reporting software to time delta object

    Args:
        timeStr (str): The string that represents time, like 14:25 or 15:23:45

    Returns:
        dt.timedelta: time delta that has hours and minutes components
    """
    if pd.isnull(timeStr):
        return dt.timedelta(seconds=0)
    elif ':' not in timeStr:
        print('could not parse time string {0}'.format(timeStr))
        return dt.timedelta(seconds=0)
    else:
        try:
            timeSegs = timeStr.split(':')
            timeSegs = timeSegs[0:2]
            return dt.timedelta(hours=int(timeSegs[0]), minutes=int(timeSegs[1]))
        except:
            print('could not parse time string {0}'.format(timeStr))
            return dt.timedelta(seconds=0)
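A small sanity check of the parser above (assuming pandas is imported as pd, as the function requires); note that seconds are deliberately dropped:

print(getTimeDeltaFromDbStr('15:23:45'))  # 15:23:00
print(getTimeDeltaFromDbStr('1425'))      # prints a warning, returns 0:00:00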
66a78192e6cbe5240a9131c2b18e4a42187a6024
3,643,476
def colorBool(v) -> str:
    """Convert True to 'True' in green and False to 'False' in red"""
    if v:
        return colored(str(v), "green")
    else:
        return colored(str(v), "red")
8c196bccc5bb1970cc752a495117bcc74ed4f8f6
3,643,477
from typing import List


def bootstrap(
    tokens: List[str],
    measure: str = "type_token_ratio",
    window_size: int = 3,
    ci: bool = False,
    raw=False,
):
    """Calculate bootstrap for lexical diversity measures, as explained in
    Evert et al. 2017. If measure='type_token_ratio', it calculates the
    standardized type-token ratio.

    :param ci: additionally calculate and return the confidence interval; returns a tuple
    :param raw: return the raw results
    """
    results = []
    measures = dict(
        type_token_ratio=type_token_ratio,
        guiraud_r=guiraud_r,
        herdan_c=herdan_c,
        dugast_k=dugast_k,
        maas_a2=maas_a2,
        dugast_u=dugast_u,
        tuldava_ln=tuldava_ln,
        brunet_w=brunet_w,
        cttr=cttr,
        summer_s=summer_s,
        sichel_s=sichel_s,
        michea_m=michea_m,
        honore_h=honore_h,
        entropy=entropy,
        yule_k=yule_k,
        simpson_d=simpson_d,
        herdan_vm=herdan_vm,
        hdd=hdd,
        orlov_z=orlov_z,
        mtld=mtld,
    )
    # tl_vs:    txt_len, vocab_size
    # vs_fs:    vocab_size, freq_spectrum
    # tl_vs_fs: txt_len, vocab_size, freq_spectrum
    # tl_fs:    txt_len, freq_spectrum
    # t:        tokens
    classes = dict(
        tl_vs=(
            "type_token_ratio",
            "guiraud_r",
            "herdan_c",
            "dugast_k",
            "maas_a2",
            "dugast_u",
            "tuldava_ln",
            "brunet_w",
            "cttr",
            "summer_s",
        ),
        vs_fs=("sichel_s", "michea_m"),
        tl_vs_fs=("honore_h", "herdan_vm", "orlov_z"),
        tl_fs=("entropy", "yule_k", "simpson_d", "hdd"),
        t=("mtld",),
    )
    measure_to_class = {m: c for c, v in classes.items() for m in v}
    func = measures[measure]
    cls = measure_to_class[measure]
    for i in range(int(len(tokens) / window_size)):
        chunk = tokens[i * window_size : (i * window_size) + window_size]
        txt_len, vocab_size, freq_spectrum = preprocess(chunk, fs=True)
        if cls == "tl_vs":
            result = func(txt_len, vocab_size)
        elif cls == "vs_fs":
            result = func(vocab_size, freq_spectrum)
        elif cls == "tl_vs_fs":
            result = func(txt_len, vocab_size, freq_spectrum)
        elif cls == "tl_fs":
            result = func(txt_len, freq_spectrum)
        elif cls == "t":
            result = func(chunk)
        results.append(result)
    if raw:
        return results
    if ci:
        return (np.mean(results), _sttr_ci(results))
    return np.mean(results)
d86cff5edd61698b1adee14a5c2fb800b4b76608
3,643,478
def by_label(move_data, value, label_name, filter_out=False, inplace=False):
    """
    Filters trajectory points according to a specified value and column label.

    Parameters
    ----------
    move_data : dataframe
       The input trajectory data
    value : the type of the feature value used to filter the trajectories
        Specifies the value used to filter the trajectory points
    label_name : String
        Specifies the label of the column used in the filtering
    filter_out : boolean, optional(false by default)
        If set to True, it will return trajectory points with feature values
        different from the value specified in the parameters, instead of the
        trajectory points with the same feature value as the one specified
        in the parameters.
    inplace : boolean, optional(false by default)
        If set to True the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned.

    Returns
    -------
    move_data : dataframe or None
        Returns dataframe with trajectory points filtered by label.
    """
    try:
        filter_ = move_data[label_name] == value
        if filter_out:
            filter_ = ~filter_
        return move_data.drop(index=move_data[~filter_].index, inplace=inplace)
    except Exception as e:
        raise e
3d772f741539009b756744539f4a524e6ad402ea
3,643,479
import numpy


def make_pyrimidine(residue, height=0.4, scale=1.2):
    """Creates vertices and normals for pyrimidines: Thymine, Uracil, Cytosine"""
    atoms = residue.atoms
    names = [name.split("@")[0] for name in atoms.name]
    idx = names.index('N1'); N1 = numpy.array(atoms[idx].coords)
    idx = names.index('C2'); C2 = numpy.array(atoms[idx].coords)
    idx = names.index('N3'); N3 = numpy.array(atoms[idx].coords)
    idx = names.index('C4'); C4 = numpy.array(atoms[idx].coords)
    idx = names.index('C5'); C5 = numpy.array(atoms[idx].coords)
    idx = names.index('C6'); C6 = numpy.array(atoms[idx].coords)

    N1_C2 = C2 - N1
    N1_C6 = C6 - N1
    C2_C6 = height * norm(C6 - C2)
    normal = height * numpy.array(crossProduct(N1_C2, N1_C6, normal=True))
    center = (N1 + C2 + N3 + C4 + C5 + C6) / 6.0

    vertices = numpy.zeros((14, 3), float)
    vertices[0] = scale * (C2 - normal - center) + center
    vertices[1] = scale * (N3 - normal - center) + center
    vertices[2] = scale * (C4 - normal - center) + center
    vertices[3] = scale * (C5 - normal - center) + center
    vertices[4] = scale * (C6 - normal - center) + center
    vertices[5] = scale * (C2 + normal - center) + center
    vertices[6] = scale * (N3 + normal - center) + center
    vertices[7] = scale * (C4 + normal - center) + center
    vertices[8] = scale * (C5 + normal - center) + center
    vertices[9] = scale * (C6 + normal - center) + center
    vertices[10] = scale * (N1 - C2_C6 - normal - center) + center
    vertices[11] = scale * (N1 - C2_C6 + normal - center) + center
    vertices[12] = scale * (N1 + C2_C6 + normal - center) + center
    vertices[13] = scale * (N1 + C2_C6 - normal - center) + center

    faces = numpy.array([[13, 4, 3, 2, 1, 0, 10],
                         [11, 5, 6, 7, 8, 9, 12],
                         [0, 5, 11, 10, 10, 10, 10],
                         [1, 6, 5, 0, 0, 0, 0],
                         [2, 7, 6, 1, 1, 1, 1],
                         [3, 8, 7, 2, 2, 2, 2],
                         [4, 9, 8, 3, 3, 3, 3],
                         [13, 12, 9, 4, 4, 4, 4]])
    return vertices, faces
eac8e9bd0cc6abeefa5b8a6bad299ff6a0c6b9d8
3,643,480
def get_props(filepath, m_co2=22, m_poly=2700/123, N_A=6.022E23,
              sigma_co2=2.79E-8, sort=False):
    """
    Computes important physical properties from the dft.input file, such as
    density of CO2 in the CO2-rich phase, solubility of CO2 in the polyol-rich
    phase, and specific volume of the polyol-rich phase.

    The dft.input file is structured as:
        p \t gsrho1b \t gsrho1a \t 10^-gsrho2b \t gsrho2a

    PARAMETERS
    ----------
    filepath : string
        Filepath to file containing densities and pressures (usually dft.input)
    m_co2 : float
        mass of one bead of CO2 in PC-SAFT model [amu/bead] (= Mw / N)
    m_poly : float
        mass of one bead of polyol in PC-SAFT model [amu/bead] (= Mw / N)
    N_A : float
        Avogadro's number (molecules per mol)
    sigma_co2 : float
        sigma parameter for co2 [cm]
    sort : bool
        If True, sorts solubility data in terms of increasing pressure

    RETURNS
    -------
    p : list of floats
        pressures corresponding to the solubilities [MPa]
    props : tuple of lists of floats
        Tuple of physical properties calculated (lists of floats):
            rho_co2 : density of CO2 in CO2-rich phase [g/mL]
            solub : solubility of CO2 in polyol-rich phase [w/w]
            spec_vol : specific volume of polyol-rich phase [mL/g]
    """
    # loads data
    data = np.genfromtxt(filepath, delimiter='\t')
    # extracts pressure [MPa] from first column
    p = data[:, 0]
    # extracts the density of CO2 in the co2-rich phase [beads/sigma^3]
    rho_co2_v = data[:, 1]
    # extracts the density of CO2 in the polyol-rich phase [beads/sigma^3]
    rho_co2_l = data[:, 2]
    # extracts the density of polyol in the polyol-rich phase [beads/sigma^3]
    rho_poly_l = data[:, 4]

    # conversions from beads/sigma^3 to g/mL
    conv_co2 = m_co2 / N_A / sigma_co2**3
    conv_poly = m_poly / N_A / sigma_co2**3

    # computes density of CO2 in the CO2-rich phase [g/mL]
    rho_co2 = rho_co2_v * conv_co2
    # computes solubility of CO2 in the polyol-rich phase [w/w]
    solub = rho_co2_l * conv_co2 / (rho_co2_l * conv_co2 + rho_poly_l * conv_poly)
    # computes specific volume of the polyol-rich phase [mL/g]
    spec_vol = 1 / (rho_co2_l * conv_co2 + rho_poly_l * conv_poly)

    # sorts data if requested
    if sort:
        inds_sort = np.argsort(p)
        p = p[inds_sort]
        rho_co2 = rho_co2[inds_sort]
        solub = solub[inds_sort]
        spec_vol = spec_vol[inds_sort]

    props = (rho_co2, solub, spec_vol)
    return p, props
2aec573795a40c6c95e19ea9ae531abca47128e8
3,643,481
def get_genotype(chrom, rsid):
    """Scan the dosage file for the given chromosome in chunks and return the
    first chunk whose index contains `rsid`."""
    geno_path = ('/home/hsuj/lustre/geno/'
                 'CCF_1000G_Aug2013_Chr{0}.dose.double.ATB.RNASeq_MEQTL.txt')
    geno_gen = pd.read_csv(geno_path.format(str(chrom)), sep=" ", chunksize=10000)
    for i in geno_gen:
        if rsid in i.index:
            break
    return i
6269aace777e5870e827152158ab70b73a44f401
3,643,482
import time


def task_dosomething(storage):
    """
    Task that gets launched to handle something in the background until it is
    completed and then terminates. Note that this task doesn't return until it
    is finished, so it won't be listening for Threadify pause or kill requests.
    """
    # An important task that we want to run in the background.
    for i in range(10):
        print(i, end="")
        time.sleep(1)
    return False
9eabf3977c53932de8d775c21e4a1209003e0892
3,643,483
def highway(input_, size, num_layers=1, bias=-2.0, f=tf.nn.relu, scope='Highway'):
    """Highway Network (cf. http://arxiv.org/abs/1505.00387).

    t = sigmoid(Wy + b)
    z = t * g(Wy + b) + (1 - t) * y
    where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.
    """
    with tf.variable_scope(scope):
        for idx in range(num_layers):
            g = f(linear(input_, size, scope='highway_lin_%d' % idx))
            t = tf.sigmoid(linear(input_, size, scope='highway_gate_%d' % idx) + bias)
            output = t * g + (1. - t) * input_
            input_ = output
    return output
dd90cd6107d5d69596c18d46bbef990cec8b1112
3,643,484
import functools


def convert_to_entry(func):
    """Wrapper function for converting dicts of entries to HarEntry objects"""
    @functools.wraps(func)
    def inner(*args, **kwargs):
        # Changed to list because tuple does not support item assignment
        changed_args = list(args)
        # Convert the dict (first argument) to HarEntry
        if isinstance(changed_args[0], dict):
            changed_args[0] = HarEntry(changed_args[0])
        return func(*tuple(changed_args), **kwargs)
    return inner
a5be9b430a47cb9c0c448e8ba963538fd6a435dc
3,643,485
def transform(record: dict, key_ref: dict, country_ref: pd.DataFrame,
              who_coding: pd.DataFrame, no_update_phrase: pd.DataFrame):
    """
    Apply transformations to OXCGRT records.

    Parameters
    ----------
    record : dict
        Input record.
    key_ref : dict
        Reference for key mapping.
    country_ref : pd.DataFrame
        Reference for WHO accepted country names.
    who_coding : pd.DataFrame
        Reference for WHO coding.
    no_update_phrase : pd.DataFrame
        Reference for "no update" phrases.

    Returns
    -------
    dict
        Record with transformations applied.
    """
    # 1. generator function of new record with correct keys (shared)
    new_record = utils.generate_blank_record()

    # 2. replace data in new record with data from old record using column reference (shared)
    record = utils.apply_key_map(new_record, record, key_ref)

    # 3. Assign unique ID (shared)
    # record = utils.assign_id(record)

    if record["prov_measure"] == "H8_Protection of elderly people":
        return None

    # 4. Handle date formatting
    record = utils.parse_date(record)

    # 8. replace sensitive country names
    record = utils.replace_sensitive_regions(record)

    # shift areas that should be countries.
    record = utils.replace_country(record, 'United States', 'Virgin Islands')

    # 7. Make manual country name changes
    record = utils.replace_conditional(record, 'country_territory_area',
                                       'Virgin Islands', 'US Virgin Islands')
    record = utils.replace_conditional(record, 'country_territory_area',
                                       'United States Virgin Islands', 'US Virgin Islands')
    record = utils.replace_conditional(record, 'country_territory_area',
                                       'Eswatini', 'Swaziland')
    record = utils.replace_conditional(record, 'country_territory_area',
                                       'South Korea', 'Korea')

    # 9. assign ISO code
    record['iso'] = countrycode(codes=record['country_territory_area'],
                                origin='country_name', target='iso3c')

    # 10. check missing ISO
    check.check_missing_iso(record)

    # Remove records where there is no data in prov_subcategory
    if record['prov_subcategory'] == 0:
        return None

    # Removes information in flag variables for now
    record['prov_subcategory'] = int(record['prov_subcategory'])

    # 11. Join WHO accepted country names (shared)
    record = utils.assign_who_country_name(record, country_ref)

    record = financial_measures(record)

    # 12. Join who coding from lookup (shared)
    record = utils.assign_who_coding(record, who_coding)

    # 13. check for missing WHO codes (shared)
    check.check_missing_who_code(record)

    # 16. Add WHO PHSM admin_level values
    record = utils.add_admin_level(record)

    record = utils.remove_tags(record)

    # 17. Remove update records
    record = assign_comment_links(record)

    # Filter out records with "no update" phrases
    record = label_update_phrase(record, list(no_update_phrase['phrase']))

    return record
2d115f8d64731c5ca88807845d09085b4f07acfd
3,643,486
from google.cloud import vision
import io


def detect_text(path):
    """Detects text in the file."""
    client = vision.ImageAnnotatorClient()

    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision.Image(content=content)

    response = client.text_detection(image=image)
    texts = response.text_annotations

    # Check the error before returning, otherwise the error branch is unreachable
    # whenever any annotation is present.
    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))

    # The first annotation contains the full detected text block.
    for text in texts:
        return text.description
6dea35d84f538322eed74c9c7c1f9d7a4882dd33
3,643,488
def file_util_is_ext(path, ext):
    """Check whether the file has the given extension; ext does not include the dot."""
    if file_util_get_ext(path) == ext:
        return True
    else:
        return False
27389af32333036b998a421ed35952705092ade6
3,643,489
def load_tract(repo, tract, patches=None, **kwargs):
    """Merge catalogs from forced-photometry coadds across available filters.

    Parameters
    --
    tract: int
        Tract of sky region to load
    repo: str
        File location of Butler repository+rerun to load.
    patches: list of str
        List of patches.  If not specified, will default to '0,0'--'7,7'.

    Returns
    --
    Pandas DataFrame of merged catalog
    """
    butler = Butler(repo)

    if patches is None:
        # Extract the patches for this tract from the skymap
        skymap = butler.get(datasetType='deepCoadd_skyMap')
        patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]

    merged_tract_cat = pd.DataFrame()
    for patch in patches:
        this_patch_merged_cat = load_patch(butler, tract, patch, **kwargs)
        # DataFrame.append returns a new frame, so the result must be reassigned.
        merged_tract_cat = merged_tract_cat.append(this_patch_merged_cat)

    return merged_tract_cat
f989c947dec96426b15219ab224364c96f65d1fb
3,643,490
import datetime


def calculate_delta(arg1, arg2):
    """
    Calculates and returns a `datetime.timedelta` object representing the difference
    between arg1 and arg2. Arguments must be either both `datetime.date`, both
    `datetime.time`, or both `datetime.datetime`. The difference is absolute, so the
    order of the arguments doesn't matter.
    """
    if arg1 > arg2:
        arg1, arg2 = arg2, arg1

    # Check datetime first, since datetime is a subclass of date and would
    # otherwise match the date branch and lose its time component.
    if isinstance(arg1, datetime.datetime) and isinstance(arg2, datetime.datetime):
        return arg2 - arg1

    if isinstance(arg1, datetime.date) and isinstance(arg2, datetime.date):
        return (
            datetime.datetime(arg2.year, arg2.month, arg2.day)
            - datetime.datetime(arg1.year, arg1.month, arg1.day)
        )

    if isinstance(arg1, datetime.time) and isinstance(arg2, datetime.time):
        return (
            datetime.datetime(1, 1, 1, arg2.hour, arg2.minute, arg2.second)
            - datetime.datetime(1, 1, 1, arg1.hour, arg1.minute, arg1.second)
        )

    raise TypeError(
        f'Cannot calculate delta between values of types '
        f'{type(arg1)} and {type(arg2)} because they are not equivalent'
    )
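For illustration, a date pair run through calculate_delta above:

import datetime
d1 = datetime.date(2023, 1, 1)
d2 = datetime.date(2023, 1, 31)
print(calculate_delta(d2, d1))  # 30 days, 0:00:00 (argument order does not matter)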
f6b3f0b86bd73be7d1702ba8893cd70d99b0b321
3,643,491
import yaml


def create_model_config(model_dir: str, config_path: str = None):
    """Creates a new configuration file in the model directory and returns the config."""
    # read the config file
    config_content = file_io.read_file_to_string(root_dir(config_path))

    # save the config file to the model directory
    write_model_config(model_dir, config_content)

    # load config
    config = yaml.safe_load(config_content)

    return config
c695ee36b6dec24ef17179adbf40e81aff708082
3,643,492
def get_deployment_physnet_mtu():
    """Retrieves global physical network MTU setting.

    Plugins should use this function to retrieve the MTU set by the
    operator that is equal to or less than the MTU of their nodes'
    physical interfaces. Note that it is the responsibility of the
    plugin to deduct the value of any encapsulation overhead required
    before advertising it to VMs.

    Note that this function depends on the global_physnet_mtu config option
    being registered in the global CONF.

    :returns: The global_physnet_mtu from the global CONF.
    """
    return cfg.CONF.global_physnet_mtu
161e7f87e2a68643f81e2b62061d65251a1249de
3,643,493
def _path(path):
    """Helper to build an OWFS path from a list"""
    path = "/" + "/".join(str(x) for x in path)
    return path.encode("utf-8") + b"\0"
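For illustration, the helper above yields a NUL-terminated byte path (the device id here is made up):

print(_path(["uncached", "10.67C6697351FF", "temperature"]))
# b'/uncached/10.67C6697351FF/temperature\x00'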
d38937deb459bb9bf393402efc31a90a285d4a6d
3,643,494
import time


def current_milli_time():
    """Return the current time in milliseconds"""
    return int(time.time() * 1000)
66605d2e23df2c428c70af75247e2b22a2795363
3,643,495
from typing import Callable, Dict, Optional, Sequence


def _loo_jackknife(
    func: Callable[..., NDArray],
    nobs: int,
    args: Sequence[ArrayLike],
    kwargs: Dict[str, ArrayLike],
    extra_kwargs: Optional[Dict[str, ArrayLike]] = None,
) -> NDArray:
    """
    Leave one out jackknife estimation

    Parameters
    ----------
    func : callable
        Function that computes parameters. Called using func(*args, **kwargs)
    nobs : int
        Number of observations in the data
    args : list
        List of positional inputs (arrays, Series or DataFrames)
    kwargs : dict
        Dict of keyword inputs (arrays, Series or DataFrames)

    Returns
    -------
    ndarray
        Array containing the jackknife results where row i corresponds to
        leaving observation i out of the sample
    """
    results = []
    for i in range(nobs):
        items = np.r_[0:i, i + 1 : nobs]
        args_copy = []
        for arg in args:
            if isinstance(arg, (pd.Series, pd.DataFrame)):
                args_copy.append(arg.iloc[items])
            else:
                args_copy.append(arg[items])
        kwargs_copy = {}
        for k, v in kwargs.items():
            if isinstance(v, (pd.Series, pd.DataFrame)):
                kwargs_copy[k] = v.iloc[items]
            else:
                kwargs_copy[k] = v[items]
        if extra_kwargs is not None:
            kwargs_copy.update(extra_kwargs)
        results.append(func(*args_copy, **kwargs_copy))
    return np.array(results)
83e39e97e08ef4d16f2c48a084c5ed40d0fbc0ad
3,643,497
from Bio.SeqIO.QualityIO import solexa_quality_from_phred


def _fastq_illumina_convert_fastq_solexa(in_handle, out_handle, alphabet=None):
    """Fast Illumina 1.3+ FASTQ to Solexa FASTQ conversion (PRIVATE).

    Avoids creating SeqRecord and Seq objects in order to speed up this
    conversion.
    """
    # Map unexpected chars to null
    mapping = "".join(
        [chr(0) for ascii in range(0, 64)]
        + [chr(64 + int(round(solexa_quality_from_phred(q)))) for q in range(0, 62 + 1)]
        + [chr(0) for ascii in range(127, 256)]
    )
    assert len(mapping) == 256
    return _fastq_generic(in_handle, out_handle, mapping)
06422e23bb005756742207e63ec1d8dc603ba5b2
3,643,498
def pull_branch(c: InvokeContext, repo: Repo, directory: str, branch_name: str) -> CommandResult:
    """
    Change to the repo directory, check out the given branch and pull it.

    :argument c: InvokeContext
    :argument repo: Repo the repo to pull
    :argument directory: str the directory to change to
    :argument branch_name: str the branch to pull
    """
    project_path = _generate_path(directory, repo.folder_name)
    cmd = f"cd {project_path} && git checkout {branch_name} && git pull"
    return _run_command(c, cmd)
5c21bdbbe91f5f82b40645a3449d373f6c464717
3,643,499
def sizeof_fmt(num, suffix='B'):
    """Return human readable version of in-memory size.

    Code from Fred Cirera from Stack Overflow:
    https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    """
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, 'Yi', suffix)
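A few spot checks of sizeof_fmt above:

print(sizeof_fmt(0))         # 0.0B
print(sizeof_fmt(1536))      # 1.5KiB
print(sizeof_fmt(10 ** 10))  # 9.3GiB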
1aeace0d5ad8ca712704a8ee58e1e206e5e61b56
3,643,500
def readFPs(filepath):
    """Reads a list of fingerprints from a file"""
    try:
        myfile = open(filepath, "r")
    except:
        raise IOError("file does not exist:", filepath)
    else:
        fps = []
        for line in myfile:
            if line[0] != "#":  # ignore comments
                line = line.rstrip().split()
                fps.append(line[0])
        return fps
96d483360c411a27a3b570875f61344ef4dae573
3,643,501
def validate_take_with_convert(convert, args, kwargs):
    """
    If this function is called via the 'numpy' library, the third parameter in
    its signature is 'axis', which takes either an ndarray or 'None', so check
    if the 'convert' parameter is either an instance of ndarray or is None
    """
    if isinstance(convert, ndarray) or convert is None:
        args = (convert,) + args
        convert = True

    validate_take(args, kwargs, max_fname_arg_count=3, method="both")
    return convert
dacaf4aa6fd5ff9fa577a217c0209d75785abbaf
3,643,502
from typing import List


def load_operators_expr() -> List[str]:
    """Returns clip load operators for std.Expr as a list of strings."""
    abcd = list(ascii_lowercase)
    return abcd[-3:] + abcd[:-3]
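The rotation above puts x, y, z first, matching the variable order std.Expr assigns to input clips; a quick check (this assumes `from string import ascii_lowercase` is in scope, as the function requires):

print(load_operators_expr()[:5])  # ['x', 'y', 'z', 'a', 'b']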
49ba476bebbb6b202b7021458e70e6b1fb927810
3,643,503
def findScanNumberString(s):
    """If s contains 'NNNN', where N stands for any digit, return the string
    beginning with 'NNNN' and extending to the end of s. If 'NNNN' is not
    found, return ''."""
    n = 0
    for i in range(len(s)):
        if s[i].isdigit():
            n += 1
        else:
            n = 0
        if n == 4:
            return s[i-3:]
    return ''
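Two quick examples of the scanner above; only a run of four consecutive digits triggers a match:

print(findScanNumberString("run_2024_part1"))  # '2024_part1'
print(findScanNumberString("run_12_part3"))    # ''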
fd5973383bcf8b74573408d95d4f0065dfbda32f
3,643,504
import urllib
import urlparse


def parseWsUrl(url):
    """
    Parses a WebSocket URL into its components and returns a tuple
    (isSecure, host, port, resource, path, params).

    isSecure is a flag which is True for wss URLs.
    host is the hostname or IP from the URL.
    port is the port from the URL or standard port derived from scheme (ws = 80, wss = 443).
    resource is the /resource name/ from the URL, the /path/ together with the (optional) /query/ component.
    path is the /path/ component properly unescaped.
    params is the /query/ component properly unescaped and returned as dictionary.

    :param url: A valid WebSocket URL, i.e. ws://localhost:9000/myresource?param1=23&param2=666
    :type url: str

    :returns: tuple -- A tuple (isSecure, host, port, resource, path, params)
    """
    parsed = urlparse.urlparse(url)

    if parsed.scheme not in ["ws", "wss"]:
        raise Exception("invalid WebSocket scheme '%s'" % parsed.scheme)

    if parsed.port is None or parsed.port == "":
        if parsed.scheme == "ws":
            port = 80
        else:
            port = 443
    else:
        port = int(parsed.port)

    if parsed.fragment is not None and parsed.fragment != "":
        raise Exception("invalid WebSocket URL: non-empty fragment '%s'" % parsed.fragment)

    if parsed.path is not None and parsed.path != "":
        ppath = parsed.path
        path = urllib.unquote(ppath)
    else:
        ppath = "/"
        path = ppath

    if parsed.query is not None and parsed.query != "":
        resource = ppath + "?" + parsed.query
        params = urlparse.parse_qs(parsed.query)
    else:
        resource = ppath
        params = {}

    return (parsed.scheme == "wss", parsed.hostname, port, resource, path, params)
149db7e862f832baf7591fb173cd53d5259cfbba
3,643,505
def load_image(filename):
    """Loads an image, reads it and returns image size, dimension and a numpy
    array of this image.

    filename: the name of the image
    """
    try:
        img = cv2.imread(filename)
        print("(H, W, D) = (height, width, depth)")
        print("shape: ", img.shape)
        h, w, d = img.shape
        print('this is the width', w)
        print('this is the height', h)
        # size = h * w
    except Exception as e:
        print(e)
        print("Unable to load image")
        return None, None
    return img.shape, img
2f27d15cd12fcdf4656291a7349883e8d63ff7cf
3,643,506
def add_manipulable(key, manipulable):
    """
    Add an ArchipackActiveManip into the stack if not already present,
    setup reference to manipulable and return the manipulators stack.
    """
    global manips
    if key not in manips.keys():
        # print("add_manipulable() key:%s not found create new" % (key))
        manips[key] = ArchipackActiveManip(key)

    manips[key].manipulable = manipulable
    return manips[key].stack
3d3709758a96edec261141291950d28d2079ae19
3,643,507
def get_wave_data_type(sample_type_id):
    """Creates an SDS type definition for WaveData"""
    if sample_type_id is None or not isinstance(sample_type_id, str):
        raise TypeError('sample_type_id is not an instantiated string')

    int_type = SdsType('intType', SdsTypeCode.Int32)
    double_type = SdsType('doubleType', SdsTypeCode.Double)

    # WaveData uses Order as the key, or primary index
    order_property = SdsTypeProperty('Order', True, int_type)
    tau_property = SdsTypeProperty('Tau', False, double_type)
    radians_property = SdsTypeProperty('Radians', False, double_type)
    sin_property = SdsTypeProperty('Sin', False, double_type)
    cos_property = SdsTypeProperty('Cos', False, double_type)
    tan_property = SdsTypeProperty('Tan', False, double_type)
    sinh_property = SdsTypeProperty('Sinh', False, double_type)
    cosh_property = SdsTypeProperty('Cosh', False, double_type)
    tanh_property = SdsTypeProperty('Tanh', False, double_type)

    # Create an SdsType for WaveData Class
    wave = SdsType(sample_type_id, SdsTypeCode.Object,
                   [order_property, tau_property, radians_property, sin_property,
                    cos_property, tan_property, sinh_property, cosh_property,
                    tanh_property],
                   'WaveDataSample',
                   'This is a sample SDS type for storing WaveData type events')
    return wave
e86d693ac1405b7f440065cbc5eced33adcc666f
3,643,508
import random


def _spec_augmentation(x, warp_for_time=False, num_t_mask=2, num_f_mask=2,
                       max_t=50, max_f=10, max_w=80):
    """Deep copy x and do spec augmentation then return it

    Args:
        x: input feature, T * F 2D
        num_t_mask: number of time mask to apply
        num_f_mask: number of freq mask to apply
        max_t: max width of time mask
        max_f: max width of freq mask
        max_w: max width of time warp

    Returns:
        augmented feature
    """
    y = np.copy(x)
    max_frames = y.shape[0]
    max_freq = y.shape[1]

    # time warp
    if warp_for_time and max_frames > max_w * 2:
        center = random.randrange(max_w, max_frames - max_w)
        warped = random.randrange(center - max_w, center + max_w) + 1

        left = Image.fromarray(x[:center]).resize((max_freq, warped), BICUBIC)
        right = Image.fromarray(x[center:]).resize((max_freq, max_frames - warped), BICUBIC)
        y = np.concatenate((left, right), 0)

    # time mask
    for i in range(num_t_mask):
        start = random.randint(0, max_frames - 1)
        length = random.randint(1, max_t)
        end = min(max_frames, start + length)
        y[start:end, :] = 0

    # freq mask
    for i in range(num_f_mask):
        start = random.randint(0, max_freq - 1)
        length = random.randint(1, max_f)
        end = min(max_freq, start + length)
        y[:, start:end] = 0

    return y
caa4a9010254e13be36e2359d7437cd9f2ced084
3,643,509
def deg2rad(x, dtype=None):
    """
    Converts angles from degrees to radians.

    Args:
        x (Tensor): Angles in degrees.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the
            dtype of the output Tensor.

    Returns:
        Tensor, the corresponding angle in radians. This is a tensor scalar if `x`
        is a tensor scalar.

    Raises:
        TypeError: if `x` is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([1, 2, 3, -4, -5])
        >>> output = np.deg2rad(x)
        >>> print(output)
        [ 0.01745329  0.03490658  0.05235988 -0.06981317 -0.08726647]
    """
    _check_input_tensor(x)

    def convert(a):
        return a * pi / 180.0

    return _apply_tensor_op(convert, x, dtype=dtype)
9e7ff9f5242e5b2eede27b06eb7eb64ba84bbc69
3,643,510
import math


def point_in_ellipse(origin, point, a, b, pa_rad, verbose=False):
    """
    Identify if the point is inside the ellipse.

    :param origin A SkyCoord defining the centre of the ellipse.
    :param point A SkyCoord defining the point to be checked.
    :param a The semi-major axis in arcsec of the ellipse
    :param b The semi-minor axis in arcsec of the ellipse
    :param pa_rad The position angle of the ellipse. This is the angle of the
        major axis measured in radians East of North (or CCW from the y axis).
    """
    # Convert point to be in plane of the ellipse, accounting for distortions at high declinations
    p_ra_dist = (point.icrs.ra.degree - origin.icrs.ra.degree) * math.cos(origin.icrs.dec.rad)
    p_dec_dist = point.icrs.dec.degree - origin.icrs.dec.degree

    # Calculate the angle and radius of the test point relative to the centre of the ellipse
    # Note that we reverse the ra direction to reflect the CCW direction
    radius = math.sqrt(p_ra_dist**2 + p_dec_dist**2)
    diff_angle = (math.pi / 2 + pa_rad) if p_dec_dist == 0 else math.atan(p_ra_dist / p_dec_dist) - pa_rad

    # Obtain the point position in terms of the ellipse major and minor axes
    minor = radius * math.sin(diff_angle)
    major = radius * math.cos(diff_angle)
    if verbose:
        print('point relative to ellipse centre angle:{} deg radius:{:.4f}" maj:{:.2f}" min:{:.2f}"'.format(
            math.degrees(diff_angle), radius * 3600, major * 3600, minor * 3600))

    a_deg = a / 3600.0
    b_deg = b / 3600.0

    # Calc distance from origin relative to a and b
    dist = math.sqrt((major / a_deg) ** 2 + (minor / b_deg) ** 2)
    if verbose:
        print("Point %s is %f from ellipse %f, %f, %f at %s." % (
            point, dist, a, b, math.degrees(pa_rad), origin))
    return round(dist, 3) <= 1.0
9c4b056c205b8d25e80211adb0eeb1cdfaf4c11c
3,643,511
def isNumberString(value):
    """
    Checks if value is a string that has only digits - possibly with leading '+' or '-'
    """
    if not value:
        return False
    sign = value[0]
    if (sign == '+') or (sign == '-'):
        if len(value) <= 1:
            return False
        absValue = value[1:]
        return absValue.isdigit()
    else:
        if len(value) <= 0:
            return False
        else:
            return value.isdigit()
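Spot checks for isNumberString above:

print(isNumberString("-42"))  # True
print(isNumberString("+"))    # False, a sign needs at least one digit
print(isNumberString("4.2"))  # False, '.' is not a digit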
06feaab112e184e6a01c2b300d0e4f1a88f2250e
3,643,512
def vaseline(tensor, shape, alpha=1.0, time=0.0, speed=1.0):
    """Blend the input tensor with a center-masked bloom of itself."""
    return value.blend(tensor, center_mask(tensor, bloom(tensor, shape, 1.0), shape), alpha)
75e61b21e9ffc1f13a8958ee92d0940596ae116b
3,643,513
from typing import Any, Dict, List, Union


def _func_length(target_attr: Union[Dict[str, Any], List[Any]], *_: Any) -> int:
    """Function for returning the length of a dictionary or list."""
    return len(target_attr)
b66a883c763c93d9a62a7c09324ab8671d325d05
3,643,514
from typing import Optional


def import_places_from_swissnames3d(
    projection: str = "LV95", file: Optional[TextIOWrapper] = None
) -> str:
    """
    Import places from SwissNAMES3D.

    :param projection: "LV03" or "LV95"
        see http://mapref.org/CoordinateReferenceFrameChangeLV03.LV95.html#Zweig1098
    :param file: path to local unzipped file. If provided, the `projection`
        parameter will be ignored.
    """
    try:
        file = file or get_swissnames3d_remote_file(projection=projection)
    except HTTPError as error:
        return f"Error downloading {PLACE_DATA_URL}: {error}. "
    except ConnectionError:
        return f"Error connecting to {PLACE_DATA_URL}. "

    with file:
        count = get_csv_line_count(file, header=True)
        data = parse_places_from_csv(file, projection=projection)
        source_info = f"SwissNAMES3D {projection}"
        return save_places_from_generator(data, count, source_info)
cc90f3da95bf84ff3dd854de310a6690a28fd750
3,643,515
def _generate_data(size):
    """For testing reasons only"""
    # return FeatureSpec('dummy', name=None, data='x' * size)
    return PlotSpec(data='x' * size, mapping=None, scales=[], layers=[])
62cbbe947b4d20726f24503c38b9ba2c5d8bdc82
3,643,517
def configuration_filename(feature_dir, proposed_splits, split, generalized):
    """Calculates configuration specific filenames.

    Args:
        feature_dir (`str`): directory of features wrt to dataset directory.
        proposed_splits (`bool`): whether using proposed splits.
        split (`str`): train split.
        generalized (`bool`): whether GZSL setting.

    Returns:
        `str` containing arguments in appropriate form.
    """
    return '{}{}_{}{}.pt'.format(
        feature_dir,
        ('_proposed_splits' if proposed_splits else ''),
        split,
        '_generalized' if generalized else '',
    )
a3fc2c23746be7ed17f91820dd30a8156f91940c
3,643,518
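Two illustrative calls (not from the source), showing how the flags map onto the produced filename:

print(configuration_filename('res101', proposed_splits=True, split='train', generalized=False))
# -> 'res101_proposed_splits_train.pt'
print(configuration_filename('res101', proposed_splits=False, split='test', generalized=True))
# -> 'res101_test_generalized.pt'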
from array import array

def gammaBGRbuf(
        buf: array,
        gamma: float) -> array:
    """Apply a gamma adjustment to a BGR buffer

    Args:
        buf  : unsigned byte array holding BGR data
        gamma: float gamma adjust

    Returns:
        unsigned byte array holding gamma adjusted BGR data

    """
    # Note: the annotation requires the array class, so we import it directly
    # (the original imported the module and annotated with the module object).
    applygammaBGRbuf(buf, gamma)
    return buf
2d32f2ae0f1aae12f2ed8597f99b5cd5547ea108
3,643,519
def sentence_avg_word_length(df, new_col_name, col_with_lyrics):
    """
    Compute the average word length of each entry in a dataframe lyrics column
    and save the result as a new column.

    Parameters
    ----------
    df : dataframe
    new_col_name : name of the new column
    col_with_lyrics : column with lyrics

    Returns
    -------
    dataframe with the new column added
    """
    df[new_col_name] = df[col_with_lyrics].apply(_sentence_avg_word_length)
    return df
50dd7cb7145f5c6b39d3e8199f294b788ca361c0
3,643,520
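The helper _sentence_avg_word_length is not shown in this record; the sketch below supplies a plausible stand-in (an assumption, not the original implementation) so the wrapper can be exercised:

import pandas as pd

def _sentence_avg_word_length(text):
    # Hypothetical stand-in: mean length of whitespace-separated words
    words = text.split()
    return sum(len(w) for w in words) / len(words) if words else 0.0

df = pd.DataFrame({"lyrics": ["hello world", "a bb ccc"]})
df = sentence_avg_word_length(df, "avg_word_len", "lyrics")
print(df["avg_word_len"].tolist())  # [5.0, 2.0]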
import numpy as np

def to_sigmas(t, p, w_1, w_2, w_3):
    """Given t = sin(theta), p = sin(phi), and the stds this computes
    the covariance matrix and its inverse"""
    p2 = p*p
    t2 = t*t
    tc2 = 1-t2
    pc2 = 1-p2
    tc = np.sqrt(tc2)
    pc = np.sqrt(pc2)
    s1, s2, s3 = 1./(w_1*w_1), 1./(w_2*w_2), 1./(w_3*w_3)
    a = pc2*tc2*s1 + t2*s2 + p2*tc2*s3
    b = pc2*t2*s1 + tc2*s2 + p2*t2*s3
    c = p2*s1 + pc2*s3
    d = tc*t*(pc2*s1 - s2 + p2*s3)
    e = p*pc*tc*(s3 - s1)
    f = p*pc*t*(s3 - s1)
    sigma_inv = np.array([[a, d, e],
                          [d, b, f],
                          [e, f, c]])
    # Closed-form inverse of the symmetric 3x3 matrix; the determinant is the
    # common denominator of every entry, so compute it once.
    det = a*b*c - c*d**2 - b*e**2 + 2*d*e*f - a*f**2
    sigma = np.array([[b*c - f**2, e*f - c*d, d*f - b*e],
                      [e*f - c*d, a*c - e**2, d*e - a*f],
                      [d*f - b*e, d*e - a*f, a*b - d**2]]) / det
    return sigma, sigma_inv
e6144c8d3313e25cd701f703703309820c60032e
3,643,521
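A quick consistency check (illustrative, not from the source): sigma should be the matrix inverse of sigma_inv, so their product should be the identity.

import numpy as np

sigma, sigma_inv = to_sigmas(t=0.3, p=0.2, w_1=1.0, w_2=2.0, w_3=0.5)
assert np.allclose(sigma @ sigma_inv, np.eye(3))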
def fetch_atlas_pauli_2017(version='prob', data_dir=None, verbose=1):
    """Download the Pauli et al. (2017) atlas with a total of 12 subcortical nodes.

    Parameters
    ----------
    version: str, optional (default='prob')
        Which version of the atlas should be downloaded. This can be
        'prob' for the probabilistic atlas or 'labels' for the
        deterministic (label) atlas.

    data_dir : str, optional (default=None)
        Path of the data directory. Used to force data storage in a specified
        location.

    verbose : int
        verbosity level (0 means no message).

    Returns
    -------
    sklearn.datasets.base.Bunch
        Dictionary-like object, contains:

        - maps: 3D Nifti image, values are indices in the list of labels.
        - labels: list of strings. Starts with 'Background'.
        - description: a short description of the atlas and some references.

    References
    ----------
    https://osf.io/r2hvk/

    `Pauli, W. M., Nili, A. N., & Tyszka, J. M. (2018). A high-resolution
    probabilistic in vivo atlas of human subcortical brain nuclei. Scientific
    Data, 5, 180063-13. http://doi.org/10.1038/sdata.2018.63`

    """
    if version == 'prob':
        url_maps = 'https://osf.io/w8zq2/download'
        filename = 'pauli_2017_prob.nii.gz'
    elif version == 'labels':
        url_maps = 'https://osf.io/5mqfx/download'
        filename = 'pauli_2017_labels.nii.gz'
    else:
        raise NotImplementedError('{} is not a valid version for '
                                  'the Pauli atlas'.format(version))

    url_labels = 'https://osf.io/6qrcb/download'
    dataset_name = 'pauli_2017'

    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
                                verbose=verbose)

    files = [(filename,
              url_maps,
              {'move': filename}),
             ('labels.txt',
              url_labels,
              {'move': 'labels.txt'})]
    atlas_file, labels = _fetch_files(data_dir, files)

    labels = np.loadtxt(labels, dtype=str)[:, 1].tolist()

    fdescr = _get_dataset_descr(dataset_name)

    return Bunch(maps=atlas_file,
                 labels=labels,
                 description=fdescr)
c7dbf85de92c143a221d91c3dd6f452a4d79ee2f
3,643,522
import traceback

def GeometricError(ref_point_1, ref_point_2):
    """Deprecation notice function. Please use the indicated replacement function instead."""
    print(GeometricError.__name__ + ' is deprecated, use ' + geometricError.__name__ + ' instead')
    traceback.print_stack(limit=2)
    return geometricError(ref_point_1, ref_point_2)
aefd3a21ffa7123401af7ac2b106bc4efde624b5
3,643,523
def svn_fs_open2(*args): """svn_fs_open2(char const * path, apr_hash_t fs_config, apr_pool_t result_pool, apr_pool_t scratch_pool) -> svn_error_t""" return _fs.svn_fs_open2(*args)
433e8fe01d5b6c3c7b66f8caa3c50e8386e99e92
3,643,524
def config(workspace): """Return a config object.""" return Config(workspace.root_uri, {})
6d02a61f4653742b90838a773458944a581f8ed4
3,643,525
from typing import List from typing import Optional def longest_sequence_index(sequences: List[List[XmonQubit]]) -> Optional[int]: """Gives the position of a longest sequence. Args: sequences: List of node sequences. Returns: Index of the longest sequence from the sequences list. If more than one longest sequence exist, the first one is returned. None is returned for empty list. """ if sequences: return max(range(len(sequences)), key=lambda i: len(sequences[i])) return None
32aafa324daea819e48bc14516a8532c110c0362
3,643,526
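Illustrative calls (the XmonQubit element type does not matter for the length logic, so strings stand in here):

assert longest_sequence_index([["a"], ["b", "c"], ["d"]]) == 1
assert longest_sequence_index([["a", "b"], ["c", "d"]]) == 0  # first longest wins on ties
assert longest_sequence_index([]) is None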
def subset_raster(rast, band=1, bbox=None, logger=None):
    """
    :param rast: The rasterio raster object
    :param band: The band number you want to contour. Default: 1
    :param bbox: The bounding box in which to generate contours.
    :param logger: The logger object to use for this tool
    :return: A dict with the keys 'crs', 'array', 'affine', 'min', and 'max'.
        'array' is the masked numpy array for the window, 'affine' is the
        transformation for the bbox, and 'min'/'max' are the min/max values
        within the bbox.
    """
    # Affine transformations between raster and world coordinates.
    # See https://github.com/sgillies/affine
    # See https://github.com/mapbox/rasterio/blob/master/docs/windowed-rw.rst
    a = rast.affine         # Convert from pixel coordinates to world coordinates
    reverse_affine = ~a     # Convert from world coordinates to pixel coordinates

    # Copy the metadata
    kwargs = rast.meta.copy()

    # Read the band
    if bbox is not None:
        bbox = list(bbox)
        if len(bbox) != 4:
            logger.error('BBOX is not of length 4. Should be (xmin, ymin, xmax, ymax)')
            raise ValueError('BBOX is not of length 4. Should be (xmin, ymin, xmax, ymax)')

        # Clip the requested bbox to the extent of the original raster
        min_x = max(bbox[0], rast.bounds[0])
        min_y = max(bbox[1], rast.bounds[1])
        max_x = min(bbox[2], rast.bounds[2])
        max_y = min(bbox[3], rast.bounds[3])
        bbox = (min_x, min_y, max_x, max_y)

        # Convert the bounding box (world coordinates) to pixel coordinates
        # window = ((row_start, row_stop), (col_start, col_stop))
        window_bl = world_to_pixel_coords(rast.affine, [(bbox[0], bbox[1]), ])
        window_tr = world_to_pixel_coords(rast.affine, [(bbox[2], bbox[3]), ])
        window_rows = [int(window_bl[0, 1]), int(window_tr[0, 1])]
        window_cols = [int(window_bl[0, 0]), int(window_tr[0, 0])]
        window = (
            (min(window_rows), max(window_rows)),
            (min(window_cols), max(window_cols)))

        kwargs.update({
            'height': abs(window[0][1] - window[0][0]),
            'width': abs(window[1][1] - window[1][0]),
            'affine': rast.window_transform(window)
        })
    else:
        window = None

    # Read the data but only the window we set
    rast_band = rast.read(band, window=window, masked=True)
    rast_a = kwargs['affine']

    return {
        'crs': rast.crs,
        'array': rast_band,
        'affine': rast_a,
        'min': rast_band.min(),
        'max': rast_band.max()
    }
62bb0bc292fa2a9d09dc746ce329394cf9dd2fcb
3,643,527
def extract_date_features(df): """Expand datetime values into individual features.""" for col in df.select_dtypes(include=['datetime64[ns]']): print(f"Now extracting features from column: '{col}'.") df[col + '_month'] = pd.DatetimeIndex(df[col]).month df[col + '_day'] = pd.DatetimeIndex(df[col]).day df[col + '_weekday'] = pd.DatetimeIndex(df[col]).weekday df.drop(columns=[col], inplace=True) print("Done!") return df
8726cf0d160de11dfbad701d6a0c7fb3113691f6
3,643,528
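Illustrative usage of extract_date_features (not from the source): each datetime column is replaced by month, day, and weekday columns.

import pandas as pd

df = pd.DataFrame({"signup": pd.to_datetime(["2021-03-15", "2021-07-04"])})
df = extract_date_features(df)
print(df.columns.tolist())  # ['signup_month', 'signup_day', 'signup_weekday']
print(df.iloc[0].tolist())  # [3, 15, 0]  (2021-03-15 was a Monday)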
from copy import copy

def record_setitem(data, attr, value):
    """Implement `record_setitem`: return a shallow copy of `data` with `attr` set to `value`."""
    # Note: importing the `copy` function directly; the original imported the
    # module and then called it, which would raise a TypeError.
    data2 = copy(data)
    py_setattr(data2, attr, value)
    return data2
52af700d8d282a411e37de83a7ddfab7f3b9de82
3,643,529
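py_setattr is not defined in this record; it is presumably an alias for the builtin setattr (an assumption, not from the source). With that stand-in, a shallow-copy-and-set round trip looks like:

from types import SimpleNamespace

py_setattr = setattr  # assumed alias, not from the source record

rec = SimpleNamespace(x=1, y=2)
rec2 = record_setitem(rec, "x", 99)
print(rec.x, rec2.x)  # 1 99 -- the original record is untouched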
from typing import Optional def get_git_branch() -> Optional[str]: """Get the git branch.""" return _run("git", "branch", "--show-current")
dee21ab7e6d9800160e161ae32fad3f9c6c6a8fb
3,643,530
import nibabel
import numpy as np

def open_image(path, verbose=True, squeeze=False):
    """
    Open a NIfTI-1 image at the given path.

    The image might have an arbitrary number of dimensions; however, its first three axes are assumed to hold its
    spatial dimensions.

    Parameters
    ----------
    path : str
        The path of the file to be loaded.
    verbose : bool, optional
        If `True` (default), print some meta data of the loaded file to standard output.
    squeeze : bool, optional
        If `True`, remove trailing dimensions of the image volume if they contain a single entry only (default is
        `False`). Note that in this case it has not been tested whether the coordinate transformations from the
        NIfTI-1 header still apply.

    Returns
    -------
    Volume
        The resulting 3D image volume, with the ``src_object`` attribute set to the respective
        ``nibabel.nifti1.Nifti1Image`` instance and the desired anatomical world coordinate system ``system`` set to
        "RAS". Relies on the NIfTI header's `get_best_affine()` method to determine which transformation matrix to
        use (qform or sform).

    Raises
    ------
    IOError
        If something goes wrong.
    """
    # According to the NIfTI-1 specification [1]_, the world coordinate system of NIfTI-1 files is always RAS.
    src_system = "RAS"

    try:
        src_object = nibabel.nifti1.load(path)
    except Exception as e:
        raise IOError(e)
    voxel_data = np.asanyarray(src_object.dataobj)
    if isinstance(voxel_data, np.memmap):
        voxel_data.mode = "c"  # Make sure that no changes happen to data on disk: copy on write
    hdr = src_object.header

    ndim = hdr["dim"][0]
    if ndim < 3:
        raise IOError("Currently only 3D images can be handled. The given image has {} dimension(s).".format(ndim))

    if verbose:
        print("Loading image:", path)
        print("Meta data:")
        print(hdr)
        print("Image dimensions:", voxel_data.ndim)

    # Squeeze superfluous dimensions (according to the NIfTI-1 specification [1]_, the spatial dimensions are always
    # in front)
    if squeeze:
        voxel_data = __squeeze_dim(voxel_data, verbose)

    mat = hdr.get_best_affine()
    volume = Volume(src_voxel_data=voxel_data, src_transformation=mat, src_system=src_system,
                    src_spatial_dimensions=(0, 1, 2), system="RAS", src_object=src_object)

    return volume
217522c5ea45b9c1cbff8053dc9668cf5473c709
3,643,531
def add_one_for_ordered_traversal(graph, node_idx, current_path=None):
    """
    This recursive function returns an ordered traversal of a molecular
    graph. This traversal obeys the following rules:
        1. Locations may only be visited once
        2. All locations must be visited
        3. Locations are visited in the order in which the shortest path
           is followed
             - If potential paths are identical in length, then the one
               that provides the lightest total weight is followed
             - If the total weight of each path is identical (which would
               be the case for a molecule that contains any cycle), then
               the path that provides the lightest first atom is chosen
             - If the lightest first atom is also identical, a further
               tie-break is currently left unspecified

    The recursive algorithm works as follows:
        1. Go from node to node until reaching a node that has no
           neighbors.
        2. Once this node is reached, it returns itself back up the stack.
        3. If a node only has a single path, this is also immediately
           returned up the stack.
        4. Once a node is reached that has two possible paths, a choice
           is made between the competing paths. A naive implementation
           would simply follow the shorter path; instead, the intent here
           is that each candidate path is fully traversed first, and the
           path that provides the lightest direction is descended first.
           If both paths are equal in weight (as should be the case for a
           cycle), the path that provides the most direct route to the
           heaviest group is preferred. If the paths are completely
           identical, it should not matter which one is chosen first from
           the perspective of the graph.
    """
    if current_path is None:
        current_path = []

    ### Make a copy of the input current_path
    current_path = [x for x in current_path]

    path = [node_idx]
    current_path += [node_idx]
    neighbors = graph.adj[node_idx]

    ### Build entire traversal list
    neigh_path_list = []
    for entry in neighbors:
        if entry in current_path:
            continue
        neigh_path = add_one_for_ordered_traversal(graph, entry, current_path)
        if len(neigh_path) > 0:
            neigh_path_list.append(neigh_path)

    ### Only a single option
    if len(neigh_path_list) == 1:
        if len(neigh_path_list[0]) == 1:
            path += neigh_path_list[0]
            return path
    elif len(neigh_path_list) == 0:
        return [node_idx]

    ### If there's more than a single option, then an algorithm that seeks
    ### to stitch together the neighbor paths in a reasonable and unique way
    ### should be used
    neigh_list_sorted = _sort_neighbor_path_list(graph, neigh_path_list)
    path += neigh_list_sorted

    return path
1c923d07c6ca57d47c900fd2cc05470c4a0eef86
3,643,532
import json

def get_subject_guide_for_section_params(
        year, quarter, curriculum_abbr, course_number, section_id=None):
    """
    Returns a SubjectGuide model for the passed section params:

    year: year for the section term (4-digits)
    quarter: quarter (AUT, WIN, SPR, or SUM)
    curriculum_abbr: curriculum abbreviation
    course_number: course number
    section_id: course section identifier (optional)
    """
    quarter = quarter.upper()[:3]
    url = "{}/{}/{}/{}/{}/{}".format(
        subject_guide_url_prefix, 'course', year, quarter,
        quote(curriculum_abbr.upper()), course_number)
    # section_id is documented as optional, so only append it when present
    # (calling section_id.upper() unconditionally raises AttributeError
    # when it is None).
    if section_id is not None:
        url = "{}/{}".format(url, section_id.upper())

    headers = {'Accept': 'application/json'}

    response = SubjectGuideDao.getURL(url, headers)
    response_data = str(response.data)

    if response.status != 200:
        raise DataFailureException(url, response.status, response_data)

    return _subject_guide_from_json(json.loads(response.data))
fe22c43685eb36e3a0849c198e6e5621e763b7a3
3,643,534
def dense_reach_bonus(task_rew, b_pos, arm_pos, max_reach_bonus=1.5, reach_thresh=.02, reach_multiplier=all_rew_reach_multiplier): """ Convenience function for adding a conditional dense reach bonus to an aux task. If the task_rew is > 1, this indicates that the actual task is complete, and instead of giving a reach bonus, the max amount of reward given for a reach should be given (regardless of whether reach is satisfied). If it is < 1, a dense reach reward is given, and the actual task reward is given ONLY if the reach condition is satisfied. """ if task_rew > 1: total_rew = task_rew + reach_multiplier * max_reach_bonus else: reach_rew = close(reach_thresh, b_pos, arm_pos, close_rew=max_reach_bonus) new_task_rew = task_rew * int(reach_rew > 1) total_rew = reach_multiplier * reach_rew + new_task_rew return total_rew
ac1b53836a2a1fd9a4cf7c725222f0e053d65ddb
3,643,535
import re

def getAllNumbers(text):
    """
    This function is a copy of systemtools.basics.getAllNumbers
    """
    if text is None:
        return None
    allNumbers = []
    if len(text) > 0:
        # Remove spaces between digits:
        spaceNumberExists = True
        while spaceNumberExists:
            text = re.sub('(([^.,0-9]|^)[0-9]+) ([0-9])', '\\1\\3', text, flags=re.UNICODE)
            if re.search('([^.,0-9]|^)[0-9]+ [0-9]', text) is None:
                spaceNumberExists = False
        numberRegex = '[-+]?[0-9]+[.,][0-9]+|[0-9]+'
        allMatchIter = re.finditer(numberRegex, text)
        if allMatchIter is not None:
            for current in allMatchIter:
                currentFloat = current.group()
                currentFloat = re.sub(r"\s", "", currentFloat)
                currentFloat = re.sub(",", ".", currentFloat)
                currentFloat = float(currentFloat)
                if currentFloat.is_integer():
                    allNumbers.append(int(currentFloat))
                else:
                    allNumbers.append(currentFloat)
    return allNumbers
42d45d6bb7a5ae1b25d2da6eadb318c3388923d6
3,643,536
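Illustrative calls (not from the source); note how digit-group spaces are merged and commas become decimal points:

print(getAllNumbers("pi is 3.14 and there are 12 months"))  # [3.14, 12]
print(getAllNumbers("price: 1 250 euros"))                  # [1250]
print(getAllNumbers("ratio 2,5 to 1"))                      # [2.5, 1]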
def optimal_string_alignment_distance(s1, s2): """ This is a variation of the Damerau-Levenshtein distance that returns the strings' edit distance taking into account deletion, insertion, substitution, and transposition, under the condition that no substring is edited more than once. Args: s1 (str): Sequence 1. s2 (str): Sequence 2. Returns: float: Optimal String Alignment Distance. Examples: >>> rltk.optimal_string_alignment_distance('abcd', 'acbd') 1 >>> rltk.optimal_string_alignment_distance('ca', 'abc') 3 """ utils.check_for_none(s1, s2) utils.check_for_type(str, s1, s2) # s1 = utils.unicode_normalize(s1) # s2 = utils.unicode_normalize(s2) n1, n2 = len(s1), len(s2) dp = [[0] * (n2 + 1) for _ in range(n1 + 1)] for i in range(0, n1 + 1): dp[i][0] = i for j in range(0, n2 + 1): dp[0][j] = j for i in range(1, n1 + 1): for j in range(1, n2 + 1): cost = 0 if s1[i - 1] == s2[j - 1] else 1 dp[i][j] = min(dp[i][j - 1] + 1, dp[i - 1][j] + 1, dp[i - 1][j - 1] + cost) if (i > 1 and j > 1 and s1[i - 1] == s2[j - 2] and s1[i - 2] == s2[j - 1]): dp[i][j] = min(dp[i][j], dp[i - 2][j - 2] + cost) return dp[n1][n2]
9c05cfd3217619e76dd1e6063aa1aa689dc1a0ef
3,643,537
def test_sanitize_callable_params():
    """Callback functions are not serializable. Therefore, we give them a chance to return
    something and if the returned type is not accepted, return None.
    """
    opt = "--max_epochs 1".split(" ")
    parser = ArgumentParser()
    parser = Trainer.add_argparse_args(parent_parser=parser)
    params = parser.parse_args(opt)

    def return_something():
        return "something"

    params.something = return_something

    def wrapper_something():
        return return_something

    params.wrapper_something_wo_name = lambda: lambda: "1"
    params.wrapper_something = wrapper_something

    params = _convert_params(params)
    params = _flatten_dict(params)
    params = _sanitize_callable_params(params)
    assert params["gpus"] == "None"
    assert params["something"] == "something"
    assert params["wrapper_something"] == "wrapper_something"
    assert params["wrapper_something_wo_name"] == "<lambda>"
d2a553a3c347d5ef0a2be10b21af6920a50697fb
3,643,538
import six def get_url(bucket_name, filename): """ Gets the uri to the object. """ client = storage.Client() bucket = client.bucket(bucket_name) blob = bucket.blob(filename) url = blob.public_url if isinstance(url, six.binary_type): url = url.decode('utf-8') return url
15e2d5ae5cfdfeb9794c9cfef1feecbc0f1e4183
3,643,539
import numpy as np

def distance():
    """
    Return a random value of FRB distance, chosen from a range of observed FRB distances.

    - Args:
        None.

    - Returns:
        FRB distance in meters
    """
    dist_m = np.random.uniform(6.4332967e24, 1.6849561e26)
    return dist_m
c38cfa7878020bafd9fa1cafef962ed2b91bc804
3,643,540
def p10k(n, empty="-"): """ Write number as parts per ten thousand. """ if n is None or np.isnan(n): return empty elif n == 0: return "0.0‱" elif np.isinf(n): return _("inf") if n > 0 else _("-inf") return format_number(10000 * n) + "‱"
6d0ff6e5b48c62ad10207c0f8a72595201042ef4
3,643,541
from typing import Any def output_file(filename: str, *codecs: Codec, **kwargs: Any) -> Output: """ A shortcut to create proper output file. :param filename: output file name. :param codecs: codec list for this output. :param kwargs: output parameters. :return: configured ffmpeg output. """ return Output(output_file=filename, codecs=list(codecs), **kwargs)
c467331d5a2773a014f52326872b7999bf17547c
3,643,542
import warnings

def convert_topology(topology, model_name, doc_string, target_opset,
                     channel_first_inputs=None,
                     options=None, remove_identity=True,
                     verbose=0):
    """
    This function is used to convert our Topology object defined in
    _parser.py into a ONNX model (type: ModelProto).

    :param topology: The Topology object we are going to convert
    :param model_name: GraphProto's name. Let "model" denote the returned
        model. The string "model_name" would be assigned to
        "model.graph.name."
    :param doc_string: A string attached to the produced model
    :param target_opset: number or dictionary, for example, 7 for ONNX 1.2,
        and 8 for ONNX 1.3, a dictionary is used to indicate different opset
        for different domains
    :param options: see :ref:`l-conv-options`
    :param remove_identity: removes identity nodes
    :param verbose: displays information while converting
    :return: a ONNX ModelProto
    """
    if target_opset is None:
        target_opset = get_latest_tested_opset_version()
    if isinstance(target_opset, dict):
        onnx_target_opset = target_opset.get(
            '', get_latest_tested_opset_version())
    else:
        onnx_target_opset = target_opset
    if onnx_target_opset > get_opset_number_from_onnx():
        found = get_opset_number_from_onnx()
        raise RuntimeError(
            "Parameter target_opset {} > {} is higher than the "
            "version of the installed onnx package. See "
            "https://github.com/onnx/onnx/blob/master/docs/"
            "Versioning.md#released-versions"
            ".".format(onnx_target_opset, found))
    if onnx_target_opset > get_latest_tested_opset_version():
        warnings.warn(
            "Parameter target_opset {} > {} is higher than the "
            "latest tested version"
            ".".format(
                onnx_target_opset,
                get_latest_tested_opset_version()))

    container = ModelComponentContainer(
        target_opset, options=options,
        registered_models=topology.registered_models,
        white_op=topology.raw_model._white_op,
        black_op=topology.raw_model._black_op,
        verbose=verbose)

    # Traverse the graph from roots to leaves
    # This loop could eventually be parallelized.
    topology.convert_operators(container=container, verbose=verbose)
    container.ensure_topological_order()

    if len(container.inputs) == 0:
        raise RuntimeError("No detected inputs after conversion.")
    if len(container.outputs) == 0:
        raise RuntimeError("No detected outputs after conversion.")
    if verbose >= 2:
        print("---NODES---")
        for node in container.nodes:
            print("  %s - %s: %r -> %r" % (
                node.op_type, node.name, node.input, node.output))

    # Create a graph from its main components
    if container.target_opset_onnx < 9:
        # When calling ModelComponentContainer's add_initializer(...),
        # nothing is added into the input list. However, for ONNX target
        # opset < 9, initializers should also be a part of model's
        # (GraphProto) inputs. Thus, we create ValueInfoProto objects
        # from initializers (type: TensorProto) directly and then add
        # them into model's input list.
        extra_inputs = []  # ValueInfoProto list of the initializers
        for tensor in container.initializers:
            # Sometimes (especially when creating optional input values
            # such as RNN's initial hidden state), an initializer is also
            # one of the original model's input, so it has been added into
            # the container's input list. If this is the case, we need to
            # skip one iteration to avoid duplicated inputs.
            if tensor.name in [value_info.name
                               for value_info in container.inputs]:
                continue

            # Initializers are always tensors so we can just call
            # make_tensor_value_info(...).
            value_info = make_tensor_value_info(
                tensor.name, tensor.data_type, tensor.dims)
            extra_inputs.append(value_info)

        # Before ONNX opset 9, initializers needed to be passed in
        # with the inputs.
        graph = make_graph(container.nodes, model_name,
                           container.inputs + extra_inputs,
                           container.outputs, container.initializers)
    else:
        # In ONNX opset 9 and above, initializers are included as
        # operator inputs and therefore do not need to be passed as
        # extra_inputs.
        graph = make_graph(
            container.nodes, model_name, container.inputs,
            container.outputs, container.initializers)

    # Add extra information related to the graph
    graph.value_info.extend(container.value_info)

    # Create model
    onnx_model = make_model(graph)

    # Update domain version
    opv = min(onnx_target_opset,
              _get_main_opset_version(onnx_model) or onnx_target_opset)
    if not _update_domain_version(container, onnx_model, verbose=verbose):
        # Main opset was not added. Doing it here.
        op_set = onnx_model.opset_import.add()
        op_set.domain = ''
        op_set.version = opv
        if verbose > 0:
            print('[convert_topology] +opset: name=%r, version=%s' % (
                '', opv))

    # Add extra information
    irv = OPSET_TO_IR_VERSION.get(opv, onnx_proto.IR_VERSION)
    onnx_model.ir_version = irv
    onnx_model.producer_name = utils.get_producer()
    onnx_model.producer_version = utils.get_producer_version()
    onnx_model.domain = utils.get_domain()
    onnx_model.model_version = utils.get_model_version()
    onnx_model.doc_string = doc_string

    # Removes many identity nodes. The converter may introduce identity
    # nodes after a zipmap operator, and onnx <= 1.7 does not support
    # that. It does not use onnxconverter-common, as that optimizer only
    # supports opset >= 9.
    if remove_identity:
        onnx_model = onnx_remove_node_identity(onnx_model)

    return onnx_model
139efc34473518b0403cd0bdbfc85b0b2715d576
3,643,544
import numpy as np
import pandas as pd

def multivariate_logrank_test(event_durations, groups, event_observed=None,
                              alpha=0.95, t_0=-1, suppress_print=False, **kwargs):
    """
    This test is a generalization of the logrank_test: it can deal with n>2
    populations (and should be equal when n=2):

    H_0: all event series are from the same generating processes
    H_A: there exists at least one group that differs from the others.

    Parameters:
      event_durations: a (n,) numpy array of the (partial) lifetimes of all individuals
      groups: a (n,) numpy array of unique group labels for each individual.
      event_observed: a (n,) numpy array of event observations: 1 if observed death, 0 if censored.
          Defaults to all observed.
      alpha: the level of significance desired.
      t_0: the final time to compare the series up to. Defaults to all.
      suppress_print: if True, do not print the summary. Default False.
      kwargs: add keywords and meta-data to the experiment summary.

    Returns:
      summary: a print-friendly summary of the statistical test
      p_value: the p-value
      test_result: True if reject the null, (pedantically) None if we can't reject the null.

    """
    if event_observed is None:
        event_observed = np.ones((event_durations.shape[0], 1))

    n = max(event_durations.shape)
    assert n == max(event_durations.shape) == max(event_observed.shape), "inputs must be of the same length."
    groups, event_durations, event_observed = map(lambda x: pd.Series(np.reshape(x, (n,))),
                                                  [groups, event_durations, event_observed])

    unique_groups, rm, obs, _ = group_survival_table_from_events(groups, event_durations, event_observed,
                                                                 np.zeros_like(event_durations), t_0)
    n_groups = unique_groups.shape[0]

    # compute the factors needed
    N_j = obs.sum(0).values
    n_ij = (rm.sum(0).values - rm.cumsum(0).shift(1).fillna(0))
    d_i = obs.sum(1)
    n_i = rm.values.sum() - rm.sum(1).cumsum().shift(1).fillna(0)
    ev = n_ij.mul(d_i / n_i, axis='index').sum(0)

    # vector of observed minus expected
    Z_j = N_j - ev

    assert abs(Z_j.sum()) < 10e-8, "Sum is not zero."  # this should move to a test eventually.

    # compute covariance matrix
    V_ = n_ij.mul(np.sqrt(d_i) / n_i, axis='index').fillna(1)
    V = -np.dot(V_.T, V_)
    ix = np.arange(n_groups)
    V[ix, ix] = V[ix, ix] + ev

    # take the first n-1 groups (.iloc replaces the removed pandas .ix accessor)
    U = Z_j.iloc[:-1].dot(np.linalg.pinv(V[:-1, :-1]).dot(Z_j.iloc[:-1]))  # Z.T*inv(V)*Z

    # compute the p-values and tests
    test_result, p_value = chisq_test(U, n_groups - 1, alpha)
    summary = pretty_print_summary(test_result, p_value, U, t_0=t_0, test='logrank',
                                   alpha=alpha, null_distribution='chi squared',
                                   df=n_groups - 1, **kwargs)

    if not suppress_print:
        print(summary)
    return summary, p_value, test_result
2d433c4651828cc962a94802eae72e0ab68e7f0b
3,643,546
import numpy as np

def ae(y, p):
    r"""Absolute error.

    Absolute error can be defined as follows:

    .. math::
        \sum_{i=1}^{n} |y_i - p_i|

    where :math:`n` is the number of provided records.

    Parameters
    ----------
    y : :class:`ndarray`
        One dimensional array with ground truth values.
    p : :class:`ndarray`
        One dimensional array with predicted values.

    Returns
    -------
    float
        Absolute error as described above.

    """
    return np.abs(y-p).sum()
6f08799429c561af37a941e0678ba0c147ba3a9c
3,643,547
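An illustrative check (not from the source):

import numpy as np

y = np.array([1.0, 2.0, 3.0])
p = np.array([1.0, 3.0, 5.0])
print(ae(y, p))  # 3.0  (|0| + |-1| + |-2|)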
import numpy as np

def create_masks_from_plane(normal, dist, shape):
    """
    Create a binary mask of given size based on a plane defined by its normal
    and its distance to the origin (in voxel coordinates).

    Parameters
    ----------
    normal: Normal of the plane (in voxel coordinates).
    dist: Distance of the plane to the origin (in voxel coordinates).
    shape: Shape of the mask that will be created.

    Returns
    -------
    Binary mask of specified shape split in two by the given plane.

    """
    grid_x, grid_y, grid_z = np.meshgrid(range(shape[0]),
                                         range(shape[1]),
                                         range(shape[2]), indexing='ij')

    position = np.column_stack((grid_x.ravel(order='F'),
                                grid_y.ravel(order='F'),
                                grid_z.ravel(order='F')))

    distance_from_plane = np.dot(position, normal) + dist
    distance_vol = np.array(distance_from_plane).reshape((shape[0], shape[1], shape[2]), order='F')

    binary_mask = np.empty(distance_vol.shape, dtype=np.uint8)
    binary_mask[:, :, :] = distance_vol[:, :, :] >= 0

    return binary_mask
c6f3995a12aa98f960364332195ac5caeb1d6fe4
3,643,548
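A quick sanity check (illustrative): with normal (0, 0, 1) and dist = -2, the mask is 1 exactly where the z index is >= 2.

import numpy as np

mask = create_masks_from_plane(normal=np.array([0, 0, 1]), dist=-2, shape=(3, 3, 5))
assert mask.shape == (3, 3, 5)
assert np.array_equal(np.unique(np.nonzero(mask)[2]), np.array([2, 3, 4]))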
from typing import List

from sacremoses import MosesDetokenizer  # assumed source of MosesDetokenizer

def untokenize(tokens: List[str], lang: str = "fr") -> str:
    """
    Turn a list of tokens back into a single string.

    ["J'", 'ai'] >>> "J' ai"

    Parameters
    ----------
    lang : string
        language code

    Returns
    -------
    string
        text
    """
    d = MosesDetokenizer(lang=lang)
    text: str = d.detokenize(tokens, unescape=False)
    return text
551ecf233b0869c4912b47ff1dee765647b07acc
3,643,549
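Illustrative usage (assuming the sacremoses MosesDetokenizer, as imported above; the exact spacing is up to the Moses rules for the chosen language):

tokens = ["Ceci", "est", "un", "test", "."]
print(untokenize(tokens))  # e.g. "Ceci est un test."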
def unwrap_cachable(func): """ Converts any HashableNodes in the argument list of a function into their standard node counterparts. """ def inner(*args, **kwargs): args, kwargs = _transform_by_type(lambda hashable: hashable.node, HashableNode, *args, **kwargs) return func(*args, **kwargs) return inner
40b8f4b62045808815c67f0a22b4d8b97c9fbb1e
3,643,551
def tuples_to_full_paths(tuples):
    """
    For a set of tuples of possible end-to-end paths [format:
    (up_seg, core_seg, down_seg)], return a list of full paths.
    """
    res = []
    for up_segment, core_segment, down_segment in tuples:
        if not up_segment and not core_segment and not down_segment:
            continue
        if not _check_connected(up_segment, core_segment, down_segment):
            continue

        up_iof, up_hofs, up_mtu, up_exp = _copy_segment(
            up_segment, False, (core_segment or down_segment))
        core_iof, core_hofs, core_mtu, core_exp = _copy_segment(
            core_segment, up_segment, down_segment)
        down_iof, down_hofs, down_mtu, down_exp = _copy_segment(
            down_segment, (up_segment or core_segment), False, cons_dir=True)
        args = []
        for iof, hofs in [(up_iof, up_hofs), (core_iof, core_hofs),
                          (down_iof, down_hofs)]:
            if iof:
                args.extend([iof, hofs])
        path = SCIONPath.from_values(*args)
        if up_segment:
            up_core = list(reversed(list(up_segment.iter_asms())))
        else:
            up_core = []
        if core_segment:
            up_core += list(reversed(list(core_segment.iter_asms())))
        if_list = _build_interface_list(up_core)
        if down_segment:
            down_core = list(down_segment.iter_asms())
        else:
            down_core = []
        if_list += _build_interface_list(down_core, cons_dir=True)
        mtu = _min_mtu(up_mtu, core_mtu, down_mtu)
        exp = min(up_exp, core_exp, down_exp)
        path_meta = FwdPathMeta.from_values(path, if_list, mtu, exp)
        res.append(path_meta)
    return res
f5b15e0e2483d194f6cf6c3eb8ec318aadd7b960
3,643,552
def _fileobj_to_fd(fileobj): """Return a file descriptor from a file object. Parameters: fileobj -- file object or file descriptor Returns: corresponding file descriptor Raises: ValueError if the object is invalid """ if isinstance(fileobj, int): fd = fileobj else: try: fd = int(fileobj.fileno()) except (AttributeError, TypeError, ValueError): raise ValueError('Invalid file object: {!r}'.format(fileobj) ) from None if fd < 0: raise ValueError('Invalid file descriptor: {}'.format(fd)) return fd
8b1bea4083c0ecf481c712c8b06c76257cea43db
3,643,553
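Illustrative usage (not from the source): file objects are mapped to their descriptor, ints pass straight through, and invalid values raise ValueError.

import tempfile

f = tempfile.TemporaryFile()
assert _fileobj_to_fd(f) == f.fileno()   # file object -> its descriptor
assert _fileobj_to_fd(7) == 7            # ints pass straight through
f.close()
try:
    _fileobj_to_fd(-1)
except ValueError as e:
    print(e)  # Invalid file descriptor: -1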
def request_changes_pull_request(pull_request=None, body_or_reason=None):
    """
    Submit a "request changes" review on a pull request.

    :param pull_request: the pull request object to review
    :param body_or_reason: the review comment explaining the requested changes
    :return: the created review
    """
    if not pull_request:
        raise ValueError("you must provide a pull request")
    if not body_or_reason:
        raise ValueError("you must provide request changes comment(s)")

    return pull_request.create_review(event=PULL_REQUEST_EVENT_REQUEST_CHANGES, body=body_or_reason)
6487b8b47a8a33882010083e97ebbd57b464311b
3,643,554
from typing import Callable
from typing import Union
from typing import Type
from typing import Tuple

def handle(
    func: Callable,
    exception_type: Union[Type[Exception], Tuple[Type[Exception], ...]],
    *args,
    **kwargs
):
    """
    Call function with errors handled in cfpm's way.

    Before using this function, make sure all of func's errors are known and
    that it can exit safely after an error is raised without cleaning up.

    Args:
        func: The function to be called.
        exception_type: The type(s) of the exceptions that can be handled
            safely.
    """
    try:
        return func(*args, **kwargs)
    except exception_type as e:
        error(e)
d290fa4353a6e608b21464c33adc6f72675d9e6c
3,643,555
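Illustrative usage (the error() helper is not shown in this record; a printing stand-in is assumed):

def error(e):  # assumed stand-in for cfpm's error reporter
    print(f"error: {e}")

def parse_port(raw: str) -> int:
    return int(raw)

print(handle(parse_port, ValueError, "8080"))  # 8080
handle(parse_port, ValueError, "not-a-port")   # prints "error: invalid literal ..."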