content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os


def list_files(dir_path):
    """Recursively collect the paths of every file under *dir_path*.

    :param dir_path: root directory to walk
    :return: list of full file paths (directories excluded)
    """
    collected = []
    for root, _, files in os.walk(dir_path):
        collected.extend(os.path.join(root, name) for name in files)
    return collected
9e9e86ce8f2665e09c2b8719cd9a0fa7ce8ee40d
691,731
import zipfile


def extract_requirements_from_zip(sdist_filename):
    """Extract a file named **/*.egg-info/requires.txt in a .zip sdist.

    Returns bytes or None.
    """
    with zipfile.ZipFile(sdist_filename, 'r') as archive:
        # First member whose path ends with the egg-info requires file, if any.
        match = next(
            (member for member in archive.namelist()
             if member.endswith('.egg-info/requires.txt')),
            None,
        )
        if match is not None:
            return archive.read(match)
    return None
f8d9f3d8ba4fca8fa904022a6713c36038bfca86
691,732
def root():
    """Root route: identifies the service by name."""
    message = 'Recipes Analyzer'
    return message
434b5e5f683efb57c6ac2e70f329463d84918e09
691,733
def _find_gaps_split(datagap_times: list, existing_gap_times: list): """ helper for compare_and_find_gaps. A function to use in a loop to continue splitting gaps until they no longer include any existing gaps datagap_times = [[0,5], [30,40], [70, 82], [90,100]] existing_gap_times = [[10,15], [35,45], [75,80], [85,95]] split_dgtime = [[0, 5], [30, 40], [70, 75], [80, 82], [90, 100]] Parameters ---------- datagap_times list, list of two element lists (start time, end time) for the gaps found in the new data existing_gap_times list, list of two element lists (start time, end time) for the gaps found in the existing data Returns ------- list list of two element lists (start time, end time) for the new data gaps split around the existing data gaps """ split = False split_dgtime = [] for dgtime in datagap_times: for existtime in existing_gap_times: # datagap contains an existing gap, have to split the datagap if (dgtime[0] <= existtime[0] <= dgtime[1]) and (dgtime[0] <= existtime[1] <= dgtime[1]): split_dgtime.append([dgtime[0], existtime[0]]) split_dgtime.append([existtime[1], dgtime[1]]) split = True break if not split: split_dgtime.append(dgtime) else: split = False return split_dgtime
af1aaafa27725a033f9d34ff6f10c4288c9f96d9
691,734
def MRR(predictions, target):
    """Compute mean reciprocal rank.

    :param predictions: 2d array [batch_size x num_candidate_paragraphs],
        each row lists candidate indices in ranked order
    :param target: 2d array [batch_size x num_candidate_paragraphs] of 0/1
        relevance labels
    :return: mean reciprocal rank [a float value]
    """
    assert predictions.shape == target.shape
    assert predictions.ndim == target.ndim == 2
    n_rows, n_cols = target.shape
    reciprocal_sum = 0.0
    for row in range(n_rows):
        for rank in range(n_cols):
            # First relevant candidate determines the reciprocal rank.
            if target[row, predictions[row, rank]] == 1:
                reciprocal_sum += 1.0 / (rank + 1)
                break
    return reciprocal_sum / n_rows
34b156fc3a38f23b6ad3ffae589c9afc773ec1ab
691,736
def isIn(obj, objs):
    """Safely check whether *obj* is in *objs*.

    Identity is tried first; equality comparisons that raise are ignored so
    objects with broken __eq__ cannot crash the check.
    """
    for candidate in objs:
        if candidate is obj:
            return True
        try:
            if candidate == obj:
                return True
        except Exception:
            # A misbehaving __eq__ should not abort the scan.
            continue
    return False
0b19e6ac4d2ac2b290b0fe62bfd862c870708eac
691,737
def sem_of_rule(rule):
    """Return the semantic part of a grammatical rule.

    The rule's second element is a pair whose second member is the semantics.
    """
    rhs = rule[1]
    return rhs[1]
9746ad4c83e681f55c1497ea514637c293074b27
691,740
def htk_to_ms(htk_time):
    """Convert time in HTK (100 ns) units to 5 ms units.

    100 ns * 50000 = 5 ms, hence the divisor.

    :param htk_time: time in HTK units; numeric, or a numeric string
    :return: float time in 5 ms units
    """
    # Fix: isinstance() instead of type(x) == type("string"), which missed
    # str subclasses and is un-idiomatic.
    if isinstance(htk_time, str):
        htk_time = float(htk_time)
    return htk_time / 50000.0
5e177e8e5644e4171296826bc62b71f9803889a3
691,741
def find_diff_of_numbers(stat1, stat2):
    """Find the difference between two stats.

    If there is no difference, returns "unchanged". For ints/floats, returns
    stat1 - stat2; if exactly one side is None, returns [stat1, stat2].

    :param stat1: the first statistical input
    :type stat1: Union[int, float]
    :param stat2: the second statistical input
    :type stat2: Union[int, float]
    :return: the difference of the stats
    """
    if stat1 is None and stat2 is None:
        return "unchanged"
    if stat1 is None or stat2 is None:
        # Only one side missing: report both values verbatim.
        return [stat1, stat2]
    if stat1 != stat2:
        return stat1 - stat2
    return "unchanged"
b316c702e2a5d63a6dad4beac1dd59939a544aca
691,742
def set_column_constant_lists():
    """Return the fixed column-name lists so they are computed only once.

    :return: (feature_bases, home_columns, away_columns, mean_columns,
        premier_columns)
    """
    feature_bases = ["goals", "shots", "shotsOnTarget", "corners", "fouls",
                     "yellowCards", "redCards"]
    # "for" columns first, then "against", matching the historical order.
    home_columns = [
        "home_{}_{}".format(base, side)
        for side in ["for", "against"]
        for base in feature_bases
    ]
    away_columns = [
        "away_{}_{}".format(base, side)
        for side in ["for", "against"]
        for base in feature_bases
    ]
    mean_columns = ["{}_mean".format(col) for col in home_columns + away_columns]
    premier_columns = ["{}_premier".format(col) for col in mean_columns]
    return (feature_bases, home_columns, away_columns, mean_columns, premier_columns)
17a386e62648ebbcc05c39c8055f39858e84f1e9
691,743
import struct
import array
import functools
import operator
import numpy


def parse_idx(fd):
    """Parse an IDX file, and return it as a numpy array.

    https://github.com/datapythonista/mnist/blob/master/mnist/__init__.py
    Credit: @datapythonista, Marc Garcia

    Parameters
    ----------
    fd : file
        File descriptor of the IDX file to parse (binary mode).

    Returns
    -------
    data : numpy.ndarray
        Numpy array with the dimensions and the data in the IDX file

    Notes
    -----
    Struct byte-order codes: https://docs.python.org/3/library/struct.html
    #byte-order-size-and-alignment
    """
    # IDX type byte -> array.array typecode.
    DATA_TYPES = {0x08: 'B',  # unsigned byte
                  0x09: 'b',  # signed byte
                  0x0b: 'h',  # short (2 bytes)
                  0x0c: 'i',  # int (4 bytes)
                  0x0d: 'f',  # float (4 bytes)
                  0x0e: 'd'}  # double (8 bytes)

    # Header: two zero bytes, the data type byte, and the dimension count.
    header = fd.read(4)
    if len(header) != 4:
        raise RuntimeError('Invalid IDX file, '
                           'file empty or does not contain a full header.')

    zeros, data_type, num_dimensions = struct.unpack('>HBB', header)

    if zeros != 0:
        raise RuntimeError('Invalid IDX file, '
                           'file must start with two zero bytes. '
                           'Found 0x%02x' % zeros)

    try:
        data_type = DATA_TYPES[data_type]
    except KeyError:
        raise RuntimeError('Unknown data type '
                           '0x%02x in IDX file' % data_type)

    # One big-endian 32-bit size per dimension follows the header.
    dimension_sizes = struct.unpack('>' + 'I' * num_dimensions,
                                    fd.read(4 * num_dimensions))

    data = array.array(data_type, fd.read())
    data.byteswap()  # looks like array.array reads data as little endian

    # Sanity check: payload length must match the product of the dimensions.
    expected_items = functools.reduce(operator.mul, dimension_sizes)
    if len(data) != expected_items:
        raise RuntimeError('IDX file has wrong number of items. '
                           'Expected: %d. Found: %d'
                           % (expected_items, len(data)))

    return numpy.array(data).reshape(dimension_sizes)
f1f3e4805c495c72a443b76e4f983898be420522
691,744
def pylong_join(count, digits_ptr='digits', join_type='unsigned long'):
    """
    Generate an unrolled shift-then-or loop over the first 'count' digits.
    Assumes that they fit into 'join_type'.

    (((d[2] << n) | d[1]) << n) | d[0]
    """
    pieces = []
    for digit_index in range(count - 1, -1, -1):
        # The lowest digit (index 0) is not shifted.
        shift = " << PyLong_SHIFT" if digit_index else ''
        pieces.append("%s[%d])%s)" % (digits_ptr, digit_index, shift))
    return '(' * (count * 2) + "(%s)" % join_type + ' | '.join(pieces)
b3cda375fc2fbc922fcb7ecb7a4faa7bc581f7d8
691,745
def wc(iterable):
    """
    wc(iter: iterable)

    return size of "iter" by consuming it.

    args:
        iter = [[1,2], [2,3], [3,4]]
        iter = {}

    return:
        3
        0
    """
    return sum(1 for _ in iterable)
165a95a2ea693e5902a4a4e5651228a86f24b17b
691,746
import argparse


def parse_args():
    """Parse command line arguments.

    All four options default to the standard EII certificate paths and the
    local etcd endpoint.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--ca_etcd',
                        default="/EII/Certificates/rootca/cacert.pem",
                        help='ca Certificate')
    parser.add_argument('--etcd_root_cert',
                        default="/EII/Certificates/root/root_client_certificate.pem",
                        help='root cert')
    parser.add_argument('--etcd_root_key',
                        default="/EII/Certificates/root/root_client_key.pem",
                        help='root key')
    parser.add_argument('--etcd_endpoints',
                        default="127.0.0.1:2379",
                        help='etcd_host:port')
    return parser.parse_args()
d2cc9cedc9ed0d007c79ee515a7e0a6216c7c874
691,747
def two_sum2(nums, target):
    """Find two numbers summing to *target* using a hash of seen values.

    :param nums: list of numbers to search
    :type nums: list [int]
    :param target: the target sum
    :type target: int
    :rtype: List[int]
    :return: list with the two indices, or None when no pair exists

    Bug fix: the original recovered the current index with ``nums.index(val)``,
    which returns the *first* occurrence — wrong for duplicates (e.g.
    ``[3, 3]`` with target 6 returned [0, 0]) and O(n) per element.
    """
    seen = {}
    for index, val in enumerate(nums):
        complement = target - val
        if complement in seen:
            return [seen[complement], index]
        seen[val] = index
    return None
0b11c34f20ab498c186f333c32701abe72810a78
691,749
import hashlib


def hexdigest(s):
    """Return the sha256 hexdigest of *s* after UTF-8 encoding."""
    digest = hashlib.sha256()
    digest.update(s.encode("utf-8"))
    return digest.hexdigest()
b4ded415c5e7bdf970d51c5973ea9b658ef70fe0
691,750
def factor_first_event(match_info, event_list, team_key):
    """Create a factor for an event in event_list.

    Arguments:
        match_info: match dict whose 'teams' entries carry boolean flags
        event_list: list of 'Event' objects
        team_key: string of the event type in the 'Team' object,
            e.g. 'firstTower'

    Returns:
        -1 if the event did not happen yet, otherwise the first team's flag
        cast to int (0 = red team did it, 1 = blue team did it)
    """
    if not event_list:
        return -1
    return int(match_info['teams'][0][team_key])
0691915ddc4fd81775068fa6a1fcda341cbedc3d
691,753
def get_env_vars_snippet(env_vars):
    """Generate a Lambda CloudFormation snippet for the environment variables.

    :param env_vars: mapping of variable name -> value; may be empty/None
    :return: YAML snippet string, or "" when there are no variables
    """
    snippet = ""
    if env_vars:
        snippet = \
            """
      Environment:
        Variables:
"""
        # Fix: dict.iteritems() is Python 2 only and raises AttributeError on
        # Python 3; items() behaves identically here.
        for key, value in env_vars.items():
            snippet += '{}{}: "{}"\n'.format(" " * 10, key, value)
    return snippet
0a9d86a99e672926b19b9c2284df2b8e8ef1c177
691,754
def find_closest_pair(points):
    """Return the pair of Point objects from *points* that are closest
    together, as a two-element list (empty if fewer than two points)."""
    best_dist = -1
    best_pair = []
    for i, p1 in enumerate(points):
        for p2 in points[i + 1:]:
            dist = p1.get_distance(p2)
            # -1 marks "no pair seen yet"; strictly-smaller keeps the first
            # minimal pair encountered.
            if best_dist == -1 or dist < best_dist:
                best_dist = dist
                best_pair = [p1, p2]
    return best_pair
fbb254fa12b0067bfc87a4abf64cf9be67de4c4a
691,755
import re


def str2num(s, tfunc=None):
    """Extract numbers in a string.

    Parameters
    ----------
    s : str
        The string.
    tfunc : None, optional
        Formatting function; 'auto' picks int/float per token, None returns
        the raw matched strings, any callable is applied to each token.

    Returns
    -------
    list
        The number list.
    """
    matches = re.findall(r'-?\d+\.?\d*e*E?-?\d*', s)
    if tfunc is None:
        return matches
    if tfunc == 'auto':
        # A dot or lowercase 'e' marks a float token, same as the original.
        return [
            float(tok) if ('.' in tok or 'e' in tok) else int(tok)
            for tok in matches
        ]
    return [tfunc(tok) for tok in matches]
aa735e99251ee681fd4eb94d160a5eaac13648e1
691,756
def escape_like(string, escape_char="\\"):
    """
    Escape the string parameter used in SQL LIKE expressions.

    ::

        from sqlalchemy_utils import escape_like

        query = session.query(User).filter(
            User.name.ilike(escape_like('John'))
        )

    :param string: a string to escape
    :param escape_char: escape character
    """
    # Escape the escape char first so the wildcard escapes are not doubled.
    escaped = string.replace(escape_char, escape_char * 2)
    escaped = escaped.replace("%", escape_char + "%")
    escaped = escaped.replace("_", escape_char + "_")
    return escaped
df8f805e50c5569910ad32b909db9a7db4b25b53
691,757
def create_bus(net, level, name, zone=None):
    """
    Create a bus on a given network.

    :param net: the given network
    :param level: nominal pressure level of the bus
    :param name: name of the bus
    :param zone: zone of the bus (default: None)
    :return: name of the bus
    :raises ValueError: if *level* is not one of net.LEVELS
    """
    try:
        assert level in net.LEVELS
    except AssertionError:
        msg = "The pressure level of the bus {} is not in {}".format(name, net.LEVELS)
        raise ValueError(msg)
    # Append the new row at the next integer index of the bus table.
    idx = len(net.bus.index)
    net.bus.loc[idx] = [name, level, zone, "NODE"]
    return name
920aab5009c387b53c92dbd8af64a8122abe18b3
691,758
def u2q(u1, u2, warnings=True):
    """
    Convert the linear and quadratic terms of the quadratic limb-darkening
    parameterization -- called `u_1` and `u_2` in Kipping 2013 or `a` and `b`
    in Claret et al. 2013 -- to `q_1` and `q_2` as described in Kipping 2013:
    http://adsabs.harvard.edu/abs/2013MNRAS.435.2152K

    Parameters
    ----------
    u1 : float
        Linear component of quadratic limb-darkening
    u2 : float
        Quadratic component of quadratic limb-darkening
    warnings : bool, optional
        Print a warning when the inputs violate Kipping's conditions.

    Returns
    -------
    (q1, q2) : tuple of floats
        Kipping (2013) style quadratic limb-darkening parameters
    """
    q1 = (u1 + u2)**2
    q2 = 0.5*u1/(u1+u2)
    if warnings and (u1 < 0 or u2 < 0):
        # Bug fix: the original used {0:.3f} for BOTH placeholders, so u2 was
        # never shown; {1:.3f} now formats u2.
        print("WARNING: The quadratic limb-darkening parameters " +
              "u1={0:.3f} or u2={1:.3f} violate Kipping's ".format(u1, u2) +
              "conditions for a monotonically increasing or everywhere-" +
              "positive intensity profile. Returning them as is.")
    return q1, q2
baa934c792be8e0b72a9ede9a1431f356f9496fa
691,759
def lm1b():
    """Sets up diffusion to run with LM1B."""
    config = {
        'run_experiment.dataset_name': 'lm1b',
        'datasets.load.max_length': 128,
        'datasets.load.pack': True,
    }
    # Padding handling is disabled everywhere for this dataset.
    config.update({
        'discrete_diffusion_loss_fn.mask_padding': False,
        'discrete_diffusion_loss_fn.normalize_without_padding': False,
        'discrete_diffusion_predict_fn.mask_padding': False,
    })
    return config
8295280d4d67caec2cb23859c648a6036b8abbd2
691,760
def fixture_region():
    """Return the [west, east, south, north] region used with the sample
    earth_relief grid."""
    west, east, south, north = -52, -48, -22, -18
    return [west, east, south, north]
00bbd32e6841c7f3fc7f991f83852619cb4b2ccb
691,761
from typing import OrderedDict


def gen_trialList(*args):
    """Converts an arbitrary number of lists into the trialList datatype.

    trialList datatype is used to create TrialHandler.

    Args:
        (2-tuple): First value should be a list of objects (any type) used
            in trial. Second argument should be a string denoting the name
            of the list.

    Returns:
        list of OrderedDict: used as keyword argument trialList in
        TrialHandler creation.
    """
    if len(args) == 0:
        raise Exception('You need to pass at least one argument.')
    # Validate every (values, name) pair before combining anything.
    for arg in args:
        if type(arg) is not tuple:
            raise TypeError(f'{arg} is not a tuple!')
        if len(arg) != 2:
            raise IndexError(f'{arg} should have length 2!')
        if type(arg[0]) is not list:
            raise TypeError(f'{type(arg[0])} should be a list!')
        if type(arg[1]) is not str:
            raise TypeError(f'{type(arg[1])} should be a string!')
    if len(set([len(arg[0]) for arg in args])) != 1:
        raise IndexError('All lists should be of same size.')
    names = [name for _, name in args]
    columns = [values for values, _ in args]
    # Each row pairs the i-th element of every list with its list's name.
    return [OrderedDict(zip(names, row)) for row in zip(*columns)]
ca4e4716b9688aa2513cc0d32162f013eb898f2f
691,762
def find_room_type(room_id, graph, namespace):
    """Return the roomType of the node with id *room_id* in an AgraphML
    graph, or None when the node or its roomType data is absent."""
    for node in graph.findall(namespace + 'node'):
        if node.get('id') != room_id:
            continue
        for datum in node.findall(namespace + 'data'):
            if datum.get('key') == 'roomType':
                return datum.text
d0c11eb674957d0671c7b8ebdea7b845e2594062
691,763
def divide(x, y):
    """Divide 2 numbers (true division; raises ZeroDivisionError for y == 0)."""
    quotient = x / y
    return quotient
a46d9906da6f9c028ea3f3cb1db67c64775d0d07
691,765
import os


def get_hip_file_path(filepath, hipify_caffe2):
    """Return the new name of the hipified file.

    When *hipify_caffe2* is falsy the path is returned unchanged; otherwise
    'gpu' in the stem becomes 'hip' (or '_hip' is appended), '.cu' becomes
    '.cc', and the file is placed in a 'hip' subdirectory.
    """
    if not hipify_caffe2:
        return filepath

    dirpath, filename = os.path.split(filepath)
    stem, ext = os.path.splitext(filename)

    stem = stem.replace('gpu', 'hip') if 'gpu' in stem else stem + '_hip'
    if ext == '.cu':
        ext = '.cc'

    return os.path.join(dirpath, 'hip', stem + ext)
cdd3748a688338d668ab934823dedd6e8eb76a2c
691,766
def get_indexed_attestation_participants(spec, indexed_att):
    """
    Wrapper around indexed-attestation to return the list of participant
    indices, regardless of spec phase.
    """
    participants = indexed_att.attesting_indices
    return list(participants)
5b37fe2628ec906879905da2ff9e433ac4bc16d3
691,768
def nths(x, n):
    """
    Given a list of sequences, returns a list of all the Nth elements of all
    the contained sequences.
    """
    picked = []
    for seq in x:
        picked.append(seq[n])
    return picked
d37cf578d9fa7d1bdbabe951574b30ea2bb608eb
691,769
def fix_queryselector(elems):
    """Workaround for web components breaking querySelector.

    Each element in *elems* is resolved inside the previous element's
    shadow DOM by chaining shadowRoot.querySelector calls.
    """
    chain = '").shadowRoot.querySelector("'.join(elems)
    return 'return document.querySelector("{}")'.format(chain)
a742afcff9f8dc7e972ede32bb3b712b020a9ed7
691,770
def if_index(ifname):
    """
    Return the interface index for *ifname* if present in the system.

    :param str ifname: name of the network interface
    :returns: index of the interface
    :raises IndexError: if the interface does not exist
    """
    sysfs_path = "/sys/class/net/" + ifname + "/ifindex"
    try:
        with open(sysfs_path) as f:
            return int(f.read().strip())
    except IOError:
        raise IndexError("%s: network interface does not exist" % ifname)
824c8cdf01422c0b4c209833107fdad4b7fd2fc3
691,771
def build_keys_json_object(keys, blob_name, anchor_key, ground_truth_value,
                           extracted_value, confidence, issuer_name,
                           actual_accuracy, extracted_page_number):
    """
    Append one auto-labelling record to the json object.

    :param keys: The json object (dict of 'issuer:blob' -> list of records)
    :param blob_name: The name of the file we are processing
    :param anchor_key: The field we are looking for
    :param ground_truth_value: The ground truth value for the field in question
    :param extracted_value: The value extracted from the invoice
    :param confidence: The confidence score of the extracted value
    :param issuer_name: The unique identifier of the form
    :param actual_accuracy: The score inferred by comparing with the GT data
    :param extracted_page_number: The document page the value came from
    :return: The appended json dict
    """
    record = {
        'key': anchor_key,
        'groundTruthValue': ground_truth_value,
        'extractedValue': extracted_value,
        'confidence': confidence,
        'actualAccuracy': actual_accuracy,
        'pageNumber': extracted_page_number,
    }
    keys[issuer_name + ':' + blob_name].append(record)
    return keys
d5821d22a13a0c7f55eb9c1b2b438e31e34c8836
691,772
def all_target_and_background_names(experiment_proto):
    """Determine names of all molecules for which to calculate affinities.

    Args:
        experiment_proto: selection_pb2.Experiment describing the experiment.

    Returns:
        List of strings giving names of target molecules followed by
        background molecules (backgrounds that are also targets are listed
        only once, as targets).
    """
    # Molecules with a concentration of exactly zero are skipped.
    targets = {
        name
        for round_proto in experiment_proto.rounds.values()
        for name, concentration in round_proto.target_concentrations.items()
        if concentration
    }
    backgrounds = {
        name
        for round_proto in experiment_proto.rounds.values()
        for name, concentration in round_proto.background_concentrations.items()
        if concentration
    }
    return sorted(targets) + [b for b in sorted(backgrounds) if b not in targets]
90d701e1da0ee26b27e8fa9ac178131199d4c8ea
691,773
def get_cleaned_field_name(field_name):
    """Return *field_name* from its first alphabetic character onward
    (for the flat database), or None if it has no alphabetic characters."""
    for pos, ch in enumerate(field_name):
        if ch.isalpha():
            return field_name[pos:]
    return None
be1619eb44ad44a7c2409da516d946d3c99eabbd
691,774
import random


def generate_name(start, markov_chain, max_words=2):
    """Generate a new town name, given a start syllable and a Markov chain.

    This function takes a single start syllable or a list of start syllables,
    one of which is then chosen randomly, and a corresponding Markov chain to
    generate a new fictional town name. The number of words in the name can
    optionally be passed in as an argument and defaults to 2 otherwise.

    Note that it is possible that the generated name already exists. To avoid
    that, one should check whether the name exists against the set of input
    names.

    NOTE(review): assumes markov_chain maps each syllable to a list of
    successors in which the sentinel 0 marks end-of-name — verify against
    the chain builder. With an unlucky chain this retry loop may run many
    times before a name with <= max_words words is produced.
    """
    while True:
        if isinstance(start, list):
            # If start is a list choose a syllable randomly
            next_syllable = random.choice(start)
        else:
            next_syllable = start
        # Initialise new name
        new_name = next_syllable
        while True:
            # Choose next syllable from the Markov chain
            next_syllable = random.choice(markov_chain[next_syllable])
            # Return if end of word has been reached
            if next_syllable == 0:
                break
            else:
                new_name += next_syllable
        # Remove leading and trailing spaces
        new_name = new_name.strip()
        # Make sure name has less words than max_words, otherwise start over
        if len(new_name.split(" ")) <= max_words:
            break
    # Capitalise every word in the new name
    new_name = " ".join([word.capitalize() for word in new_name.split(" ")])
    return new_name
dd40e0ad715bf8957d9bfcfc701997883766f7ca
691,775
def process_wildcard(composition):
    """
    Process elements with a wildcard ``?`` weight fraction and return the
    composition balanced to 1.0 (the remainder is shared equally among the
    wildcard entries).
    """
    balanced = composition.copy()
    wildcard_zs = {z for z, wf in composition.items() if wf == "?"}
    if not wildcard_zs:
        return balanced
    known_total = sum(wf for wf in composition.values() if wf != "?")
    share = (1.0 - known_total) / len(wildcard_zs)
    for z in wildcard_zs:
        balanced[z] = share
    return balanced
b2ab51b96c24fa80301a401bab5111ecfb77b4d0
691,776
def np_slice(matrix, axes={}):
    """
    Slices a matrix along a specific axes
    Returns the new matrix
    """
    # NOTE(review): the `axes` parameter is accepted but never used — the
    # function always returns the trailing 3x3 corner of a 2-D matrix.
    # Confirm whether callers rely on this hard-coded slice before
    # generalizing. (The mutable default `{}` is harmless here because it
    # is never mutated.)
    return matrix[-3:, -3:]
c962db7a18e35056d61bf74c9f6dfc44161fc961
691,777
def sunion_empty(ls):
    """Return the union of a list of sets; the empty set when *ls* is empty."""
    try:
        return set.union(*ls)
    except TypeError:
        # set.union needs at least one set argument; an empty *ls* lands here.
        return set()
4988820c60c6fa7bdb631bbe09d73f21a79dda9d
691,778
def tidyTermList(terms):
    """
    Does a little bit of extra tidying to a term list.

    Args:
        terms (list of strings): List of strings of terms

    Returns:
        list of tidied strings: lower-cased, stripped, longer than 3 chars,
        not starting with a hyphen and free of bracket/comma characters
    """
    cleaned = [t.lower().strip() for t in terms]
    # Drop short terms and terms that start with hyphens.
    cleaned = [t for t in cleaned if len(t) > 3 and not t.startswith('-')]
    # Characters that are not allowed anywhere in a term.
    banned = [',', '(', ')', '[', ']', '{', '}']
    cleaned = [t for t in cleaned if not any(ch in t for ch in banned)]
    return cleaned
cca3b4eb6d8a85064adeedc9e344dcd027b9dcbc
691,779
import re


def find_isomorphs(glycan):
    """returns a set of isomorphic glycans by swapping branches etc.\n
    | Arguments:
    | :-
    | glycan (string): glycan in IUPAC-condensed format\n
    | Returns:
    | :-
    | Returns list of unique glycan notations (strings) for a glycan in IUPAC-condensed
    """
    out_list = [glycan]
    # starting branch swapped with next side branch; skipped when the glycan
    # starts with '[' or contains nested branches (a '[' inside a '[...]')
    if '[' in glycan and glycan.index('[') > 0 and not bool(re.search('\[[^\]]+\[', glycan)):
        glycan2 = re.sub('^(.*?)\[(.*?)\]', r'\2[\1]', glycan, 1)
        out_list.append(glycan2)
    # double branch swap: adjacent '][', branches exchanged
    temp = []
    for k in out_list:
        if '][' in k:
            glycan3 = re.sub('(\[.*?\])(\[.*?\])', r'\2\1', k)
            temp.append(glycan3)
    # starting branch swapped with next side branch again to also include
    # double branch swapped isomorphs
    temp2 = []
    for k in temp:
        if '[' in k and k.index('[') > 0 and not bool(re.search('\[[^\]]+\[', k)):
            glycan4 = re.sub('^(.*?)\[(.*?)\]', r'\2[\1]', k, 1)
            temp2.append(glycan4)
    # de-duplicate across all swap variants (order not guaranteed)
    return list(set(out_list + temp + temp2))
770fe861318658348fa2607b7dda97847b664e5f
691,780
def clean_invalid_data(df):
    """Remove samples where current- or next-day cases are 0, or where the
    two days have the same count; returns a dataframe with inferred dtypes."""
    df = df.loc[df["Cases"] != 0]
    df = df.loc[df["NextDay"] != 0]
    df = df.loc[df["NextDay"] != df["Cases"]]
    # Ensure no object-typed columns remain after filtering.
    return df.infer_objects()
3fb005e77569507afa47c5e40088dc9a90e74913
691,781
import re


def first_item_grabber(the_str: str, re_separator_ptn=";|\-|&#8211;|,|\|", def_return=None):
    """
    From a string containing more than one item separated by separators,
    grab the first.

    >>> first_item_grabber("1987, A1899")
    '1987'
    >>> first_item_grabber("1987;A1899")
    '1987'
    >>> first_item_grabber("1916&#8211;1917[1915&#8211;1917]")
    '1916'
    """
    pieces = re.split(re_separator_ptn, the_str)
    # re.split on a string never yields [], but keep the guard for safety.
    return pieces[0] if pieces else def_return
1b332b28eed5043d0890e862fad884ab72bdf8c7
691,782
import os


def replace_py_ipynb(fname):
    """Replace the .py extension in *fname* by .ipynb.

    :raises ValueError: if *fname* does not end in .py
    """
    prefix, extension = os.path.splitext(fname)
    allowed_extension = '.py'
    if extension != allowed_extension:
        raise ValueError(
            "Unrecognized file extension, expected %s, got %s"
            % (allowed_extension, extension))
    return prefix + '.ipynb'
ec53ef420f2dfaa6721ba50fa72707821166e755
691,783
def getid(dev):
    """
    Gets the id of a device via a USB control transfer.

    @return: id as a colon-separated hex string (e.g. "12:ab"), or None if
        the device returned no bytes
    """
    buf = bytes([0x00, 0x00, 0x00, 0x00, 0x00])
    raw = dev.ctrl_transfer(0xa1, 0x01, 0x0301, 0, buf, 500)
    if len(raw) == 0:
        return None
    return ':'.join(format(byte, '02x') for byte in raw)
e3489362c4baf95e7521252b84107fafadcf5287
691,784
def get_word(word_type):
    """Get a word from a user and return that word."""
    # NOTE: 'adjetive' [sic] is the comparison key callers already use;
    # the spelling is preserved deliberately.
    article = 'an' if word_type == 'adjetive' else 'a'
    prompt = "Enter a word that is {0} {1}: ".format(article, word_type)
    return input(prompt)
a4673bcd84b05a36ef69baecaa12f6eb0979c7a8
691,785
def xlsxExportAdd_tAB(Sheet,Data,rowoffset,coloffset,IName,UName,RName,FName,REName,ALabels,BLabels):
    """
    This function exports a 3D array with aspects time, A, and B to a given
    excel sheet. Same as ExcelExportAdd_tAB but this function is for xlsx
    files with openpyxl.

    The t dimension is exported in one row, the A and B dimensions as several
    rows. Each row starts with IName (indicator), UName (unit), RName
    (region), FName (figure where data are used), REName (Resource efficiency
    scenario), and then come the values for the dimensions A and B and from
    coloffset onwards, the time dimension.

    Function is meant to be used multiple times, so a rowoffset is given,
    incremented, and returned for the next run.

    Sheet      : openpyxl worksheet to write into (1-based row/column cells)
    Data       : 3-D array indexed [t, A, B]; Data.shape[0] is the t length
    rowoffset  : first row to write; the value returned is the next free row
    coloffset  : column before the first time value
    ALabels    : labels along A; one output row per (A, B) combination
    BLabels    : labels along B
    """
    for m in range(0,len(ALabels)):
        for n in range(0,len(BLabels)):
            # Fixed metadata columns 1-7, identical for every t of this row.
            Sheet.cell(row=rowoffset, column=1).value = IName
            Sheet.cell(row=rowoffset, column=2).value = UName
            Sheet.cell(row=rowoffset, column=3).value = RName
            Sheet.cell(row=rowoffset, column=4).value = FName
            Sheet.cell(row=rowoffset, column=5).value = REName
            Sheet.cell(row=rowoffset, column=6).value = ALabels[m]
            Sheet.cell(row=rowoffset, column=7).value = BLabels[n]
            # Time series for this (A, B) pair, one cell per t.
            for t in range(0,Data.shape[0]):
                Sheet.cell(row=rowoffset, column=coloffset + t +1).value = Data[t,m,n]
            rowoffset += 1
    return rowoffset
9df8d34ff24c07ceac5ca97d07001b02e6562bf4
691,786
import argparse


def parser_params():
    """Parse command line parameters: the config path and number of workers."""
    arg_parser = argparse.ArgumentParser(
        description=
        'Code to convert single-scale (or a set of multi-scale) meshes to the neuroglancer multi-resolution mesh format'
    )
    arg_parser.add_argument(
        "config_path",
        type=str,
        help="Path to directory containing run-config.yaml and dask-config.yaml"
    )
    arg_parser.add_argument(
        '--num-workers', '-n',
        type=int,
        default=1,
        help=
        'Number of workers to launch (i.e. each worker is launched with a single bsub command)'
    )
    return arg_parser.parse_args()
860ab112abf3cdca1939ca1a3005accce9001e9a
691,787
def exact_change_dynamic(amount,coins):
    """
    counts[x] counts the number of ways an amount of x can be made in exact
    change out of a subset of coins given in the list of denominations 'coins'.

    >>> exact_change_dynamic(20,[50,20,10,5,2,1])
    [1, 1, 2, 2, 3, 4, 5, 6, 7, 8, 11, 12, 15, 16, 19, 22, 25, 28, 31, 34, 41]
    >>> exact_change_dynamic(100,[100,50,20,10,5,2,1])[-10:]
    [3229, 3376, 3484, 3631, 3778, 3925, 4072, 4219, 4366, 4563]
    """
    # Initially there are no possibilities, if no coins are allowed ...
    counts = [0]*(amount+1)
    # Except: there is 1 way to get a change of 0 ct using no coins
    counts[0] = 1
    # Recalculate counts by allowing additional denominations from coins one by one
    for denomination in coins:
        for x in range(denomination,amount+1):
            # Using an additional coin of 'denomination' we have an
            # additional 'counts[x-denomination]' possibilities.
            # Fix: this accumulation line appeared commented out in the
            # source; without it the function could not satisfy its doctests.
            counts[x] += counts[x-denomination]
    return counts
4a41b270451427a055a54afe346d7df8aa1874c9
691,788
def rubicon_and_project_client_with_experiments(rubicon_and_project_client):
    """Setup an instance of rubicon configured to log to memory with a default
    project with experiments and clean it up afterwards.

    Expose both the rubicon instance and the project.
    """
    rubicon, project = rubicon_and_project_client

    for index in range(10):
        # commit_hash groups experiments in threes: "0","0","0","1",...
        experiment = project.log_experiment(
            tags=["testing"],
            commit_hash=str(int(index / 3)),
            training_metadata=("training", "metadata"),
        )
        experiment.log_parameter("n_estimators", index + 1)
        experiment.log_feature("age")
        experiment.log_metric("accuracy", 80 + index)

    return (rubicon, project)
bb35e31554d019ccf07131078736757c642354ab
691,790
def consoO(R,s,tau,w):
    """Compute the consumption of the old agents.

    Args:
        R (float): gross return on saving
        s (float): savings
        tau (float): percentage of contribution of the wage of the young agent
        w (float): wage

    Returns:
        (float): consumption of the old agents
    """
    savings_income = R * s
    transfer = tau * w
    return savings_income + transfer
522b6b51b50db29b60a5adc473d6cd9cc04a6a3a
691,791
def to_upper_camelcase(lower_underscore: str):
    """Convert underscore naming to upper camelcase.

    Example: rock_type --> RockType
    """
    return "".join(part.capitalize() for part in lower_underscore.split("_"))
a0973c5b2c71e0df622cd6adc516459bf7896ea6
691,792
def normalize(tensor, mean, std):
    """Normalize a ``torch.tensor`` in place, channel by channel.

    Args:
        tensor (torch.tensor): tensor to be normalized; first dimension is
            iterated as channels.
        mean (list): the mean of BGR
        std (list): the std of BGR

    Returns:
        Tensor: the same tensor, normalized in place.
    """
    for channel, channel_mean, channel_std in zip(tensor, mean, std):
        channel.sub_(channel_mean).div_(channel_std)
    return tensor
2dea96d14fd52898bd967725d8805d1ab10ea7cd
691,793
def _read_band_number(file_path): """ :type file_path: Path :return: >>> _read_band_number(Path('reflectance_brdf_2.tif')) '2' >>> _read_band_number(Path('reflectance_terrain_7.tif')) '7' >>> p = Path('/tmp/something/LS8_OLITIRS_NBAR_P54_GALPGS01-002_112_079_20140126_B4.tif') >>> _read_band_number(p) '4' """ number = file_path.stem.split('_')[-1].lower() if number.startswith('b'): return number[1:] return number
e02594f32d87260231951df94bbe8e3d704ddc6b
691,794
def category_sort_key(category):
    """
    Sort key for sorting categories.

    Parameters
    ----------
    category : ``Category``
        A command processor's category.

    Returns
    -------
    sort_key : `str`
        The categories are sorted based on their display name.
    """
    sort_key = category.display_name
    return sort_key
5fe5c32a0cebc1155edcf8674acb438e9352a5fc
691,795
def min_max(x,axis=None):
    """
    Return min-max standardization: x = (x - x.min) / (x.max - x.min),
    so min -> 0 and max -> 1.

    Parameters
    -------------------
    x : numpy.ndarray(x,y)
    axis : int
        0  # calculate over each column
        1  # calculate over each row
        None  # over the whole array

    Returns
    --------------------
    result : np.ndarray(x,y)
    """
    low = x.min(axis=axis, keepdims=True)
    high = x.max(axis=axis, keepdims=True)
    return (x - low) / (high - low)
a7a31bfdda1d6a21a8ee0fbe5148d6cdd53aa60b
691,796
def strength_of_measurement(current, total):
    """
    Percentage of progress of the optimization procedure.

    :param current: int() checked objects for new optimization procedure
    :param total: int() total objects available in doc (this is approximation)
    :return: percentage (true division, so a float on Python 3)
    """
    scaled = current * 100
    return scaled / total
a1033f14ea0335df887b657aefd670f32d055ad7
691,798
from pathlib import Path


def os_release():
    """Return a dict containing a normalized version of /etc/os-release."""
    os_info = {}
    for line in Path('/etc/os-release').read_text().strip().split('\n'):
        key, value = line.split('=', 1)
        # Strip one pair of surrounding double quotes, if present.
        if value.startswith('"') and value.endswith('"'):
            value = value[1:-1]
        os_info[key] = value
    return os_info
727fe8df47bc2fb492d51b7dced44760149e62bc
691,799
def calc_fitness(fit_form, sum_energy, coef_energy, sum_rmsd, coef_rmsd):
    """Calculate the fitness of a pmem.

    Parameters
    ----------
    fit_form : int
        Represents the fitness formula to use. The only value currently
        available is 0, where fitness = CE*SE + Crmsd*Srmsd.
    sum_energy : float
        The summation of all of the individual energy calculations for each
        of the geometries.
    coef_energy : float
        The energy coefficient in the fitness formula.
    sum_rmsd : float
        The summation of all rmsd when comparing pairs of geometries.
    coef_rmsd : float
        The rmsd coefficient in the fitness formula.

    Raises
    ------
    ValueError
        The fit_form value is not available.

    Returns
    -------
    fitness : float
    """
    if fit_form != 0:
        raise ValueError("Unsupported fitness formula.")
    return sum_energy * coef_energy + sum_rmsd * coef_rmsd
7ac64e72dbbdf6caacad73f99061408d12f7df5e
691,800
def encode_sentences(sentences, lexicon_dictionary):
    """
    Change words in sentences into their one-hot index.

    :param sentences: A whitespace-separated string whose words are all in
        lexicon_dictionary
    :param lexicon_dictionary: A dictionary including all the words in the
        dataset sentences are being drawn from.
    :return: list with each word replaced by its number.
    """
    return [lexicon_dictionary[word] for word in sentences.split()]
69af36f02b2b66198f54803072a340c93aaeb31f
691,802
def trim_method_name(full_name):
    """Extract the bare method/function name from a dotted full name.

    e.g. ``RpcResponseResolver.resolveResponseObject`` -> ``resolveResponseObject``

    Args:
        full_name (str): Fully qualified name.

    Returns:
        str: Text after the last dot, or the input unchanged when no dot exists.
    """
    # rpartition returns ('', '', full_name) when no '.' is present,
    # so the last element is always the wanted name.
    return full_name.rpartition('.')[-1]
4783d19103822d68dfbc2c28a7d59acd041216f6
691,803
def permission_denied_page(error):
    """Render a custom 403 response.

    The *error* argument is accepted (framework error-handler signature)
    but not used.
    """
    body, status = "Not Permitted", 403
    return body, status
b7009dc1a45f523f23082f6070bdef0a2bc6a353
691,804
def prepare_df_annoVar(df):
    """Prepare internal dataframe as input to ANNOVAR.

    Duplicates the position column (ANNOVAR wants both a start and an end
    position), then reorders the columns so the first five are the ones
    ANNOVAR requires: chromosome, start, end, reference allele, observed
    allele. Any remaining columns are appended in their original order.
    Note: the input dataframe is mutated (a ``position2`` column is added).

    See annovar.openbioinformatics.org/en/latest/user-guide/input/ for
    details on the expected format.
    """
    df['position2'] = df['position']
    required = ['chromosome', 'position', 'position2', 'allele1', 'allele2']
    present = list(df.columns)
    ordered = [c for c in required if c in present] + \
              [c for c in present if c not in required]
    return df[ordered]
21cf6cc2e884f5351b99ed7e5c6f2942dde6ad0d
691,805
def check_length(line, min=0, max=0):
    """Check that the line length lies in the configured bounds.

    Params:
        line (unicode)
        min (int): inclusive lower bound; 0 disables the check
        max (int): exclusive upper bound; 0 disables the check

    Returns True if the length is ok.
    """
    ok_min = (not min) or len(line) >= min
    ok_max = (not max) or len(line) < max
    return ok_min and ok_max
c0e4b79dc1caeaa94c2af7741f6a7113c0384abf
691,806
from typing import Union
import pathlib


def supplement_file_name(file: Union[str, pathlib.Path], sup: str) -> pathlib.Path:
    """
    Adds a string between the file name in a path and the suffix.

    **Parameters**

    - `file` : str
        File name
    - `sup` : str
        String to be added

    **Returns**

    - `out`: pathlib.Path
        "Supplemented" file
    """
    path = pathlib.Path(file)
    # Insert the supplement between the stem and the (last) suffix.
    new_name = f"{path.stem}_{sup}{path.suffix}"
    return path.with_name(new_name)
1cba9e55939a9c474d9d1a8fffda1023953a457d
691,807
import re


def _translate_trace_all(program):
    """
    Replaces the '%!trace' magic comments in the given program with
    &trace_all rules.

    @param str program: the program that is intended to be modified
    @return str: the program with every ``%!trace {...} head[: body].``
        directive rewritten as an ``&trace_all{...} :- ...`` rule
    """
    # Each hit is a tuple of regex groups:
    #   0: full matched directive (used as the replacement target)
    #   1: label text inside the braces, e.g. "label",v1,v2
    #   2: rule head (optionally with an argument list)
    #   3: optional rule body after ':' (empty string when absent)
    for hit in re.findall("(%!trace \{(.*)\} (\-?[_a-z][_a-zA-Z]*(?:\((?:[\-\+a-zA-Z0-9 \(\)\,\_])+\))?)(?:[ ]*:[ ]*(.*))?\.)", program):
        # 0: original match 1: "label",v1,v2 2: head 3: body.
        program = program.replace(
            hit[0],
            # The head appears twice: inside &trace_all{...} next to the
            # label parameters, and again as the head of the generated rule.
            "&trace_all{{{head},{parameters} : }} :- {head}{rest_body}.\n".format(
                head=hit[2], parameters=hit[1],
                rest_body="," + hit[3] if hit[3] else "")
        )
    return program
aaba6f875f4424ebba1a0b82355a428949d3bd17
691,808
import traceback


def report_error(exception):
    """Build a structured, serializable error report for an exception.

    :param exception: the exception instance to report
    :return: dict with ``isError``, ``type`` and ``message`` keys; the
        message embeds the current traceback (``format_exc``), so call this
        from within an ``except`` block to capture useful context
    """
    details = traceback.format_exc()
    return {
        "isError": True,
        "type": "Unhandled Exception",
        "message": f"ERROR: {exception} {details}",
    }
0a03a4fa0a3c195a6b5f071d44552258a70c098e
691,809
def split_xyz(xyz_file: bytes) -> list[bytes]:
    """Split a multi-structure xyz file into individual conformer decks.

    Each deck is ``natoms`` line + comment line + ``natoms`` coordinate
    lines, re-joined with ``\\n`` (no trailing newline per deck).

    The original implementation popped lines from the front of a list
    (O(n) per pop, quadratic overall); this version scans by index in O(n).
    Assumes the file is well-formed (each declared atom count is backed by
    that many coordinate lines).

    :param xyz_file: raw bytes of a (possibly concatenated) xyz file
    :return: list of per-conformer byte blocks
    """
    lines = xyz_file.splitlines()
    structures = []
    pos = 0
    total = len(lines)
    while pos < total:
        natoms = lines[pos]
        n = int(natoms.decode())
        # One deck = count line + comment line + n coordinate lines.
        deck = lines[pos:pos + 2 + n]
        structures.append(b"\n".join(deck))
        pos += 2 + n
    return structures
4857fc838f4490526eb9fee4f71318b8ab7c06fe
691,810
def results_to_dict(results):
    """Convert parallel result arrays into the dict layout used by json files.

    :param results: mapping with parallel sequences under the keys
        'video-id', 't-start', 't-end', 'label', 'score'
    :return: dict keyed by video id (inserted in sorted order), each value a
        list of {"label", "score", "segment"} records
    """
    # Pre-create the buckets in sorted key order so serialization is stable.
    results_dict = {vid: [] for vid in sorted(set(results['video-id']))}
    records = zip(results['video-id'], results['t-start'], results['t-end'],
                  results['label'], results['score'])
    for vid, start, end, label, score in records:
        results_dict[vid].append({
            "label": int(label),
            "score": float(score),
            "segment": [float(start), float(end)],
        })
    return results_dict
ab42de303a1d039c5605b7828d624234e549d2c0
691,811
def left_justify(words, width):
    """Given an iterable of words, return a string consisting of the words
    left-justified in a line of the given width.

    >>> left_justify(["hello", "world"], 16)
    'hello world     '

    """
    joined = ' '.join(words)
    return joined.ljust(width)
26a2e9f3df582355966959996ae672f60b5c00cc
691,812
def check_validity(mol):
    """Return True if *mol* converts to an rdkit.Mol, False otherwise.

    A molecule is considered invalid exactly when ``mol.to_rdkit()`` raises.

    The original used a bare ``except:``, which also swallows
    ``KeyboardInterrupt``/``SystemExit``; narrowed to ``Exception``.
    """
    try:
        mol.to_rdkit()
    except Exception:
        return False
    return True
9f9f059fb91e346f7fef5c4b54b92023a6c4d4e0
691,813
def civic_vid65():
    """Create a test fixture for CIViC VID65.

    Returns a VariationDescriptor dict for the KIT D816V variant
    (civic.vid:65): the VRS allele value, external xrefs, CIViC extensions
    (representative coordinate, actionability score, variant group),
    HGVS expressions, and the associated gene context id.
    """
    return {
        "id": "civic.vid:65",
        "type": "VariationDescriptor",
        "label": "D816V",
        "description": "KIT D816V is a mutation observed in acute myeloid leukemia (AML). This variant has been linked to poorer prognosis and worse outcome in AML patients.",  # noqa: E501
        "value_id": "ga4gh:VA.EGLm8XWH3V17-VZw7vEygPmy4wHQ8mCf",
        "value": {
            "location": {
                "interval": {
                    "end": 816,
                    "start": 815,
                    "type": "SimpleInterval"
                },
                "sequence_id": "ga4gh:SQ.TcMVFj5kDODDWpiy1d_1-3_gOf4BYaAB",
                "type": "SequenceLocation"
            },
            "state": {
                "sequence": "V",
                "type": "SequenceState"
            },
            "type": "Allele"
        },
        "xrefs": [
            "clinvar:13852",
            "caid:CA123513",
            "dbsnp:121913507"
        ],
        "alternate_labels": [
            "ASP816VAL"
        ],
        "extensions": [
            {
                "name": "civic_representative_coordinate",
                "value": {
                    "chromosome": "4",
                    "start": 55599321,
                    "stop": 55599321,
                    "reference_bases": "A",
                    "variant_bases": "T",
                    "representative_transcript": "ENST00000288135.5",
                    "ensembl_version": 75,
                    "reference_build": "GRCh37"
                },
                "type": "Extension"
            },
            {
                "name": "civic_actionability_score",
                "value": "67",
                "type": "Extension"
            },
            {
                "name": "variant_group",
                "value": [
                    {
                        "id": "civic.variant_group:2",
                        "label": "KIT Exon 17",
                        'type': 'variant_group'
                    }
                ],
                "type": "Extension"
            }
        ],
        "structural_type": "SO:0001583",
        "expressions": [
            {
                "syntax": "hgvs:transcript",
                "value": "NM_000222.2:c.2447A>T",
                "type": "Expression"
            },
            {
                "syntax": "hgvs:protein",
                "value": "NP_000213.1:p.Asp816Val",
                "type": "Expression"
            },
            {
                "syntax": "hgvs:transcript",
                "value": "ENST00000288135.5:c.2447A>T",
                "type": "Expression"
            },
            {
                "syntax": "hgvs:genomic",
                "value": "NC_000004.11:g.55599321A>T",
                "type": "Expression"
            }
        ],
        "gene_context": "civic.gid:29"
    }
e09f1c4068190744c7620b0679c452a8b428913f
691,814
def str_to_bool(value):
    """Represents value as boolean.

    Any value whose lowercased string form is '1', 'true' or 'yes' counts
    as True; everything else is False.

    :param value:
    :rtype: bool
    """
    truthy = {'1', 'true', 'yes'}
    return str(value).lower() in truthy
ff0f2107c3c7758769af2809d51859e016bdd15a
691,815
def encode_classes(y_symbols):
    """
    Encode the classes as numbers.

    :param y_symbols: iterable of class label strings (e.g. chunk tags such
        as 'B-NP', 'I-VP', 'O')
    :return: (y, dict_classes, inv_dict_classes) where y is the numeric
        encoding of y_symbols, dict_classes maps index -> label (indices
        follow the sorted label order, e.g. {0: 'B-ADJP', 1: 'B-ADVP', ...})
        and inv_dict_classes is the inverse label -> index mapping.
    """
    # Unique labels in sorted order define the numbering.
    labels = sorted(set(y_symbols))
    # index -> label
    dict_classes = dict(enumerate(labels))
    # label -> index (inverse mapping)
    inv_dict_classes = {label: idx for idx, label in dict_classes.items()}
    # Encode the input sequence.
    y = [inv_dict_classes[symbol] for symbol in y_symbols]
    return y, dict_classes, inv_dict_classes
298abb185012216b5f3234e6623c75a46bc7f5ac
691,816
import os
import zipfile
import io
import numpy
import pandas


def read_zip(zipfilename, zname=None, **kwargs):
    """
    Reads a :epkg:`dataframe` from a :epkg:`zip` file.
    It can be saved by @see fn read_zip.

    :param zipfilename: a :epkg:`*py:zipfile:ZipFile` or a filename
    :param zname: a filename in zipfile, if None, takes the first one
    :param kwargs: parameters for :func:`pandas.read_csv`
        (or :func:`numpy.load` when *zname* ends with ``.npy``)
    :return: :func:`pandas.DataFrame` or :epkg:`numpy:array`
    """
    if isinstance(zipfilename, str):
        ext = os.path.splitext(zipfilename)[-1]
        if ext != '.zip':
            raise NotImplementedError(  # pragma: no cover
                "Only zip files are supported not '{0}'.".format(ext))
        zf = zipfile.ZipFile(zipfilename, 'r')  # pylint: disable=R1732
        # We opened the archive ourselves, so we must close it on exit.
        close = True
    elif isinstance(zipfilename, zipfile.ZipFile):
        # The caller owns the archive; leave it open when done.
        zf = zipfilename
        close = False
    else:
        raise TypeError(  # pragma: no cover
            "No implementation for type '{0}'".format(type(zipfilename)))
    if zname is None:
        zname = zf.namelist()[0]
    content = zf.read(zname)
    stb = io.BytesIO(content)
    ext = os.path.splitext(zname)[-1]
    if ext == '.npy':
        # Binary numpy dump stored inside the archive.
        df = numpy.load(stb, **kwargs)
    else:
        # Any other member is assumed to be CSV readable by pandas.
        df = pandas.read_csv(stb, **kwargs)
    if close:
        zf.close()
    return df
c32864ce274cbefb7ddcd2b2de113b4a1c4870af
691,817
from typing import Any


def is_property(obj: Any) -> bool:
    """Check whether the given `obj` is a `property` descriptor.

    Parameters:
    - `obj`: The python object to check.

    Returns:
    - `True` if `obj` is (an instance of) `property`, otherwise `False`.
    """
    result = isinstance(obj, property)
    return result
22c20ea7050756a4274822b961811154c6b85210
691,818
def _is_in_bounds(x: int, y: int, width: int, height: int) -> bool:
    """
    Returns whether or not a position lies inside a width x height grid.

    Args:
        x (int): x pos.
        y (int): y pos.
        width (int): max x (exclusive).
        height (int): max y (exclusive).
    """
    return 0 <= x < width and 0 <= y < height
8fc76261972588599b183364b3b8c350389d33c0
691,819
def collect_steps(cls):
    """Collect steps defined in methods.

    Scans all attributes of *cls* for a ``_decor_data`` marker (set by the
    step decorator), stores the markers sorted by their ``index`` in
    ``cls.steps`` and returns the class (usable as a class decorator).
    """
    found = [
        getattr(cls, attr_name)._decor_data
        for attr_name in dir(cls)
        if hasattr(getattr(cls, attr_name), '_decor_data')
    ]
    cls.steps = sorted(found, key=lambda step: step.index)
    return cls
8aa201bf2bf4633281af2cb1d0e7cc89adf5f525
691,820
def data_to_html(title, data):
    """Turns a list of lists into an HTML table.

    Args:
        title: heading shown above the table.
        data: list of rows; the first row is rendered as the header row
            (``<th>`` cells), every following row as data (``<td>`` cells).

    Returns:
        A complete HTML document as a string.
    """
    # HTML Headers
    html_content = """
<html>
<head>
<style>
table {
  width: 25%;
  font-family: arial, sans-serif;
  border-collapse: collapse;
}
tr:nth-child(odd) {
  background-color: #dddddd;
}
td, th {
  border: 1px solid #dddddd;
  text-align: left;
  padding: 8px;
}
</style>
</head>
<body>
"""

    # Add the header part with the given title
    html_content += "<h2>{}</h2><table>".format(title)

    # Add each row in data as a row in the table.
    # The first row is special and becomes the header row.
    for i, row in enumerate(data):
        html_content += "<tr>"
        for column in row:
            if i == 0:
                html_content += "<th>{}</th>".format(column)
            else:
                html_content += "<td>{}</td>".format(column)
        html_content += "</tr>"

    # Close table and document. The original appended a stray "</tr>" here
    # even though every row is already closed, producing malformed HTML;
    # that has been removed.
    html_content += """</table></body></html>"""
    return html_content
c1eb000fd5947fbaa74e1876a6a4f839f5ffe8cf
691,821
from pathlib import Path


def get_file_extension(path):
    """Gets the dot-prefixed extension from the path to a file.

    :param path: Path to the file to get the extension from.
    :type path: str
    :return: The file's extension (empty string when there is none).
    :rtype: str

    Examples
    --------
    >>> get_file_extension('/home/user/file.txt')
    '.txt'
    """
    p = Path(path)
    return p.suffix
8e6e97b0046edf31febbe0c731877ea8ecc5186a
691,823
def is_after(t1, t2):
    """True if t1 is strictly later in the day than t2.

    t1, t2: Time objects with ``hour``, ``minute`` and ``second`` attributes.

    Bug fix: the original required hour AND minute AND second to each be
    greater, which wrongly returns False for e.g. 10:00:00 vs 09:30:00.
    Lexicographic tuple comparison gives the correct time ordering.
    """
    return ((t1.hour, t1.minute, t1.second) >
            (t2.hour, t2.minute, t2.second))
bec06b864152cd7c6857c6c4460f9e47c8e4dde5
691,825
import os


def __get_version_from_version_txt(path):  # pragma: no cover
    """
    private function, tries to find a file ``version.txt`` which should
    contain the version number (if svn is not present)

    @param path folder to look in; the folder of this file and a few of its
        parent folders are checked first, then *path* itself
    @return the version number as an int
    @warning If ``version.txt`` was not found, it raises FileNotFoundError.
    """
    here = os.path.split(__file__)[0]
    candidates = [
        here,
        os.path.join(here, ".."),
        os.path.join(here, "..", ".."),
        os.path.join(here, "..", "..", ".."),
        path,
    ]
    for folder in candidates:
        version_file = os.path.join(folder, "version.txt")
        if os.path.exists(version_file):
            with open(version_file, "r") as handle:
                return int(handle.read().strip(" \n\r\t"))
    raise FileNotFoundError(
        "unable to find version.txt in\n" + "\n".join(candidates))
9de5394f8033d3b63029bcd14bd91f08158cc6fc
691,826
def replace_color(img, color_map):
    """Return a copy of *img* with colors remapped.

    Every pixel whose color equals a ``source`` entry of *color_map*
    (a two-column dataframe of source/target colors, one mapping per row)
    is replaced by the corresponding ``target`` color. The input image is
    left untouched.
    """
    remapped = img.copy()
    for _, row in color_map.iterrows():
        source, target = row
        # Pixels matching the source color across the last (channel) axis.
        mask = (img == source).all(axis=-1)
        remapped[mask] = target
    return remapped
f83ff06ae86f697d3a65b2bfedb265248befb7e5
691,827
def get_e_rtd_default(hs_type):
    """Rated efficiency (default value).

    Args:
        hs_type (str): type of hot-water heating heat-source unit

    Returns:
        float: rated efficiency default for the given unit type

    Raises:
        ValueError: if *hs_type* is not a known unit type
    """
    # Conventional gas units -> 0.81; latent-heat-recovery gas units -> 0.87.
    defaults = {
        'ガス従来型温水暖房機': 0.81,
        'ガス従来型給湯温水暖房機': 0.81,
        'ガス潜熱回収型温水暖房機': 0.87,
        'ガス潜熱回収型給湯温水暖房機': 0.87,
    }
    if hs_type not in defaults:
        raise ValueError(hs_type)
    return defaults[hs_type]
d43516867d3481c8b648402b2556c5f4898e0c41
691,828
def reversed_arguments(func):
    """
    Return a function that calls *func* with its positional
    arguments in reverse order.
    """
    def inner(*args):
        return func(*args[::-1])
    return inner
bfd818c0a87f169c06331f1db4e8e6e31e5546cd
691,829
from typing import Any


def _detokenize_doc(doc: Any) -> str:
    """
    Detokenize a spaCy Doc object back into a string, applying our custom
    replacements as needed. This requires the associated extension to have
    been registered appropriately. The :class:`WordNet` constructor should
    handle registering the extension.
    """
    pieces = (f"{token._.replacement}{token.whitespace_}" for token in doc)
    return "".join(pieces)
eebd953f0496a3715243f7e91b9fccb12031c450
691,830
def get_priority_value_map(all_priorities):
    """
    Maps an index of increasing size to each priority ranging from low -> high

    e.g. given ['LOW', 'MEDIUM', 'HIGH'] will return
    {'LOW': 0, 'MEDIUM': 1, 'HIGH': 2}
    """
    return {
        priority_text.upper(): priority_index
        for priority_index, priority_text in enumerate(all_priorities)
    }
5a3b85f7b6bdd20a3c6cf2cbeac19e9bb3882cf5
691,831
import torch


def val_epoch(model, val_loader, criterion, device):
    """Validate the model for 1 epoch

    Args:
        model: nn.Module
        val_loader: val DataLoader yielding (inputs, labels) batches
        criterion: callable loss function
        device: torch.device

    Returns
    -------
    Tuple[Float, Float]
        average val loss and average val accuracy for current epoch
        (loss is averaged per batch, accuracy per sample of the dataset)
    """
    val_losses = []
    val_corrects = []
    # eval mode: disables dropout, uses running batch-norm statistics
    model.eval()
    # Iterate over data without building autograd graphs
    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            # prediction
            outputs = model(inputs)
            # calculate loss; predicted class = argmax over class dimension
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)
            # statistics: per-batch loss, per-batch correct count
            val_losses.append(loss.item())
            val_corrects.append(torch.sum(preds == labels.data).item())
    return sum(val_losses)/len(val_losses), sum(val_corrects)/len(val_loader.dataset)
80576b4181f08a2a35276a78a143bbf59233dd9c
691,832
def compute_error(b, m, coordinates):
    """
    Mean squared error of the line y = m*x + b over the given points.

    m is the coefficient (slope) and b is the constant (intercept);
    coordinates is a sequence of (x, y) locations. The goal elsewhere is to
    find the m, b pair minimizing this value.
    """
    total_error = sum(
        (point[1] - (m * point[0] + b)) ** 2 for point in coordinates
    )
    return total_error / float(len(coordinates))
c300a137e3fe75ee2c9a23265d1523a96907d7f7
691,833
import torch


def _bilinear_interpolation_vectorized(
    image: torch.Tensor, grid: torch.Tensor
) -> torch.Tensor:
    """
    Bi linearly interpolate the image using the uv positions in the flow-field
    grid (following the naming conventions for torch.nn.functional.grid_sample).

    This implementation uses the same steps as in the SoftRas cuda kernel to
    make it easy to compare. This vectorized version requires less memory than
    _bilinear_interpolation_grid_sample but is slightly slower.
    If speed is an issue and the number of faces in the mesh and texture image
    sizes are small, consider using _bilinear_interpolation_grid_sample instead.

    Args:
        image: FloatTensor of shape (H, W, D) a single image/input tensor with D
            channels.
        grid: FloatTensor of shape (N, R, R, 2) giving the pixel locations of the
            points at which to sample a value in the image. The grid values must
            be in the range [0, 1]. u is the x direction and v is the y direction.

    Returns:
        out: FloatTensor of shape (N, H, W, D) giving the interpolated
            D dimensional value from image at each of the pixel locations in grid.
    """
    H, W, _ = image.shape
    # Convert [0, 1] to the range [0, W-1] and [0, H-1]
    grid = grid * torch.tensor([W - 1, H - 1]).type_as(grid)
    # Fractional part -> interpolation weights toward the upper neighbor.
    weight_1 = grid - grid.int()
    weight_0 = 1.0 - weight_1

    grid_x, grid_y = grid.unbind(-1)
    y0 = grid_y.to(torch.int64)
    y1 = (grid_y + 1).to(torch.int64)
    x0 = grid_x.to(torch.int64)
    x1 = x0 + 1
    # NOTE(review): x1/y1 are not clamped — a grid value of exactly 1.0 maps
    # to index W (resp. H), one past the last pixel. Presumably upstream
    # guarantees grid values strictly below 1.0; TODO confirm.

    weight_x0, weight_y0 = weight_0.unbind(-1)
    weight_x1, weight_y1 = weight_1.unbind(-1)

    # Bi-linear interpolation
    # grid positions = [[y, x],     [(y+1), x]
    #                   [y, (x+1)], [(y+1), (x+1)]]
    # weights = [[wx0*wy0, wx0*wy1],
    #            [wx1*wy0, wx1*wy1]]
    out = (
        image[y0, x0] * (weight_x0 * weight_y0)[..., None]
        + image[y1, x0] * (weight_x0 * weight_y1)[..., None]
        + image[y0, x1] * (weight_x1 * weight_y0)[..., None]
        + image[y1, x1] * (weight_x1 * weight_y1)[..., None]
    )

    return out
b9e3596f1e3d98bb598e74cf3f1c142b376b79a9
691,834
def get_ds_data(ds, target_attribute='targets'):
    """Return (X, y) data from a pymvpa dataset.

    X is ``ds.samples``; y is the ``value`` of the requested sample
    attribute (default ``'targets'``).
    """
    samples = ds.samples
    targets = ds.sa[target_attribute].value
    return samples, targets
f96a2bf87b18e53961c9abf99e24c2f22730461b
691,835
def create_db_strs(txt_tuple_iter):
    """
    From an iterable containing DB info for records in DB, or the literal
    string 'not in DB' when no records were found, return the info formatted
    as a single string.

    :param txt_tuple_iter: an iterable of tuples where element 0 of each
        tuple is the gene/name and element 1 is the accession/ID number of
        the instance (single-element tuples are allowed), or 'not in DB'.
    :return: string containing the info for each entry (str)
    """
    # A value can be [('Q8WYB5',)] or 'not in DB' or
    # [('Q8WYB6',), ('Q8WYB7',)]
    if txt_tuple_iter == 'not in DB':
        return txt_tuple_iter
    # Each record's fields are joined with '/', records with ', '.
    return ', '.join(
        '/'.join(str(field) for field in record) for record in txt_tuple_iter
    )
57d80e04bab4cc00cf5fc297ef70d03aa100f5cd
691,836
def check_bram(test_data,layernumber):
    """
    Compare the BRAM dump files of one layer against the expected data.

    Reads ``tmp/l<layernumber>_bram<i>.txt`` for every block *i* and checks
    each value against ``test_data``, printing a message for every mismatch
    and a summary at the end.

    Parameters
    ----------
    test_data : numpy array [B,W*H,Ci]
        Data to check. Content of BRAM
    layernumber : integer
        Number of layer

    Returns
    -------
    error_count : integer
        Number of errors.
    """
    # NOTE(review): BLOCK_SIZE comes from axis 1 while the per-file line loop
    # runs over shape[0]*2 lines; this appears to assume a fixed relation
    # between the two dimensions — confirm against the dump-file generator.
    BLOCK_SIZE = test_data.shape[1]
    error_count = 0
    for i in range(test_data.shape[0]):
        with open("tmp/l{}".format(layernumber) + "_bram{}.txt".format(i),"r") as f:
            for j in range(test_data.shape[0]*2):
                # Double-buffer half selection: 0 for odd i, 1 for even i
                # (0-based), deciding which half of the file maps to which
                # slice of test_data below.
                block_select = 1-(i+1)%2
                read_data = f.readline().rstrip()
                result_data = [int(g) for g in read_data.split(' ')]
                for k in range(test_data.shape[2]):
                    if block_select == 0 and j<BLOCK_SIZE:
                        # First half holds the current block's data.
                        if result_data[k] != test_data[i,j,k]:
                            print("Error in block {}".format(i) + " channel {}".format(k) + " in line {} ,".format(j+block_select*BLOCK_SIZE) \
                                + "{}".format(result_data[k]) + " != {}".format(test_data[i,j,k]))
                            error_count += 1
                    elif block_select == 0 and j>=BLOCK_SIZE and i==0:
                        # Second half of the very first block must still be zero.
                        if result_data[k] != 0:
                            print("Error in block {}".format(i) + " channel {}".format(k) + " in line {} ,".format(j+block_select*BLOCK_SIZE) \
                                + "{}".format(result_data[k]) + " != {}".format(0))
                            error_count += 1
                    elif block_select == 0 and j>=BLOCK_SIZE:
                        # Second half keeps the previous block's data.
                        if result_data[k] != test_data[i-1,j-BLOCK_SIZE,k]:
                            print("Error in block {}".format(i) + " channel {}".format(k) + " in line {} ,".format(j+block_select*BLOCK_SIZE) \
                                + "{}".format(result_data[k]) + " != {}".format(test_data[i-1,j-BLOCK_SIZE,k]))
                            error_count += 1
                    elif block_select == 1 and j<BLOCK_SIZE:
                        # Mirrored layout: first half holds previous block.
                        if result_data[k] != test_data[i-1,j,k]:
                            print("Error in block {}".format(i) + " channel {}".format(k) + " in line {} ,".format(j+block_select*BLOCK_SIZE) \
                                + "{}".format(result_data[k]) + " != {}".format(test_data[i-1,j,k]))
                            error_count += 1
                    elif block_select == 1 and j>=BLOCK_SIZE:
                        # Mirrored layout: second half holds current block.
                        if result_data[k] != test_data[i,j-BLOCK_SIZE,k]:
                            print("Error in block {}".format(i) + " channel {}".format(k) + " in line {} ,".format(j+block_select*BLOCK_SIZE) \
                                + "{}".format(result_data[k]) + " != {}".format(test_data[i,j-BLOCK_SIZE,k]))
                            error_count += 1
                    else:
                        print("Error in porgram")
    if error_count == 0:
        print("No errors in BRAM")
    else:
        print("{} errors occured checking BRAM".format(error_count))
    return error_count
29118648ad61642e55bc0b6849470b770c2ce741
691,837
def isfloat(x, num_only=False):
    """Return True if *x* is a float, False otherwise.

    Parameters
    ----------
    x : object
        Value to check.
    num_only : bool, optional
        Unused; retained for backward compatibility with existing callers.

    Notes
    -----
    Uses ``isinstance`` instead of the ``type(x) == float`` anti-pattern,
    so instances of float subclasses are also accepted.
    """
    return isinstance(x, float)
a92681e497574bbcb02907ac94240ed47088973c
691,838
def getFrameLevelDisplacements(nodeFound, start, finish):
    """
    Extract per-frame X,Y,Z coordinates for one body part.

    Reads the translation column (last column, rows 0-2) of the 4x4
    transform stored for each frame in ``nodeFound.trtr``.

    Args:
        nodeFound (object): joint object for the targeted body part
        start (int): starting frame number (inclusive)
        finish (int): ending frame number (exclusive)

    Returns:
        list: one [X, Y, Z] list per frame in the requested range
    """
    return [
        [nodeFound.trtr[frame][row][3] for row in range(3)]
        for frame in range(start, finish)
    ]
7c05dcc901d8a0525e4983ce2301a4d40ef2a542
691,839
from datetime import datetime


def get_datetime_object(datetime_string):
    """
    Interpret the UltraSuite prompt date and time string
    (``DD/MM/YYYY HH:MM:SS``) as a python datetime object.

    :param datetime_string: timestamp text to parse
    :return: datetime
    """
    fmt = '%d/%m/%Y %H:%M:%S'
    return datetime.strptime(datetime_string, fmt)
35fe2c9056f28d3d8dbe963121cd8ce93e36550f
691,840
def GetDuplicateRefcodes(sDuplicateRefcodeFileName):
    """Load duplicate refcodes.

    Reads a whitespace-separated list of refcodes from the given file and
    returns a dict mapping each refcode to 1 (membership table).
    """
    with open(sDuplicateRefcodeFileName, "r") as handle:
        # split() with no arguments ignores leading/trailing whitespace
        # and splits on any whitespace run.
        codes = handle.read().split()
    return {code: 1 for code in codes}
f1ac1cf95954f4923dcdff43bc43dfe80ff9e6ed
691,841
import operator


def get_best_individual_in_population(number_of_individual_to_keep, grades, population):
    """
    -> return the best individuals in the population
    -> number_of_individual_to_keep is the number of best candidates to retrieve
    -> grades is a tuple generated by grade_population(); grades[1] maps
       individual id -> grade
    -> population is a list of individuals (each exposing an ``_id``)
    The returned individuals keep their original population order.
    """
    ranked = sorted(grades[1].items(), key=operator.itemgetter(1), reverse=True)
    keep_ids = {ind_id for ind_id, _ in ranked[:number_of_individual_to_keep]}
    return [individual for individual in population if individual._id in keep_ids]
b498e1b0660995e51cd975c0653ab84df7a44642
691,843
def reward_val(upn2, upn1, ufn2, ufn1, jain, param, uav_r, uav_f, list_up, list_uf, list_sum):
    """
    Compute the reward from current/previous throughput of the primary and
    fusion (emergency) networks.

    The combined current throughput ``upn2 + ufn2`` is compared against the
    historical maximum of ``list_sum`` (treated as 0 while the history holds
    a single entry). Positive improvements are amplified tenfold; losses are
    passed through unchanged.

    :param upn2: current-state throughput of the primary network
    :param upn1: previous-state throughput of the primary network
    :param ufn2: current-state throughput of the fusion network
    :param ufn1: previous-state throughput of the fusion network
    :param jain: unused, kept for interface compatibility
    :param param: unused, kept for interface compatibility
    :param uav_r: unused, kept for interface compatibility
    :param uav_f: unused, kept for interface compatibility
    :param list_up: unused, kept for interface compatibility
    :param list_uf: unused, kept for interface compatibility
    :param list_sum: array-like (with a ``size`` attribute) of historical
        summed throughputs of both networks
    :return: (reward, delta primary throughput, delta fusion throughput)
    """
    deltaupn = upn2 - upn1
    deltaufn = ufn2 - ufn1
    max_sum = 0 if list_sum.size == 1 else max(list_sum)
    gain = ufn2 + upn2 - max_sum
    if gain > 0:
        reward = 10 * gain
    elif gain < 0:
        reward = gain
    else:
        reward = 0
    return reward, deltaupn, deltaufn
f34a3b6d08fc131b9c9197e20c4b0b57a56c5377
691,844