content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import textwrap
import yaml


def fio_job_dict():
    """Return a Kubernetes Job manifest (as a dict) for running fio workloads.

    The Job runs fio with JSON output against a PVC mounted at /mnt/target,
    reading its workload definition from a ConfigMap mounted at /etc/fio.

    NOTE(review): the YAML literal's internal indentation was reconstructed
    from a whitespace-mangled source -- verify against the original manifest.

    :returns: dict parsed from the Job YAML via ``yaml.safe_load``.
    """
    template = textwrap.dedent("""
        apiVersion: batch/v1
        kind: Job
        metadata:
          name: fio
        spec:
          template:
            metadata:
              name: fio
            spec:
              containers:
                - name: fio
                  image: quay.io/johnstrunk/fs-performance:latest
                  command:
                    - "/usr/bin/fio"
                    - "--output-format=json"
                    - "/etc/fio/workload.fio"
                  volumeMounts:
                    - name: fio-target
                      mountPath: /mnt/target
                    - name: fio-config-volume
                      mountPath: /etc/fio
              restartPolicy: Never
              volumes:
                - name: fio-target
                  persistentVolumeClaim:
                    claimName: fio-target
                - name: fio-config-volume
                  configMap:
                    name: fio-config
        """)
    job_dict = yaml.safe_load(template)
    return job_dict
7067d09ae7227ddf1d500f2943fa43ef7f973b87
693,161
def sieve_of_eratosthene(num):
    """Compute all primes below ``num`` using the sieve of Eratosthenes.

    :param num: exclusive upper bound for the primes.
    :returns: list of primes ``p`` with ``2 <= p < num``.
    """
    if num < 2:
        # Guard: the original crashed with IndexError on num < 2.
        return []
    sieve = list(range(num))
    sieve[1] = 0  # non-primes are replaced by zeros; 0 stays 0 at index 0
    for checked_num in range(2, num):
        # Check the live sieve (the original iterated a stale copy and
        # re-processed multiples of already-eliminated composites).
        if sieve[checked_num] != 0:
            multiplier = checked_num * 2
            while multiplier < num:
                sieve[multiplier] = 0
                multiplier += checked_num
    return [n for n in sieve if n != 0]
7fc8499b4f7d04a0cd94ce67b2a135cbd27faaa9
693,162
def _format_range_unified(start, stop): """Convert range to the "ed" format""" beginning = start + 1 length = stop - start if length == 1: return '{}'.format(beginning) if not length: beginning -= 1 return '{},{}'.format(beginning, length)
a6e44924bbb745082a07385cd43dbf03efa239cb
693,163
def greeting(name: str) -> str:
    """Construct a greeting.

    :param name: name of the person or object to greet (may be empty)
    :return: the greeting string
    """
    if name:
        return f"Hello, {name}!"
    return "Hello!"
16401457f2640e5495a6e932e0188704645ef578
693,164
def is_literal_value_packet(type: int) -> bool:
    """Check whether *type* is the literal-value packet type id."""
    LITERAL_VALUE_TYPE = 4
    return type == LITERAL_VALUE_TYPE
cf717881a446d9b8e4472543e92f45f8147fe89a
693,165
def changes_between_snapshots(before_dict, after_dict):
    """Diff two artifact snapshots (dicts of name -> hash).

    :returns: tuple of sets ``(unchanged, modified, added, removed)``
        of artifact names, comparing *after_dict* against *before_dict*.
    """
    before = set(before_dict)
    after = set(after_dict)
    common = before & after
    unchanged_artifacts = {name for name in common
                           if before_dict[name] == after_dict[name]}
    modified_artifacts = common - unchanged_artifacts
    return (unchanged_artifacts, modified_artifacts,
            after - before, before - after)
5bf02354c99179fbc3492eed37f8268f6c93b5c8
693,166
def kernel_name():
    """Name of the current jupyter kernel on the activated conda
    environment ("python3" unless manually modified)."""
    return "python3"
9425a22dc482c5850bf01f17b7c0ca9cba30c3b0
693,168
def filename2frame(fn):
    """Unroll a shorthand CSV into two parallel lists (dates, words).

    Each CSV row is ``date,word0,word1,...,wordn`` with a variable number
    of words. Every first occurrence of a word is paired with the date of
    the row it first appeared on.

    :param fn: path to the CSV file.
    :returns: (date_list, word_list) of equal length.
    """
    # fix: the original leaked the file handle (open() without close).
    with open(fn, 'r') as handle:
        rows = handle.read().splitlines()
    date_list = []
    word_list = []
    for row in rows:
        columns = row.split(',')
        # columns[0] is always the date; the rest are words.
        for word in columns[1:]:
            if word not in word_list:  # dedup assumes CSV is chronological
                date_list.append(columns[0])
                word_list.append(word)
    return date_list, word_list
61632a67d40f86969b6a7877117d579453010c1c
693,169
def transform(dict_, typed_dict) -> dict:
    """Convert values in the given dictionary to the corresponding types
    declared on a TypedDict.

    :param dict_: raw mapping of field name -> (typically string) value.
    :param typed_dict: TypedDict class; its ``__annotations__`` supply the
        target constructor for each field.
    :returns: dict of converted values; unconvertible empty strings become
        None.
    """
    fields = typed_dict.__annotations__
    try:
        # Fast path: convert every field in one comprehension.
        data = {name: fields[name](value) for name, value in dict_.items()}
    except ValueError as ve:
        # NOTE(review): "Go" appears to be a typo for "Got" in this message.
        print(f'Go ValueError for {dict_.items()}\n{ve}')
        data = dict()
        # Slow path: retry field by field so one bad value does not discard
        # the rest of the record.
        for key, value in dict_.items():
            try:
                data[key] = fields[key](value)
            except ValueError:
                # The data is maintained by a community and sometimes the data types are mismatching
                # Handle string -> float -> int type conversion
                print(f'value: {value} | type(value): {type(value)}')
                if len(value) > 0:
                    print(f'Float population found: {value}')
                    # e.g. "1.0" fails int() directly; go through float first.
                    data[key] = int(float(value))
                else:
                    data[key] = None
        print(f'modified data: {data}')
    return data
3a454f57adf22055dfba64390423da04153afa1f
693,170
def check_error(error_code):
    """Convert the UCLCHEM integer result flag to a message explaining
    what went wrong.

    Args:
        error_code (int): Error code returned by UCLCHEM models, the first
            element of the results list.

    Returns:
        str: Error message

    Raises:
        ValueError: if *error_code* is not a known UCLCHEM error code.
    """
    errors = {
        -1: "Parameter read failed. Likely due to a mispelled parameter name, compare your dictionary to the parameters docs.",
        -2: "Physics intiialization failed. Often due to user chosing unacceptable parameters such as hot core masses or collapse modes that don't exist. Check the docs for your model function.",
        -3: "Chemistry initialization failed",  # this doesn't exist yet
        -4: "Unrecoverable integrator error, DVODE failed to integrate the ODEs in a way that UCLCHEM could not fix. Run UCLCHEM tests to check your network works at all then try to see if bad parameter combination is at play.",
        -5: "Too many integrator fails. DVODE failed to integrate the ODE and UCLCHEM repeatedly altered settings to try to make it pass but tried too many times without success so code aborted to stop infinite loop.",
    }
    try:
        return errors[error_code]
    except KeyError:
        # Narrowed from a bare `except:` so unrelated errors propagate.
        raise ValueError(f"Unknown error code: {error_code}") from None
6d01e98b019b21b2de6f6449fad646fbc275b44d
693,171
import torch


def inference_collate_batch(batch):
    """Collate (feat_path, mel) pairs into a batch.

    :param batch: iterable of (feat_path, mel-tensor) pairs.
    :returns: (tuple of paths, stacked mel tensor).
    """
    feat_paths = tuple(item[0] for item in batch)
    mels = [item[1] for item in batch]
    return feat_paths, torch.stack(mels)
a2ecd6ef4ea634ac453fa7e5cafce97c0dddcb9f
693,174
def createOrderList(wantedOrder, currentOrder):
    """Create an order list that can transform currentOrder to wantedOrder
    by applying applyOrderList function.

    An order list gives the position of each desired element in the
    current list, e.g. ``createOrderList([1,2,3], [3,1,2,4,6])`` returns
    ``[1, 2, 0]``.

    :returns: list of indices (the original returned a single-use ``map``
        iterator in Python 3, contradicting the docstring's example).
    """
    return [currentOrder.index(item) for item in wantedOrder]
3456e04043d2a7a02de7daa67894546f7b50812d
693,175
def track_closed(cls):
    """Wrap a queue class so that calls to close() are recorded.

    :param cls: class exposing a ``close()`` method.
    :returns: subclass whose instances carry a ``closed`` flag.
    """
    class TrackingClosed(cls):
        def __init__(self, *args, **kwargs):
            super(TrackingClosed, self).__init__(*args, **kwargs)
            # Starts open; flipped permanently once close() runs.
            self.closed = False

        def close(self):
            super(TrackingClosed, self).close()
            self.closed = True

    return TrackingClosed
744feddb4b9e9cf150331c413b6b8bc72ee4faa5
693,176
def add_vectors(vector_1, vector_2):
    """Example function. Sums same-index elements of two numeric lists.

    Parameters
    ----------
    vector_1 : list
        List of ints or floats
    vector_2 : list
        List of ints or floats

    Returns
    -------
    list
        Element-wise sum (length of the shorter input).

    Notes
    -----
    This is NOT good Python, just an example function for tests.
    """
    return [a + b for a, b in zip(vector_1, vector_2)]
87b7dbd1498d0661887b1e70df2f349ff21b7ec3
693,177
def parse_scoped_project_queue(scoped_name):
    """Returns the project and queue name for a scoped catalogue entry.

    :param scoped_name: a project/queue as given by :scope_queue_name:
    :type scoped_name: str
    :returns: (project, queue)
    :rtype: (str, six.text_type)
    """
    separator = '/'
    return scoped_name.split(separator)
6ebf16b203aef62f43e2a4c34e4dbc2d7b52ac52
693,178
def scan_row(row):
    """Get the first and last shaded (truthy) column indices in a row.

    :param row: iterable of cell values; truthy means shaded.
    :returns: (first, last) indices; (0, 0) when no cell is shaded.

    Fix: the original used 0 as a "not found" sentinel, so a row shaded
    at column 0 reported a later column as its start.
    """
    start = None
    end = 0
    for col, value in enumerate(row):
        if value:
            if start is None:
                start = col
            end = col
    return (start if start is not None else 0, end)
e8c4dcda56f20c52387cf1ea3be425c71df37de3
693,179
def Flickr30k_sentence_data(fn):
    """
    Parses a sentence file from the Flickr30K Entities dataset.

    :param fn: full file path to the sentence file to parse.
    :return: a list of dictionaries for each sentence with the following fields:
          sentence - the original sentence
          phrases - a list of dictionaries for each phrase with the
                    following fields:
                      phrase - the text of the annotated phrase
                      first_word_index - the position of the first word of
                                         the phrase in the sentence
                      phrase_id - an identifier for this phrase
                      phrase_type - a list of the coarse categories this
                                    phrase belongs to
    """
    with open(fn, 'r') as f:
        sentences = f.read().split('\n')

    annotations = []
    for sentence in sentences:
        if not sentence:
            continue
        # Per-sentence accumulators, aligned by position in the sentence.
        first_word = []
        phrases = []
        phrase_id = []
        phrase_type = []
        words = []
        current_phrase = []
        add_to_phrase = False
        for token in sentence.split():
            if add_to_phrase:
                # Inside a bracketed phrase; ']' at token end closes it.
                if token[-1] == ']':
                    add_to_phrase = False
                    token = token[:-1]
                    current_phrase.append(token)
                    phrases.append(' '.join(current_phrase))
                    current_phrase = []
                else:
                    current_phrase.append(token)
                words.append(token)
            else:
                if token[0] == '[':
                    # Opening marker like "[/EN#id/type"; the token itself
                    # is metadata, not a sentence word.
                    add_to_phrase = True
                    first_word.append(len(words))
                    parts = token.split('/')
                    # parts[1] is "EN#<id>"; strip the "EN#" prefix.
                    phrase_id.append(parts[1][3:])
                    phrase_type.append(parts[2:])
                else:
                    words.append(token)

        sentence_data = {'sentence': ' '.join(words), 'phrases': []}
        for index, phrase, p_id, p_type in zip(first_word, phrases,
                                               phrase_id, phrase_type):
            sentence_data['phrases'].append({'first_word_index': index,
                                             'phrase': phrase,
                                             'phrase_id': p_id,
                                             'phrase_type': p_type})
        annotations.append(sentence_data)

    return annotations
6f183ba3e847a9f7f4fb6c97e484b9c4e4d6b79f
693,180
def convert_to_base(decimal_number, base, fill=0):
    """Convert a non-negative decimal integer to a digit string in ``base``.

    :param decimal_number: non-negative integer to convert.
    :param base: target base, 2..16 (digits a-f for 10..15).
    :param fill: zero-pad the result to at least this many characters.
    :returns: the digit string.

    Fix: the original returned '' (or only padding) for input 0.
    """
    DIGITS = '0123456789abcdef'
    if decimal_number == 0:
        return '0'.zfill(fill)
    digits = []
    while decimal_number > 0:
        decimal_number, remainder = divmod(decimal_number, base)
        digits.append(DIGITS[remainder])
    # Digits were produced least-significant first.
    return ''.join(reversed(digits)).zfill(fill)
7c7f4e89636533b473c6e7a6712750d4abe02833
693,181
def is_avcs_table(table):
    """True if *table* is a table of AVC (attribute value change)
    notifications, judged by its short_title prefix."""
    prefix = 'attribute value change'
    return table.short_title[:len(prefix)].lower() == prefix
10ea2296d0ab8a64f531cb6d49c6832f900b896b
693,182
import math


def pos_transform_resize(angle, x, y, w, h):
    """
    Map a point of an image to its location after rotating the image by
    *angle* with expansion (the output canvas grows to fit the rotated
    image).

    :param angle: the rotate angle in degrees
    :param x: coordinate x of any point in the original image
    :param y: coordinate y of any point in the original image
    :param w: width of the original image
    :param h: height of the original image
    :return: transformed coordinate (tx, ty)
    """
    angle = angle * math.pi / 180
    # Flat 2x3 affine matrix [a, b, c, d, e, f]; the translation terms
    # c and f are filled in after the output size is known.
    matrix = [
        -math.sin(angle), math.cos(angle), 0.0,
        math.cos(angle), math.sin(angle), 0.0
    ]

    def transform(x, y, matrix=matrix):
        a, b, c, d, e, f = matrix
        return a * x + b * y + c, d * x + e * y + f

    # Calculate output size: transform the four corners, take their bbox.
    xx = []
    yy = []
    for x_, y_ in ((0, 0), (w, 0), (w, h), (0, h)):
        x_, y_ = transform(x_, y_)
        xx.append(x_)
        yy.append(y_)
    ww = int(math.ceil(max(xx)) - math.floor(min(xx)))
    hh = int(math.ceil(max(yy)) - math.floor(min(yy)))

    # Adjust center: translate so the rotated image center lands at the
    # center of the expanded canvas.
    cx, cy = transform(w / 2.0, h / 2.0)
    matrix[2] = ww / 2.0 - cx
    matrix[5] = hh / 2.0 - cy

    tx, ty = transform(x, y)
    return tx, ty
2226cc49d486e630fe1430c5bb9a4634b0ca7393
693,183
def valid_conversion(val, type_to_convert):
    """Check whether *val* can be converted to *type_to_convert*.

    :param val: value to test.
    :param type_to_convert: a type (e.g. ``int``, ``float``).
    :returns: True when ``type_to_convert(val)`` succeeds, else False.
    :raises TypeError: if *type_to_convert* is not a type.
    """
    if not isinstance(type_to_convert, type):
        raise TypeError(f"expected a type, got {type_to_convert!r}")
    try:
        type_to_convert(val)
    except (ValueError, TypeError):
        # TypeError added: e.g. int(None) previously escaped as an
        # unhandled exception instead of reporting "not convertible".
        return False
    return True
6dddf95d633c55b63e1ed96a7efe3e8a7c108045
693,184
import struct


def unpack_ibm32(bytearray_: bytearray, endian: str) -> float:
    """Unpack 4 bytes holding an IBM single-precision floating point value.

    Layout: 1 sign bit, 7 exponent bits (base 16, bias 64), 24 fraction
    bits. Value = (-1)**sign * fraction * 16**(exponent - 64).

    :param bytearray_: the 4 raw bytes.
    :param endian: struct byte-order character ('<' or '>').
    :returns: the decoded float.
    """
    # Read all 32 bits as an unsigned long, then slice out the fields.
    raw = struct.unpack(endian + 'L', bytearray_)[0]
    sign = raw >> 31
    exponent = (raw >> 24) & 0b1111111
    fraction = (raw & 0b111111111111111111111111) / float(pow(2, 24))
    return (1 - 2 * sign) * fraction * pow(16, exponent - 64)
b1419d000df0c7c4c1a4cf74cc3380a94a35bfad
693,185
def extract_id_from_href(href):
    """Extract the trailing id from an href.

    'https://vmware.com/api/admin/user/123456' yields '123456'.

    :param str href: an href (may be empty or None)
    :return: the id segment, the input itself when it has no '/', or
        None for falsy input.
    """
    if not href:
        return None
    if '/' not in href:
        return href
    return href.rsplit('/', 1)[-1]
8dc39612a6fba2d7524469bce5e9066544722ab6
693,186
from typing import List


def validate(passport_list: List[str]) -> int:
    """Count valid passports in a passport file given as lines.

    A passport is a run of non-blank lines of space-separated ``key:value``
    pairs; a blank line ends it. A passport is valid when it contains all
    required keys: byr, iyr, eyr, hgt, hcl, ecl, pid (cid is optional).

    Args:
        passport_list (List[str]): the passport file in list form

    Returns:
        number of valid passports.
    """
    REQUIRED = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}
    current = {}
    valid = 0
    for line in passport_list:
        if line:
            for pair in line.split(' '):
                key, _, value = pair.partition(':')
                current[key] = value
            continue
        # Blank line terminates the current passport record.
        valid += REQUIRED.issubset(current)
        current = {}
    # Flush a trailing passport not followed by a blank line.
    if current and REQUIRED.issubset(current):
        valid += 1
    return valid
7ddf44cd6d558b82b90f14db8b481e07b1cad4d1
693,187
import argparse


def arg_parser():
    """Parse command-line arguments for the update_template tool."""
    parser = argparse.ArgumentParser(prog='update_template')
    for name, description in (('nxsfile', 'The nexus file'),
                              ('plugin', 'Plugin name'),
                              ('param', 'plugin parameter name')):
        parser.add_argument(name, help=description)
    return parser.parse_args()
a9b809878d3ae91b4eafa1039983f3d7af2564ba
693,188
import math


def variation_distance(data1, data2):
    """Total variation distance between the distributions represented by
    *data1* and *data2* (dicts mapping outcome -> weight).

    Missing keys count as weight 0.
    """
    all_keys = set(data1) | set(data2)
    total = sum(math.fabs(data1.get(key, 0) - data2.get(key, 0))
                for key in all_keys)
    return .5 * total
813b357fe08ae3f027cf39a5f2b166d21774b644
693,189
def zip_author(author):
    """Expand an author record into one row per affiliation key.

    Input ``[first_name, last_name, [key1, key2]]`` becomes
    ``[[first_name, last_name, key1], [first_name, last_name, key2]]``.
    """
    first_name, last_name = author[0], author[1]
    return [[first_name, last_name, key] for key in author[-1]]
c3dd14b6173407ae4e5c646b28ceb693e7e6161d
693,190
def hsv_to_rgb(h, s, v):
    """Convert HSV to RGB ints 0..255 (based on colorsys.py).

    Args:
        h (float): Hue 0 to 1.
        s (float): Saturation 0 to 1.
        v (float): Value 0 to 1 (Brightness).

    Returns:
        (r, g, b) tuple of ints 0..255.

    Fix: the achromatic (s == 0) branch previously returned the raw 0..1
    floats while every other branch returned 0..255 ints.
    """
    if s == 0.0:
        gray = int(v * 255)
        return gray, gray, gray
    i = int(h * 6.0)
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    v = int(v * 255)
    t = int(t * 255)
    p = int(p * 255)
    q = int(q * 255)
    i = i % 6
    if i == 0:
        return v, t, p
    if i == 1:
        return q, v, p
    if i == 2:
        return p, v, t
    if i == 3:
        return p, q, v
    if i == 4:
        return t, p, v
    if i == 5:
        return v, p, q
3375a5168604339a47a49d447948972733075b7f
693,191
import shutil


def teardown_tempdir(path_to_dir):
    """Recursively delete *path_to_dir*, even when it is not empty."""
    shutil.rmtree(path_to_dir)
    return None
3062b44f37de9162295ffbd8a70d3f49be20bed5
693,192
def flatten(dict_in, delim="__", loc=None):
    """Un-nest a dict by joining key paths, e.g. {'a': {'b': 1}} -> {'a__b': 1}.

    :param dict_in: (possibly nested) dict to flatten.
    :param delim: string joining nested keys.
    :param loc: internal key-path prefix used during recursion.
    :returns: flat dict; an empty nested dict is preserved as an entry.

    Fix: the default for ``loc`` is now None instead of a mutable list.
    """
    loc = loc or []
    output = {}
    if not dict_in and loc:
        # Preserve an empty nested dict as an explicit entry.
        output[delim.join(loc)] = {}
    for key, value in dict_in.items():
        path = loc + [str(key)]
        if isinstance(value, dict):
            output.update(flatten(value, delim=delim, loc=path))
        else:
            output[delim.join(path)] = value
    return output
c5f908955721c6f0c3bebacda565660559f23567
693,193
def split_message(message, lines):
    """Split *message* into *lines* display lines for a speech bubble.

    Param: message(str)
           lines(int)
    Return: sublines(list)

    NOTE(review): each outer iteration resets ``index`` to 0, so every
    produced line re-reads the message from its start, and ``word_to_add``
    never advances to the next word -- verify the intended bubble output
    before relying on this.
    """
    sublines = list()
    message_words = message.split()
    for i in range(lines):
        index = 0
        speech_line = "< "
        word_to_add = message_words[int(index)] + " "
        # Fill until the line plus the pending word would exceed 23 chars.
        while (len(speech_line) + len(word_to_add)) <= 23:
            if index < len(message):
                speech_line += message[index]
                index += 1
            else:
                speech_line += " "
                word_to_add = " "
        speech_line += " >\n"
        sublines.append(speech_line)
    return sublines
    # Dead code below: an earlier character-based implementation kept for
    # reference inside an unreachable string literal.
    """
    sublines = list()
    for i in range(lines + 1):
        speech_line = ""
        speech_line += "< "
        index = 0
        for i in range(21):
            if not len(message) == 0:
                speech_line += message[0]
                message = message[1:]
            else:
                speech_line += " "
        speech_line += " >\n"
        sublines.append(speech_line)
    return sublines
    """
717e61c432f8090f222a76a40641759d13520fca
693,195
import json


def isolate_hit(JSON: bytes, URI: str) -> dict:
    """Isolate the value stored under *URI* in a JSON document.

    :param JSON: UTF-8 encoded JSON object.
    :param URI: top-level key to extract.
    :returns: the value for *URI*, or {} when the key is absent.
    """
    # Direct dict lookup replaces the original O(n) scan over .items().
    return json.loads(JSON).get(URI, {})
119cb698901d50b7e99fd9f9a00b0182ba2e2627
693,196
def check_number_edges(ugraph):
    """
    dict -> int

    Sanity-check the edge count of an undirected graph given as an
    adjacency dict (node -> iterable of neighbors).

    :returns: number of undirected edges, or the string "Not Undirected"
        when the summed degree is odd (inconsistent adjacency lists).
    """
    directed_edges = sum(len(neighbors) for neighbors in ugraph.values())
    if directed_edges % 2 == 0:
        # // keeps the count an int (the original '/' produced a float
        # under Python 3).
        return directed_edges // 2
    return "Not Undirected"
032d5be7b514d66a92174dbf693e14c4a4d6f59c
693,197
import os


def find_all_files(directory_path, recursive):
    """
    Finds all files in a directory.

    :param directory_path: root directory to scan.
    :param recursive: if True, also look in subdirectories.
    :return: list of absolute file paths.
    """
    collected = []
    for root, _directories, files in os.walk(directory_path):
        abs_root = os.path.abspath(root)
        collected.extend(os.path.join(abs_root, name) for name in files)
        if not recursive:
            break  # prevent descending into subdirectories
    return collected
17b6342ee7db302b39124b12d751dbbd6cd43b17
693,198
import math


def front_knee_acute(x, y, min_angle=100, side='right'):
    """Measure the front-knee segment angle from pose keypoints.

    Keypoint indices: 10:"RKnee", 11:"RAnkle", 13:"LKnee", 14:"LAnkle"

    :param x: keypoint x coordinates, indexed as above.
    :param y: keypoint y coordinates, indexed as above.
    :param min_angle: threshold in degrees for the pass flag.
    :param side: 'right' selects one knee/ankle pair, anything else the other.
    :returns: (angle_degrees, 1.0 if angle > min_angle else 0.0)

    NOTE(review): side == 'right' reads indices 13/14 (the LEFT knee/ankle
    per the mapping above) and the else-branch reads 10/11 -- confirm
    whether the branches are intentionally mirrored or accidentally
    swapped.
    """
    if side == 'right':
        degrees = math.degrees(math.atan2(y[14]-y[13], x[14]-x[13]))
    else:
        degrees = math.degrees(math.atan2(y[11]-y[10], x[11]-x[10]))
    if degrees > min_angle:
        return degrees, 1.0
    else:
        return degrees, 0.0
4d99d952f0331ae9891f7c2400fb88f071a513c2
693,199
def loadHostDefaults(config):
    """Load the defaults in the "host" key of the configuration.

    :param config: configuration dict (may be None or missing keys).
    :returns: list of {"key": "host", "src": url} entries, one per
        comma-separated URL in config["host"]["url"]; [] when absent.
    """
    host_section = (config or {}).get("host") or {}
    url_csv = host_section.get("url")
    if not url_csv:
        return []
    return [{"key": "host", "src": url} for url in url_csv.split(",")]
cd0bbb51f0e9a06469501754244497731ffbd804
693,200
def event_count(events):
    """Returns the total number of events in multiple events lists."""
    return sum(len(event_list) for event_list in events)
5298bf42afae0c63c5d745965c47786d669a17b5
693,201
def run_sim_episode(sim, policy):
    """
    Roll out one episode of a simulator under a policy, returning the
    visited states and received rewards.

    Args:
        sim: a PointSimulator (exposes episode_start() and simulate(action))
        policy: a function (SimState -> action dictionary)

    Returns:
        (state_history, reward_history)

    Raises:
        Exception: if the episode exceeds 1000 steps (infinite-loop guard).
    """
    state_history = []
    reward_history = []
    state = sim.episode_start()
    state_history.append(state)
    steps = 0
    done = False
    while not done:
        action = policy(state)
        state, reward, done = sim.simulate(action)
        print(state, reward, done)
        state_history.append(state)
        reward_history.append(reward)
        steps += 1
        if steps > 1000:
            raise Exception("Simulation ran longer than 1000 steps. Stopping.")
    return state_history, reward_history
e77d069d4a265377162d0115fa7eda7c02b1c64a
693,202
def format_memory_size(size):
    """
    Returns formatted memory size.

    :param size: Size in bytes, or None when unknown
    :type size: Number or None
    :returns: Formatted size string ('N/A GB' when size is None).
    :rtype: String
    """
    if size is None:
        # Fix: only a missing value is N/A; 0 is a real size ('0 B').
        # (The original `if not size` also mapped 0 to 'N/A GB'.)
        return 'N/A GB'
    if size >= 1099511627776:
        sizestr = '%.1f TB' % (float(size) / 1099511627776.0)
    elif size >= 1073741824:
        sizestr = '%.1f GB' % (float(size) / 1073741824.0)
    elif size >= 1048576:
        sizestr = '%d MB' % (int(size) / 1048576)
    elif size >= 1024:
        sizestr = '%d KB' % (int(size) / 1024)
    else:
        sizestr = '%d B' % int(size)
    return sizestr
9e8106b855ab810b80e0303574047edf0df2c49e
693,203
def turn_into_list(object):
    """Wrap *object* in a list unless it already is one.

    Passing a list returns it unchanged (no nesting); anything else is
    returned as a single-element list.
    """
    return object if isinstance(object, list) else [object]
eea2155b1a441fa1b84bcc8400d21c9263bdab48
693,204
def filter_by_control(df, threshold=0.6):
    """Keep rows of subjects whose every CONTROL response exceeds *threshold*.

    :param df: DataFrame with 'condition', 'subjectNo', 'response' columns.
    :param threshold: minimum acceptable CONTROL response.
    :returns: df restricted to passing subjectNo values.
    """
    control_rows = df[df['condition'] == 'CONTROL']
    min_response = control_rows.groupby(['subjectNo'])['response'].min()
    passed = min_response > threshold
    passed = passed[passed]  # keep only True entries; index = subjectNo
    print(f"N = {len(passed)}")
    return df[df['subjectNo'].isin(passed.keys())]
34d2633a1ba32679ac67d8788de272fb91d31ea6
693,205
import os


def get_file_in_directory(path):
    """
    Retrieve the paths of non-hidden files directly inside a directory.

    Input: path = directory
    Output: list of file paths
    """
    result = []
    for name in os.listdir(path):
        full_path = os.path.join(path, name)
        # Skip directories and dot-files (e.g. macOS .DS_Store).
        if os.path.isfile(full_path) and not name.startswith('.'):
            result.append(full_path)
    return result
2f7fdfd626fc6603f28b0ef2226fb260f0c8b03e
693,207
def mapSqr(L):
    """Return a new list with every element of *L* squared (input unchanged)."""
    power = 2
    return [element ** power for element in L]
49aa2753d8e18445a1de5ae5451c37173f96b090
693,208
def get_bounds_from_values(values):
    """Get the list of bin bounds for a list of numeric values.

    Bounds are the midpoints between consecutive values, extended half an
    interval beyond each end (len(values) + 1 bounds for 2+ values).

    :param values: List of numeric values
    :type values: list[int or float]
    :return: List of bounds
    :rtype: list[float]
    """
    n = len(values)
    if n == 0:
        return []
    if n == 1:
        # Degenerate case: a single value is its own (float) bound.
        return [values[0] + 0.0]
    bounds = [values[0] - (values[1] - values[0]) / 2]
    for left, right in zip(values, values[1:]):
        bounds.append(left + (right - left) / 2)
    bounds.append(values[-1] + (values[-1] - values[-2]) / 2)
    return bounds
f10419b59325d3ce46c1f9161920697a0b7d0e89
693,209
async def is_document_exists(collection, id):
    """Return truthy when a document with ``_id == id`` exists in *collection*.

    Uses ``count_documents`` with ``limit=1`` so the scan stops at the
    first match.
    """
    query = {'_id': id}
    return await collection.count_documents(query, limit=1)
0b000f45b3ffa1e517fc69fef09aa8f5934b247b
693,210
import re


def extract_rules(input_file):
    """Get rules out of an input Comprehensive Rules doc.

    At the moment this does intentionally leave some things off, most
    notably rule '808.' This is not a feature, it's a hack.

    Keyword arguments:
    input_file -- the CR file you want to strip rules from

    :returns: list of rules, each a list of whitespace-split tokens.
    """
    with open(input_file, 'r') as rules_doc:
        entire_doc = rules_doc.read()
    # Handle editorial snafus on WotC's end: normalize straight quotes,
    # apostrophes, dashes and (tm)/(r) marks to typographic forms.
    entire_doc = entire_doc.replace(" \"", " “")
    entire_doc = entire_doc.replace("(\"", "(“")
    entire_doc = entire_doc.replace("\"", "”")
    entire_doc = entire_doc.replace("'", "’")
    entire_doc = entire_doc.replace(" ’", " ‘")
    entire_doc = entire_doc.replace("-", "—")
    # Re-narrow em-dashes between word characters back to en-dashes.
    entire_doc = re.sub(r"(\w)—(\w)", r"\1–\2", entire_doc)
    entire_doc = entire_doc.replace("(tm)", "™")
    entire_doc = entire_doc.replace("(r)", "®")
    # Re-join wrapped continuation lines (4+ leading spaces) onto one line.
    entire_doc = re.sub(r"\n\s{4,}(\w)", r" \1", entire_doc)
    # A rule line: 3 digits, two non-letter chars, ending in punctuation.
    extracted_rules = re.findall('^\d{3}[^a-zA-Z\n]{2}.*[“"”.) :]$',
                                 entire_doc, re.MULTILINE)
    rules_list = []
    # Skip the first match -- presumably a table-of-contents artifact;
    # TODO confirm against an actual CR document.
    for rule in extracted_rules[1:]:
        rules_list.append(rule.split())
    return rules_list
8c9f4f9bee5d808909c964ef6651311c99cd55b7
693,211
from typing import List


def _solve_tridiagonal_matrix(a: List[float], b: List[float], c: List[float],
                              r: List[float]) -> List[float]:
    """ Solves the linear equation system given by a tri-diagonal
    Matrix(a, b, c) . x = r.

    Matrix configuration::

        [[b0, c0, 0, 0, ...],
        [a1, b1, c1, 0, ...],
        [0, a2, b2, c2, ...],
        ... ]

    Args:
        a: lower diagonal [a0 .. an-1], a0 is not used but has to be present
        b: central diagonal [b0 .. bn-1]
        c: upper diagonal [c0 .. cn-1], cn-1 is not used and must not be present
        r: right-hand side quantities

    Returns:
        vector x as list of floats

    Raises:
        ZeroDivisionError: singular matrix
    """
    n = len(a)
    u = [0.0] * n    # solution vector, filled during the forward sweep
    gam = [0.0] * n  # workspace for the decomposition factors
    bet = b[0]
    u[0] = r[0] / bet
    # Forward sweep (Thomas algorithm): decomposition + forward substitution.
    for j in range(1, n):
        gam[j] = c[j - 1] / bet
        bet = b[j] - a[j] * gam[j]  # ZeroDivisionError here => singular
        u[j] = (r[j] - a[j] * u[j - 1]) / bet
    # Back substitution.
    for j in range((n - 2), -1, -1):
        u[j] -= gam[j + 1] * u[j + 1]
    return u
3f28f4e7fc26dc46b59c269ab2547561eaab749f
693,213
from re import VERBOSE


def is_verbose():
    """Encapsulate the module global for clean access from scripts that
    import this one.

    Returns:
        the value of the VERBOSE variable

    NOTE(review): VERBOSE is imported from the stdlib ``re`` module above,
    so this returns the regex VERBOSE flag (a truthy RegexFlag), not an
    application-level verbosity setting -- confirm the intended source of
    this global.
    """
    return VERBOSE
fdabd3f8d595d5b6d437a2a9b8c631bc8fb47f21
693,214
def compare_surveys(surv1, surv2, alpha):
    """Event rate of surv1 / event rate of surv2 for a spectral index *alpha*.

    Each survey must expose beam_size_fwhm, T_rec, gain, beta, bw and
    snr_limit attributes; all factors enter as surv1/surv2 ratios.
    """
    omega = surv1.beam_size_fwhm / surv2.beam_size_fwhm
    t_rec_ratio = surv1.T_rec / surv2.T_rec
    gain_ratio = surv1.gain / surv2.gain
    beta_ratio = surv1.beta / surv2.beta
    sefd_ratio = t_rec_ratio * beta_ratio / gain_ratio
    bw_ratio = surv1.bw / surv2.bw
    s_min_ratio = surv1.snr_limit / surv2.snr_limit
    return omega * (sefd_ratio * s_min_ratio) ** alpha * bw_ratio ** (-alpha / 2)
b55ae829234a2c293f68b65c68a7760cf15c66cf
693,215
from typing import List


def permutar(arr: List[int]) -> List[List[int]]:
    """Return every permutation of *arr*.

    :param arr: list of unique integers.
    :return: list of permutations ([] for empty input).
    """
    if not arr:
        return []
    if len(arr) == 1:
        return [arr]
    permutations = []
    for idx, head in enumerate(arr):
        remaining = arr[:idx] + arr[idx + 1:]
        permutations.extend([head] + tail for tail in permutar(remaining))
    return permutations
98af485e0834d5f108312fa188d3e21fc23bec8e
693,217
def print_menu():
    """Show the user menu (in French) and return the raw option typed.

    NOTE(review): the menu literal's internal newlines/indentation were
    reconstructed from a whitespace-mangled source -- verify the exact
    string against the original.
    """
    menu = """Que voulez vous faire?
    1 - Ajouter un user à la liste
    2 - Parcourir tous les users
    3 - Récupérer un user via un ID
    q - quitter
    """
    return input(menu)
47a589184bbb61607b68ff597db84de513e20db7
693,218
def smiles_in_training(smiles, data):
    """
    Determines if a SMILES is in a dataset.

    Parameters
    ----------
    smiles : str
        SMILES string describing a compound.
    data : pd.DataFrame
        A pandas dataframe with a "canonical_smiles" column.

    Returns
    -------
    bool : If the SMILES is in the dataset.
    """
    # Membership must be tested against the column *values*: a bare
    # `smiles in data["canonical_smiles"]` would check the index instead.
    # A set gives O(1) lookup (the original built a list per call).
    return smiles in set(data["canonical_smiles"])
63a87c8da359de4198968560c77e422a5b6ee8ae
693,219
def strFormatter(text: str) -> str:
    """Return *text* with LaTeX-reserved chars (&, %, #, _) backslash-escaped
    and common typographic quotes/dashes normalized.

    NOTE(review): the loop iterates over the ORIGINAL length while inserting
    escapes that grow the string, so indices shift and characters past the
    first insertion may be re-inspected or skipped -- verify behavior on
    inputs containing several reserved characters.
    """
    text = str(text)
    for x in range(len(text)):
        # Each branch either escapes a leading reserved char or inserts a
        # backslash before an unescaped occurrence at position x.
        if x == 0 and text[x] == "&":
            text = "\\" + text
        elif text[x] == "&" and text[x - 1] != "\\":
            text = text[:x] + "\\" + text[x:]
        elif x == 0 and text[x] == "%":
            text = "\\" + text
        elif text[x] == "%" and text[x - 1] != "\\":
            text = text[:x] + "\\" + text[x:]
        elif text[-1] == "%" and text[-2] != "\\":
            # Special-cases a trailing '%' regardless of x.
            text = text[:-1] + "\\" + text[-1]
        elif x == 0 and text[x] == "#":
            text = "\\" + text
        elif text[x] == "#" and text[x - 1] != "\\":
            text = text[:x] + "\\" + text[x:]
        elif x == 0 and text[x] == "_":
            text = "\\" + text
        elif text[x] == "_" and text[x - 1] != "\\":
            text = text[:x] + "\\" + text[x:]
    # Normalize quotes and dashes to LaTeX-friendly forms; the first two
    # replaces intentionally touch only the first occurrence.
    text = text.replace('"', "``", 1)
    text = text.replace("'", "`", 1)
    text = text.replace("’", "'")
    text = text.replace("“", "``")
    text = text.replace("”", '"')
    text = text.replace("–", "-")
    text = text.replace("‘", "`")
    text = text.replace("’", "'")
    text = text.replace("•", "")
    return text
3247d27fa01469ecac5cff33f4ab735be5db158d
693,220
def polygon2pathd(polyline_d): """converts the string from a polygon points-attribute to a string for a Path object d-attribute. Note: For a polygon made from n points, the resulting path will be composed of n lines (even if some of these lines have length zero).""" points = polyline_d.replace(', ', ',') points = points.replace(' ,', ',') points = points.split() reduntantly_closed = points[0] == points[-1] d = 'M' + points[0].replace(',', ' ') for p in points[1:]: d += 'L' + p.replace(',', ' ') # The `parse_path` call ignores redundant 'z' (closure) commands # e.g. `parse_path('M0 0L100 100Z') == parse_path('M0 0L100 100L0 0Z')` # This check ensures that an n-point polygon is converted to an n-Line path. if reduntantly_closed: d += 'L' + points[0].replace(',', ' ') return d + 'z'
49462a408bb38eb85cdc68034413fb3b45512826
693,221
def build_compound(compound):
    """Build a compound object for storage.

    Args:
        compound(dict): must carry 'variant' (document id), 'display_name'
            and 'score' keys.

    Returns:
        compound_obj(dict): with keys variant (str), display_name (str)
            and combined_score (float, coerced from 'score').
    """
    compound_obj = {
        "variant": compound["variant"],
        "display_name": compound["display_name"],
        "combined_score": float(compound["score"]),
    }
    return compound_obj
b01868e84ba453b742e0ae4dc7a08c29a171860a
693,222
import os


def check_exist(arg_filename):
    """
    Helper that picks a file-open mode based on existence.

    :param arg_filename: path to check.
    :return: "a" (append) when the file exists, else "w" (write).
    """
    return "a" if os.path.exists(arg_filename) else "w"
de08707c7f5805d57b59f433a3ce03be075bc8af
693,223
def isiterable(iterable):
    """True for iterables other than str/bytes.

    https://stackoverflow.com/a/36407550/3356840
    """
    if isinstance(iterable, (str, bytes)):
        return False
    try:
        iter(iterable)
    except TypeError:
        return False
    return True
b046f09859bddd28e3ae802189cef828f626c272
693,224
import requests
import time


def get_results_with_retry(wdt_sparql_url, query):
    """ Run a SPARQL query, retrying indefinitely until a JSON response
    parses.

    :param wdt_sparql_url: SPARQL endpoint URL.
    :param query: SPARQL query string.
    :returns: parsed JSON response (dict).
    """
    while True:
        try:
            r = requests.get(wdt_sparql_url,
                             params = {'format': 'json', 'query': query})
            # res_text=r.text
            # response = json.loads(res_text)
            response = r.json()
            break
        except Exception as e:
            # Deliberate best-effort loop: any failure (network error,
            # throttling, bad JSON) waits 2s and retries without limit.
            print(e, 'error, retrying')
            time.sleep(2)
            continue
    return response
54f6bc3fe79924d2c914867d7ce6b6cadf6c8497
693,225
def print_palindromes(palindrome_dict):
    """Print 'position length' pairs, one per line.

    :param palindrome_dict: dict keyed by palindrome position whose value's
        first element is the palindrome length.
    """
    for position, info in palindrome_dict.items():
        print(position, info[0])
    return None
7b30402a16ef0e7f7b7e8643ae3d2efcc72294a8
693,226
import math


def trailingZeroes(self, n):  # ! 这个解法太妙了
    """Count trailing zeroes of n! (Legendre's formula: sum of n // 5**k).

    :type n: int
    :rtype: int
    """
    zeroCnt = 0
    while n > 0:
        # Integer floor-division replaces math.floor(n/5): the float
        # round-trip loses precision for very large n (> 2**53).
        n //= 5
        zeroCnt += n
    return zeroCnt
fc1bf2d3046314af4757390be963610f1e036032
693,227
def formatTuples(tuples):
    """
    Renders a list of 2-tuples into a column-aligned string format.

    tuples (list of (any, any)): The list of tuples to render.

    Returns:
        A new string ready for printing ("" for an empty list).
    """
    if not tuples:
        return ""
    pairs = [(str(a), str(b)) for (a, b) in tuples]
    # First column is padded to the widest entry plus two spaces.
    col_width = max(len(a) for (a, _) in pairs)
    template = " {{:{}}} {{}}\n".format(col_width + 2)
    return "".join(template.format(a, b) for (a, b) in pairs)
f078aa0b512c0ce63a8d4a06ea0a2d3df31bde7b
693,228
def normalized_thread_name(thread_name):
    """
    Abbreviate a long thread name to its capital letters (for Zafira UI),
    e.g. MainThread -> MT, ThreadPoolExecutor -> TPE.

    :param thread_name: thread name from a Log Record object
    :return: simplified thread name
    """
    return ''.join(char for char in thread_name if char.isupper())
02c886b7081eac0e289c4566ab3c70a9fb92a5f6
693,229
def _subexon_ranks(strand, transcript_len): """ Return a list of the subexon ranks. NOTE: Rank starts in 0 to make this compatible with end_phase_previous_exon that expect exon_pos(ition) to start in 0. >>> _subexon_ranks(-1, 10) [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] >>> _subexon_ranks(1, 10) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ if strand == -1: return list(range(transcript_len - 1, -1, -1)) return list(range(0, transcript_len))
efcf0e376d0666c7ca018e220fab598abf80eec8
693,231
def check_not_mnist_files(path):
    """Filters some bad files in NotMNIST dataset.

    Returns False when *path* contains one of the known-broken file
    names, True otherwise.
    """
    bad_paths = (
        "RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png",
        "Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png",
    )
    return not any(bad_path in path for bad_path in bad_paths)
2b663f4272432540ec4de1903539d9275ac2ce8c
693,233
def best_alignment(seq1, seq2, S, P):
    """Backtrack through the pointer matrix P to recover an alignment.

    The function aligns seq1 (template) to seq2 (target) using the
    pointer matrix P ("d" = diagonal, "u" = up, anything else = left).
    The score is taken from S, the matrix of alignment maxima.

    Returns the template (seq1), target (seq2) — each with gaps
    inserted — and the best score.

    NOTE(review): characters are appended while walking backwards from
    the bottom-right corner and never reversed, so the returned strings
    are in reverse order relative to the inputs — confirm callers
    expect this.
    """
    i = len(S) - 1; j = len(S[0]) -1  # Start backtracking at the last cell of the matrix
    template = ""; target = ""; score = S[i][j]  # The bottom-right cell of S holds the best NW score
    while i + j != 0:  # Stop only once both indices reach 0 (top-left corner)
        if P[i][j] == "d":  # diagonal move = match or mismatch
            template += seq1[i - 1]  # Consume one letter of sequence 1
            target += seq2[j - 1]  # Consume one letter of sequence 2
            i -= 1  # Move one row up to continue backtracking
            j -= 1  # ...and one column left
        elif P[i][j] == "u":  # up move = gap in the target
            template += seq1[i - 1]  # Consume one letter of sequence 1
            target += "-"  # Insert a gap in the target
            i -= 1
        else:  # left move = gap in the template
            template += "-"  # Insert a gap in the template
            target += seq2[j - 1]  # Consume one letter of sequence 2
            j -= 1
    return template, target, score
88a4c4ad95059a976d2929b81eaa89bda434ed1a
693,234
import platform


def get_OS():
    """Classify the running operating system.

    :return: "Windows", "Mac" or "Linux" (the fallback), based on
        substring matches against platform.platform().
    """
    description = platform.platform()
    if "Windows" in description:
        return "Windows"
    if "macOS" in description:
        return "Mac"
    return "Linux"
db7b83fc7036e4b6571d9880032c459a9a4c9c54
693,235
def get_diff_dict(d1, d2):
    """Return the entries of d2 whose keys do not appear in d1."""
    return {key: d2[key] for key in set(d2) - set(d1)}
63eb9638b10cf004ba58b9a17bc55858d2c29030
693,236
from typing import Dict
from typing import List
from typing import Tuple
from typing import Optional


def deconstruct_entry(entry: Dict[str, str], username_fields: List[str],
                      sightings_fields: List[str]) -> Tuple[Optional[str], Optional[str], Optional[int]]:
    """ deconstruct_entry
    Extracts device relevant fields from a log entry.

    :type entry: ``Dict[str, str]``
    :param entry: Log entry as dictionary of fields.

    :type username_fields: ``List[str]``
    :param username_fields: Candidate field names holding the username;
        the first one present in *entry* wins.

    :type sightings_fields: ``List[str]``
    :param sightings_fields: Candidate field names holding the number of
        occurrences; defaults to 1 when none is present.

    :return: (username or None, domain split off a ``DOMAIN\\user``
        username or None, number of occurrences).
    :rtype: ``Tuple[Optional[str], Optional[str], Optional[int]]``
    """
    username = None
    for field in username_fields:
        if field in entry:
            username = entry[field]
            break

    sightings = 1
    for field in sightings_fields:
        if field in entry:
            sightings = int(entry[field])
            break

    domain = None
    if username is not None and "\\" in username:
        domain, username = username.split("\\", 1)

    return username, domain, sightings
fc3b6f1d5386cc5b2fc6e4a7efb8ec92033bf0f7
693,237
def get_computername():
    """Read computer names, one per line, from the refresh list file.

    :return: list of names which powershell() iterates over
    """
    with open(r'f:\Windows_10_Refresh\Powershell\NoBOM.txt', 'r') as handle:
        return handle.read().splitlines()
901a55a195fea6e460d4e44eadcba0b518d87769
693,238
def _vrtWrapBand(vrtDataset, sourceBand): """ Wraps a GDAL raster band in a VRT band. """ # Retrieve the width and height from the source band width = sourceBand.XSize height = sourceBand.YSize # Create the new VRT raster band vrtDataset.AddBand(sourceBand.DataType) vrtBand = vrtDataset.GetRasterBand(vrtDataset.RasterCount) # Build the XML for the data source bandSource = '''<SimpleSource> <SourceFilename relativeToVRT="1">{}</SourceFilename> <SourceBand>{}</SourceBand> <SrcRect xOff="{}" yOff="{}" xSize="{}" ySize="{}"/> <DstRect xOff="{}" yOff="{}" xSize="{}" ySize="{}"/> </SimpleSource>'''.format( sourceBand.GetDataset().GetFileList()[0], sourceBand.GetBand(), 0, 0, width, height, 0, 0, width, height ) # Add the data source to the VRT band metadata = {} metadata['source_0'] = bandSource vrtBand.SetMetadata(metadata, 'vrt_sources') return vrtBand
44ea14188fdd79d2d8d5cfcdd2c175c782786062
693,239
def utcstr(ts):
    """
    Format a UTC timestamp in ISO 8601 with millisecond precision.

    :param ts: The timestamp to format.
    :type ts: instance of :py:class:`datetime.datetime`, or a falsy
        value which is returned unchanged.

    :returns: Timestamp formatted in ISO 8601 format, e.g.
        ``"2020-01-02T03:04:05.123Z"``.
    :rtype: unicode
    """
    if not ts:
        return ts
    # %f yields microseconds; drop the last three digits for milliseconds.
    return "{}Z".format(ts.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3])
980a401e157c7986b023c1e5fba8f5e9060de501
693,240
import sqlite3


def conectionDB(func):
    """Decorator that opens the passwords database around *func*.

    Publishes the connection and cursor as the globals ``myConection``
    and ``myCursor``, commits and closes after a successful call, and
    returns the wrapped function's result.
    """
    def wrapper(*args, **kwargs):
        global myConection, myCursor
        myConection = sqlite3.connect("PasswordsDB")
        myCursor = myConection.cursor()
        result = func(*args, **kwargs)
        myConection.commit()
        myConection.close()
        return result
    return wrapper
e692d4d5c0c4098c164d7d3159810ce894576f34
693,241
from datetime import datetime


def current_year():
    """Return the current year as a four-digit string."""
    return datetime.now().strftime('%Y')
a75f21f33a8536fde0fe917f3e04653dbe6f0578
693,242
def get_strikes(df, strike, dte):
    """Get strike closest to underlying price

    :param df: option-chain DataFrame with 'daysToExpiration' and
        'strikePrice' columns
    :param strike: target price to match
    :param dte: maximum days to expiration to consider
    :return: the eligible strike price nearest to *strike*
    """
    # Keep only contracts at or below the requested days-to-expiration.
    eligible = df[df['daysToExpiration'] <= dte]
    # Select the single row whose strike is nearest the target price.
    eligible = eligible.iloc[(eligible['strikePrice'] - strike).abs().argsort()[:1]]
    eligible.reset_index(drop=True, inplace=True)
    return eligible['strikePrice'][0]
b502825118c3410976ccb2de47272dde9692fcee
693,243
import requests


def init_session(refresh_token, base_search_url):
    """Create a requests Session authorized with a freshly minted token.

    Exchanges *refresh_token* for an id token at ``<base>/tokens/`` and
    installs it as a Bearer Authorization header on a new Session.

    :raises requests.HTTPError: when the token exchange fails.
    """
    token_response = requests.post(
        "{}/tokens/".format(base_search_url),
        json={"refresh_token": refresh_token})
    token_response.raise_for_status()

    session = requests.Session()
    token = token_response.json()["id_token"]
    session.headers.update({"Authorization": "Bearer {}".format(token)})
    return session
212baee2bc0c4062357d334caff07ac48b537425
693,244
import os


def file_exists(file_name):
    """
    Return whether *file_name* names an existing file or folder.

    :param file_name: Path of the file
    :return: True/False
    """
    return os.path.exists(file_name)
ae8e62c74cad979e0c2cdaa0b7790306960f3af1
693,245
import inspect def _get_args_from_config(from_config_func, *args, **kwargs): """ Use `from_config` to obtain explicit arguments. Returns: dict: arguments to be used for cls.__init__ """ signature = inspect.signature(from_config_func) if list(signature.parameters.keys())[0] != "cfg": raise TypeError( f"{from_config_func.__self__}.from_config must take 'cfg' as the first argument!" ) support_var_arg = any( param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] for param in signature.parameters.values() ) if support_var_arg: # forward all arguments to from_config, if from_config accepts them ret = from_config_func(*args, **kwargs) else: # forward supported arguments to from_config supported_arg_names = set(signature.parameters.keys()) extra_kwargs = {} for name in list(kwargs.keys()): if name not in supported_arg_names: extra_kwargs[name] = kwargs.pop(name) ret = from_config_func(*args, **kwargs) # forward the other arguments to __init__ ret.update(extra_kwargs) return ret
9860ec1c7d589fbc6bef0c72930380521482ce7d
693,247
def my_vocab(x_tokens):
    """Build a token -> index vocabulary from tokenized sentences.

    Index 0 is reserved for 'unk' (unknown token); remaining tokens are
    numbered in first-seen order.
    """
    vocab = {'unk': 0}
    for sentence in x_tokens:
        for token in sentence:
            if token not in vocab:
                # len(vocab) is the next free index.
                vocab[token] = len(vocab)
    return vocab
f62daeec43bc4800a820453c6ff431cf644f3cae
693,248
import argparse
from pathlib import Path


def handle_args():
    """Parse and return the command line arguments.

    :return: argparse.Namespace with object_model, vendor, device,
        bsp_dir (a Path) and overwrite_existing fields.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--object-model",
                        help="The path to the object model file")
    parser.add_argument("--vendor", required=True,
                        help="The vendor name")
    parser.add_argument("-D", "--device", required=True,
                        help="The device name")
    parser.add_argument("-b", "--bsp-dir", type=Path, required=True,
                        help="The path to the bsp directory")
    parser.add_argument("-x", "--overwrite-existing",
                        action="store_true", default=False,
                        help="overwrite existing files")
    return parser.parse_args()
fbe9ea10b2c83c440a86f82f73e760411b45a134
693,249
import json


def load_dict(x, transformer=None):
    """Convert *x* to a dict.

    :param x: None, an existing dict (both returned unchanged), or a
        JSON string encoding an object.
    :param transformer: optional callable applied to every value of the
        decoded dict (e.g. ``int``); defaults to the identity.
    :return: the resulting dict, or ``{}`` when *x* cannot be decoded
        as a JSON object or a value cannot be transformed.
    """
    if x is None or isinstance(x, dict):
        return x
    try:
        data = json.loads(x)
        convert = transformer if transformer else (lambda value: value)
        return {key: convert(value) for key, value in data.items()}
    except Exception:
        # Malformed JSON, non-object JSON and failing transformers all
        # degrade to an empty dict. Previously a bare ``except:`` was
        # used, which also swallowed KeyboardInterrupt/SystemExit.
        return {}
5bbec386a2936e3d55b5ee67c8ecf5b791ddc268
693,250
import os


def index_file_path(base_dir, index_name, date_string):
    """
    Create path to index file.

    The arg date_string can be in one of two forms: yyyymmdd or
    yyyymmdd.hh.
    """
    return os.path.join(base_dir, "{}.{}".format(index_name, date_string))
fb5cc4e11d96057ad10b51b6ea6d590e8a4f3ff4
693,251
import argparse


def get_args():
    """
    desc: get cli arguments

    returns:
        args: namespace of cli arguments (dataset plus the three
            train/validation/test split percentages)
    """
    parser = argparse.ArgumentParser(
        description="this script creates tf record files",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("dataset", type=str, choices=["imdb"], default="imdb",
                        help="dataset to use")
    parser.add_argument("--train_data_percentage", type=float, default=0.70,
                        help="percent of dataset to use for training")
    parser.add_argument("--validation_data_percentage", type=float, default=0.20,
                        help="percent of dataset to use for validation")
    parser.add_argument("--test_data_percentage", type=float, default=0.10,
                        help="percent of dataset to use for testing")
    return parser.parse_args()
d11a6cb90ad6ac9bc5b05dbea4407e590fe0a19f
693,252
def getkey(pstring):
    """Strips units and argument lists from a property/trajectory keyword.

    Args:
        pstring: The string input by the user that specifies an output,
            which in general will specify units (``{...}``) and argument
            lists (``(...)``).

    Returns:
        The keyword alone, cut at whichever delimiter appears first and
        stripped of surrounding whitespace.
    """
    end = len(pstring)
    for delimiter in ('(', '{'):
        position = pstring.find(delimiter)
        if position >= 0:
            end = min(end, position)
    return pstring[:end].strip()
9e10fc8e0316eb04074b429006e8bf14342b65ee
693,253
from pathlib import Path
import hashlib


def get_hash(path: Path) -> str:
    """Get the SHA256 hash digest of a file."""
    return hashlib.sha256(path.read_bytes()).hexdigest()
219b35627a647dfcfa1fac86d7bc43c27b4c8b10
693,254
import re


def is_line_function_definition(line: str) -> bool:
    """Return True if the line starts a public function definition.

    Lines defining names that start with ``__`` (private functions) are
    excluded.

    Args:
        line: a line in a python file
    """
    starts_def = re.search('^( *)def ', line) is not None
    return starts_def and 'def __' not in line
faf550e59d7eac5686b6881df515db8ab3caedcf
693,255
def sum_BinTNode(t):
    """Recursively sum the ``data`` fields of a binary tree.

    :param t: root node with ``data``/``left``/``right`` attributes, or
        None for an empty tree.
    :return: the sum of all node data (0 for an empty tree).
    """
    if t is None:
        return 0
    return t.data + sum_BinTNode(t.left) + sum_BinTNode(t.right)
83c3f1a5df147c38871cd1dcdb4ed82f7a4abcfe
693,256
def salt_cloud_cli(salt_master_factory):
    """Fixture returning the ``salt-cloud`` CLI bound to the running master."""
    cli = salt_master_factory.salt_cloud_cli()
    return cli
e8bbecea3f63e14a5c6e52c90d154f9342ee57b2
693,258
def retrieve_search_terms():
    """Read search_terms.txt and return its lines, lowercased, as a set."""
    with open('search_terms.txt', 'r') as infile:
        return {line.strip().lower() for line in infile}
8a31e7b911e1d1205d096487fc53a57268feb783
693,259
def logged_in_user(testapp, user):
    """Pytest fixture to produce a user that is correctly logged in."""
    login_page = testapp.get('/login')
    # Fill out the login form in the navbar and submit it, following
    # the redirect so the session cookie is established.
    form = login_page.forms['login_user_form']
    form['email'] = user.email
    form['password'] = 'myprecious'
    form.submit().follow()
    return user
3092cde38c889c224015a5a108188c0b4fc34032
693,261
def get_answers_for_question(current_user, question):
    """
    Get the answers for a given question, flagging those the user gave.

    :param current_user: user node to check answer connections against
    :param question: neomodel node with an ``answers`` relationship
    :return answers[]: list of answer dicts; answers connected to
        *current_user* additionally carry ``'answered': True``
    """
    results = []
    for answer_node in question.answers:
        payload = answer_node.json()
        if answer_node.users.is_connected(current_user):
            payload['answered'] = True
        results.append(payload)
    return results
cd70ec3b605edc0476b26976438e8f554046e2e1
693,262
def get_tool_dependency(app, id):
    """Get a tool_dependency from the database via id"""
    decoded_id = app.security.decode_id(id)
    query = app.install_model.context.query(app.install_model.ToolDependency)
    return query.get(decoded_id)
e230e9837bcf55a01a4dd93329686e587413e029
693,263
def is_numpy(value):
    """
    Determine whether *value* is a NumPy value, i.e. a numpy.ndarray,
    a NumPy scalar, etc.

    Parameters:
    -----------
    value:
        The value to test.

    Returns:
    --------
    boolean:
        True when the value's type is defined in a ``numpy`` module,
        False otherwise.
    """
    module = type(value).__module__
    return module.partition(".")[0] == "numpy"
3bc294d739e9b108abf7cde3c072611890b7374a
693,264
from datetime import datetime


def parse_time(argument):
    """Parse an ``HH:MM`` string into a datetime.

    Intended as the ``type=`` callable for argparse options.
    """
    return datetime.strptime(argument, "%H:%M")
e6b8204f906f3ea2076058a2877e8f09a002319e
693,265
def open_tdump(filename):
    """Opens the tdump file.

    Parameters
    ----------------
    filename: string
        Full file path for tdump file

    Returns
    -----------
    tdump: _io.TextIOWrapper
        tdump file opened in read mode, encoding UTF-8, ready to be
        used by other functions in this code. The caller is responsible
        for closing it.
    """
    # Pass the encoding explicitly: the docstring promises UTF-8, but
    # the previous code relied on the platform default encoding.
    tdump = open(filename, 'r', encoding='UTF-8')
    return tdump
ea01161dee3c9d8e098befe860531c8cfa4800db
693,266
def methods_equivalent(meth1, meth2):
    """Return True if the two methods are the same implementation."""
    def _unwrap(method):
        # Bound methods expose their function via __func__; plain
        # functions are returned unchanged.
        return getattr(method, '__func__', method)
    return _unwrap(meth1) is _unwrap(meth2)
87f36bb2ae2bc900c73dce452eb39bf58a9f2b87
693,267
import os
import shutil


def which_executable(environment_variable, executable_name):
    """
    Determine the path of an executable.

    A set, non-empty environment variable overrides searching the PATH.

    :param str environment_variable: The name of the environment variable
    :param str executable_name: The name of the executable
    :rtype: str
    """
    return os.getenv(environment_variable) or shutil.which(executable_name)
553b0ba2e99077453848663429604e701c37f486
693,269
def in_region(val, x_ranges, lines, eps=1e-14) -> bool:
    """
    Check whether complex *val* lies in the region whose boundary is
    described by x_ranges and lines.

    x_ranges and lines are the parallel lists returned by pm_boundary(n):
    (x1, x2) real intervals and (slope, intercept) boundary lines.
    """
    for (x1, x2), (slope, intercept) in zip(x_ranges, lines):
        if x1 - eps <= val.real <= x2 + eps:
            # Inside this x-interval: compare |imag| to the boundary line.
            if abs(val.imag) < slope * val.real + intercept + eps:
                return True
    return False
d312792ab97d9f57dad927610d2f383d73abf399
693,270
from functools import reduce


def union(sets):
    """
    Returns the union of multiple sets.

    :param sets: an iterable of sets; may be empty.
    :return: a new set containing every element of every input set.
        An empty iterable yields the empty set (the previous
        reduce-based implementation raised TypeError on empty input).
    """
    return set().union(*sets)
fb523e64f2a08f62ed1b90e029a9ed5afe8b02ac
693,271
def get_task_no(job_id):
    """Extract the task number from the full job/job step ID.

    The task portion follows the first underscore; a trailing
    ``.batch`` suffix is stripped before conversion.
    """
    task_info = job_id.split('_')[1]
    if '.batch' in task_info:
        task_info = task_info.split('.')[0]
    return int(task_info)
2c8f9934c96ecc96adfcba927e27dd4ea46d590d
693,272
import os


def process_args(args):
    """
    Process the options got from get_args().

    Raises when the scores or trials file does not exist, and
    normalises ``select_best_test_spk`` from 'yes'/'no' to a bool.
    """
    checks = (
        (args.input_file, "This scripts expects input scores file [ {0} ] exist."),
        (args.trials_file, "This scripts expects trials file [ {0} ] exist."),
    )
    for path, message in checks:
        if not os.path.exists(path):
            raise Exception(message.format(path))
    args.select_best_test_spk = (args.select_best_test_spk == 'yes')
    return args
e94db62a78360e50d0b35bb5f7a7737fd86f1d47
693,274