content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def get_matrix_or_template_parameters(cli_args):
    """Collect the CLI parameters shared by cifti_conn_matrix and
    cifti_conn_template.

    :param cli_args: Full argparse namespace containing all CLI args,
        including every parameter needed by both scripts.
    :return: A list of the shared parameter values, in the order the
        scripts expect them.
    """
    shared_attrs = (
        "mre_dir", "wb_command", "series_file", "time_series", "motion",
        "fd", "tr", "minutes", "smoothing_kernel", "left", "right",
        "beta8", "remove_outliers", "mask", "make_conn_conc", "output",
        "dtseries",
    )
    return [getattr(cli_args, attr) for attr in shared_attrs]
fbda701f988ebf490bfd33fa8d78d8bcc1dd109f
695,412
def TR4(rv):
    """Identify values of trigonometric functions at special angles.

    The special angles 0, pi/6, pi/4, pi/3 and pi/2 are already evaluated
    directly by the trig functions themselves (cos, sin, tan, cot), so the
    expression is simply returned unchanged.

    :param rv: the expression to (not) transform.
    :return: *rv*, untouched.
    """
    return rv
229b610c2898be7f01b8b7e6d3c17df71d27162a
695,413
import argparse


def parse_args(argv=None):
    """Process input arguments.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]``.  Added (backward-compatibly) so the parser can be
        driven programmatically and unit-tested.
    :return: parsed ``argparse.Namespace`` with ``vcf`` and ``filter``.
    """
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('vcf', metavar='V',
                        help="VCF file")
    parser.add_argument('--filter', '-f', nargs='+',
                        help="Terms to filter, filtering all problematic sites by default")
    return parser.parse_args(argv)
bb9de19f5c2f4e54c73c3ac1b003d53d0252cde7
695,414
import time
import logging


def get_set_num_ruptures(src):
    """Extract the number of ruptures for *src*, computing and caching it
    on ``src.num_ruptures`` if it is not already set.

    Emits a warning naming the likely mis-sized discretization parameter
    when counting takes more than 10 seconds.

    :param src: seismic source object with ``num_ruptures`` and
        ``count_ruptures()``.
    :return: the (possibly freshly computed) number of ruptures.
    """
    if not src.num_ruptures:
        t0 = time.time()
        src.num_ruptures = src.count_ruptures()
        dt = time.time() - t0
        clsname = src.__class__.__name__
        if dt > 10:
            # logging.warn is a deprecated alias; use logging.warning
            if 'Area' in clsname:
                logging.warning('%s.count_ruptures took %d seconds, perhaps the '
                                'area discretization is too small', src, dt)
            elif 'ComplexFault' in clsname:
                logging.warning('%s.count_ruptures took %d seconds, perhaps the c'
                                'omplex_fault_mesh_spacing is too small', src, dt)
            elif 'SimpleFault' in clsname:
                logging.warning('%s.count_ruptures took %d seconds, perhaps the '
                                'rupture_mesh_spacing is too small', src, dt)
            else:  # multiPointSource
                logging.warning('count_ruptures %s took %d seconds', src, dt)
    return src.num_ruptures
1ae503627e0f971de00ce4dfbc449c2037514570
695,415
def icalevent2item(event):
    """Build a JSON-ready item dict from an icalendar event.

    The event SUMMARY is expected to end with a parenthesised suffix
    (e.g. ``"Name(G1)"``); only the part before the last ``(`` is kept
    as the race name.

    :param event: icalendar Event exposing ``decoded(key)``.
    :return: dict with ``Name``, ``Date`` (YYYYMMDD) and ``Course``.
    """
    summary = event.decoded('SUMMARY').decode()
    name_end = summary.rfind('(')
    return {
        'Name': summary[:name_end],
        'Date': event.decoded('DTSTART').strftime('%Y%m%d'),
        'Course': event.decoded('LOCATION').decode(),
    }
453e578f8e2eac6e399bb925bfeac6dd149f6dc7
695,416
from typing import Tuple
import subprocess


def run_in_shell(cmd: str) -> Tuple[bool, str]:
    """Execute *cmd* in the system shell and report the outcome.

    A malformed command or an error in the executed code surfaces through
    a non-zero exit status; an ``OSError`` while launching the command is
    also caught and reported.

    :return success: True if the command exited with status 0.
    :return output: the command's text output, or the error message when
        an ``OSError`` occurred.
    """
    try:
        status, text = subprocess.getstatusoutput(cmd)
    except OSError as os_err:
        return False, str(os_err)
    return status == 0, text
81ef80fdef28714600afd163c9dc9fdae09f9d0f
695,417
import os


def make_path(path: str) -> str:
    """Create the directory *path* (with parents) if it does not exist.

    Args:
        path (str): Path to be created if missing.

    Returns:
        str: the same *path*, for chaining.
    """
    already_there = os.path.exists(path)
    if not already_there:
        os.makedirs(path)
    return path
291476930ccab8d857b5544601982af185b6ac89
695,418
import sys
import argparse


def color(value):
    # type: (str) -> bool
    """Strict converter for the color CLI option.

    'yes' -> True, 'no' -> False, 'auto' -> whether stdout is a TTY;
    anything else raises ``argparse.ArgumentTypeError``.
    """
    fixed = {'yes': True, 'no': False}
    if value in fixed:
        return fixed[value]
    if value == 'auto':
        return sys.stdout.isatty()
    raise argparse.ArgumentTypeError(f"invalid choice: '{value}' (choose from 'yes', 'no', 'auto')")
3cbf58e63b2f982274e8304c8d0938a7eb4e3a5e
695,419
import os
import time
import requests


def retrieve_order(outdir: str, order_url: str):
    """Download an order to a local folder.

    :param outdir: full path to the order download folder (created if
        missing); the last segment of *order_url* is used as the file name.
    :param order_url: order download URL.
    :return: None
    """
    order_id = order_url.split("/")[-1]
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    outfile = outdir + os.sep + order_id
    print("Initiating download...")
    t = time.time()
    resp = requests.get(order_url, stream=True)
    print(resp.status_code)
    # BUG FIX: status_code is an int, so the old extra comparison with the
    # string "ordered" could never be true; only HTTP 200 means success.
    if resp.status_code == 200:
        print("Starting download...")
        with open(outfile, "wb") as f:
            for chunk in resp.iter_content(chunk_size=2048):
                f.write(chunk)
        if os.path.exists(outfile):
            # report average download speed in MB/s
            print(os.path.getsize(outfile) / 1024 / (time.time() - t) / 1024)
    else:
        print("Could not download {}".format(order_url))
        print("Error {}".format(str(resp.status_code)))
    print("Download of {} complete.".format(order_id))
    return None
0707ce160dd1a2bbf2e608caf9aab8a619871b92
695,421
def _format_training_params(params): """Convert dict pof parameters to the CLI format {"k": "v"} --> "--k v" Args: params (dict): Parameters Returns: str: Command line params """ outputs = [] for k, v in params.items(): if isinstance(v, bool): if v: outputs.append(f"--{k}") else: outputs.append(f"--{k} {v}") return " ".join(outputs)
bc0146afe7fb5201a78ee9cf7a0bb47a80bba4db
695,422
import os


def tail(filename, nlines):
    """Return the last *nlines* lines of a text file as one string.

    Scans backwards from the end of the file so the whole file is never
    read when only a short tail is needed.

    BUG FIX: the file is now opened in binary mode — seeking to arbitrary
    byte offsets is only well-defined on binary streams (text-mode offsets
    are opaque cookies and could misbehave on multi-byte encodings).

    :param filename: path of the file to read.
    :param nlines: number of trailing lines wanted; if the file has fewer
        lines, the whole file is returned.
    :return: the tail of the file, decoded as text.
    """
    with open(filename, "rb") as qfile:
        qfile.seek(0, os.SEEK_END)
        endf = position = qfile.tell()
        linecnt = 0
        # Walk backwards counting newlines; the newline that terminates the
        # very last line (at endf-1) does not start a new line.
        while position >= 0:
            qfile.seek(position)
            next_char = qfile.read(1)
            if next_char == b"\n" and position != endf - 1:
                linecnt += 1
            if linecnt == nlines:
                break
            position -= 1
        if position < 0:
            # fewer than nlines lines in the file: return everything
            qfile.seek(0)
        return qfile.read().decode()
1aca62b1ab223a6567cb8b879a8fb979d43887ba
695,423
def valid_parentheses_brackets(input_string: str) -> bool:
    """Determine whether brackets, braces, and parentheses are balanced.

    Works only on strings containing only brackets, braces, and
    parentheses (any other character is treated as an opener).
    Explanation: https://www.educative.io/edpresso/the-valid-parentheses-problem

    Time complexity: O(n); space complexity: O(n) for the stack.

    :param input_string: the string to validate.
    :return: True when every closer matches the most recent opener.

    >>> valid_parentheses_brackets('()[]{}')
    True
    >>> valid_parentheses_brackets('(})')
    False
    """
    pending: list = []
    opener_for = {')': '(', '}': '{', ']': '['}
    for ch in input_string:
        if ch in opener_for:
            if not pending or pending.pop() != opener_for[ch]:
                return False
        else:
            pending.append(ch)
    return not pending
fd531bc264fc56df699de67cae60b52cf51519c3
695,424
import os


def clear_directory(path):
    """Remove every regular file directly inside *path*.

    Creates *path* first if it does not exist.  Subdirectories are left in
    place, mirroring the old ``rm path/*`` behaviour.

    SECURITY/PORTABILITY FIX: the old implementation shelled out via
    ``os.popen('rm %s/*' % path)``, which was vulnerable to shell injection
    through *path*, ran asynchronously, and was Unix-only.  Deletion is now
    done with os-level calls.

    :param path: directory to clear.
    :return: True on success, False if deletion failed.
    """
    if not os.path.exists(path):
        os.mkdir(path)
    try:
        for entry in os.listdir(path):
            full = os.path.join(path, entry)
            if not os.path.isdir(full):
                os.remove(full)
    except OSError as e:
        print("error: %s" % e)
        return False
    return True
b03a832e8c2ae7b2b1d724c76d16be18d1b2305f
695,425
def do_chars_exist_in_db(conn, mal_id: int) -> bool:
    """Check whether character records exist for an anime.

    Args:
        conn: database connection (used as a context manager).
        mal_id (int): myanimelist id.

    Returns:
        bool: True if at least one anime_characters row matches the id.
    """
    sql = """select exists(select 1 from anime_characters where id=(%s))"""
    with conn:
        with conn.cursor() as cursor:
            cursor.execute(sql, (mal_id,))
            rows = cursor.fetchall()
    return rows[0][0]
b106342207feea434c59fe4493dc09c97ef3376c
695,426
def id_to_ec2_id(instance_id, template='i-%08x'):
    """Format an integer instance ID as an EC2-style ID (i-[base 16 number]).

    :param instance_id: integer instance identifier.
    :param template: printf-style template applied to the id.
    :return: the formatted EC2 id string.
    """
    return template % instance_id
7f1f65b4a846be1c46bc3d80c207f2b9ba0033af
695,427
def buildnavcell(prefix, dateString, mouseoverImg, mouseoutImg, name):
    """Build the HTML for a single navcell.

    prefix -- string tacked onto the front of the anchor name
    dateString -- the date, as a string, used in the anchor name
    mouseoverImg -- image displayed on mouseover
    mouseoutImg -- image displayed on mouseout
    name -- name of this navcell
    """
    template = """<a href="timeline.html#%s%s" target="timeline" onclick="onimgs('%s', '%s', '%s'); return true;" onmouseover="chimgs('%s', '%s'); return true;" onmouseout="chimgs('%s', '%s'); return true;"><img src="%s" alt="" width="25" height="20" border="0" name="%s" /></a>"""
    values = (prefix, dateString,
              name, mouseoverImg, mouseoutImg,
              name, mouseoverImg,
              name, mouseoutImg,
              mouseoutImg, name)
    return template % values
250ebba918b850a6d87fa5146fa9865c6acc14f9
695,428
import os
import json
import pprint


def read_filter_params(file_path: str, file_name: str = "filter.json", filter_str=None, metadata=False, verbose=False):
    """Read a G'MIC filter parameter definition from a JSON file.

    Selection rules:
    * if ``filter_str`` is None, the first filter of the first category
      is selected;
    * otherwise the first filter whose ``command`` or ``name`` contains
      ``filter_str`` (substring match, scan order) is selected.

    :param file_path: directory containing the JSON file.
    :param file_name: JSON file name (default "filter.json").
    :param filter_str: substring to match against filter command/name,
        or None for the first filter.
    :param metadata: if True, return a dict with the selected filter plus
        the full catalog of available filters keyed by command.
    :param verbose: if True, print categories/filters while scanning.
    :return: the selected filter dict, or (when ``metadata`` is True) a
        dict {"filter_dict": ..., "filter_list_dict": ...}.  The selected
        filter may be None when nothing matched.
    """
    filter_params = None
    fp = os.path.join(file_path, file_name)
    # errors="ignore" / strict=False: tolerate sloppy encodings and control
    # characters in the shipped filter file
    with open(fp, encoding="utf-8", errors="ignore") as f:
        filter_params = json.load(f, strict=False)
    filter_dict = None
    if filter_str is None:
        # default: first filter of the first category
        filter_dict = filter_params["categories"][0]["filters"][0]
    filter_categories = filter_params["categories"]
    gmic_filter_list_dict = {}
    for filter_category in filter_categories:
        filter_cat_name = filter_category["name"]
        if verbose:
            print("\n---- FILTER CATEGORY ", filter_cat_name, " ----")
        gmic_filter_list = filter_category["filters"]
        for gmic_filter in gmic_filter_list:
            # summary record for the catalog of all filters
            gmic_dict = {}
            gmic_dict["name"] = gmic_filter["name"]
            gmic_dict["command"] = gmic_filter["command"]
            gmic_dict["category"] = filter_cat_name
            gmic_dict["num_params"] = len(gmic_filter["parameters"])
            # first match wins; later matches are ignored because
            # filter_dict is no longer None
            if (filter_dict is None) and \
               ((filter_str in gmic_dict["command"]) or (filter_str in gmic_dict["name"])):
                filter_dict = gmic_filter
            if verbose:
                print(f' {gmic_dict["name"]} ({gmic_dict["command"]}), params:{gmic_dict["num_params"]}')
            gmic_filter_list_dict[gmic_dict["command"]] = gmic_dict
    if not (filter_dict is None) and verbose:
        print("\n------------------------")
        print(f"\nFilter {filter_dict['name']} {filter_dict['command']}\n{pprint.pformat(filter_dict)}")
        print("------------------------")
    if metadata:
        out = {"filter_dict": filter_dict, "filter_list_dict": gmic_filter_list_dict}
    else:
        out = filter_dict
    return out
bfa6916d1e810631e7bf81d32c3b2ee6ff5a28a9
695,429
def calc_prob_sr(pt, sl, freq, tgt_sr, rf=0.):
    """Calculate the required win probability to achieve a target Sharpe
    ratio.

    Paramters
    ---------
    pt: float
        Profit Take
    sl: float
        Stop Loss
    freq: float
        Frequency of trading
    tgt_sr: float
        Target Sharpe Ratio
    rf: float, (default 0)
        Risk Free Rate

    Returns
    -------
    float: Required probability
    """
    spread = pt - sl
    # solve the quadratic a*p^2 + b*p + c = 0 for the probability p
    a = (freq + tgt_sr ** 2) * spread ** 2
    b = spread * (2 * freq * (sl - rf) - tgt_sr ** 2 * spread)
    c = freq * (sl - rf) ** 2
    return (-b + (b ** 2 - 4 * a * c) ** .5) / (2. * a)
96b017e8ec18ed5c267bfd9eb0176f961481982e
695,430
import sqlite3


def get_brand_stats(db_file_name, table_name):
    """Count rows per brand in *table_name* of an SQLite database.

    NOTE(review): *table_name* is interpolated into the SQL, so it must
    come from trusted code, never user input.

    :param db_file_name: path of the SQLite database file.
    :param table_name: table with a ``brand`` column.
    :return: dict mapping brand (as str) to its row count.
    """
    connection = sqlite3.connect(db_file_name)
    cursor = connection.cursor()
    cursor.execute('''SELECT brand, count(brand) FROM {} GROUP BY brand'''.format(table_name))
    counts = {str(brand): total for brand, total in cursor.fetchall()}
    connection.commit()
    connection.close()
    return counts
96a422ed7579314b0ccc9e59bf0e1027ce70152b
695,432
def encode(integer: int) -> bytes:
    """Encode a non-negative integer as an uvarint (unsigned LEB128).

    Seven payload bits per byte, least-significant group first; the high
    bit of each byte is set while more bytes follow.

    :param integer: the integer to encode.
    :return: the uvarint-encoded bytes.
    """
    buf = bytearray()
    while integer >= 0x80:
        buf.append((integer & 0xFF) | 0x80)  # continuation bit set
        integer >>= 7
    buf.append(integer & 0xFF)  # final byte, continuation bit clear
    return bytes(buf)
da3b6b320ddcc39ecf494fca564d6d3ae06faea9
695,434
import requests


def request_html(url, method='GET', headers=None, proxies=None):
    """Fetch *url* and return the response body text.

    :param url: the URL to request.
    :param method: HTTP method (default 'GET').
    :param headers: optional request headers.
    :param proxies: optional proxy mapping.
    :return: the body text on HTTP 200, otherwise None (request errors
        are printed and swallowed).
    """
    try:
        response = requests.request(method, url, headers=headers, proxies=proxies)
    except requests.RequestException as exc:
        print(exc)
        return None
    return response.text if response.status_code == 200 else None
b7ee1a20da1bc718cf00befa40d45eaf351f9252
695,435
from typing import Dict
from typing import Any
import json
import base64


def extract_pubsub_payload(event: dict) -> Dict[str, Any]:
    """Extract and decode the payload from a PubSub event body.

    Args:
        event: PubSub event body, e.g.
            { "message": { "data": <base64 encoded object> } }

    Returns:
        Dict with the decoded PubSub event payload.

    Raises:
        KeyError: when a required key is missing or empty.
        TypeError: when the message is not a dict.
    """
    message = event.get("message")
    if not message:
        raise KeyError("Payload doesn't contain the 'message' key")
    if not isinstance(message, dict):
        raise TypeError("Event payload's message is of wrong data type.")
    data = message.get("data")
    if not data:
        raise KeyError("Payload's 'message' doesn't contain the 'data' key")
    return json.loads(base64.b64decode(data).decode("utf-8"))
05dadc17078fb723a385b7d36a7b9dd005c7e931
695,436
def sort_ortholog_information(faa_defs, faa_seqs):
    """Split interleaved ortholog records into the two species.

    Entries alternate: even indices belong to the first species, odd
    indices to the second.  The defs are primarily useful for their
    strand information.

    :param faa_defs: list of definition lines.
    :param faa_seqs: list of sequences, parallel to *faa_defs*.
    :return: (first_defs, first_seqs, second_defs, second_seqs).
    """
    first_species_defs = list(faa_defs[0::2])
    first_species_seqs = list(faa_seqs[0::2])
    second_species_defs = list(faa_defs[1::2])
    second_species_seqs = list(faa_seqs[1::2])
    return first_species_defs, first_species_seqs, second_species_defs, second_species_seqs
82fb26046cabcb74f383bbc27966322620cc49ed
695,437
def extract_column_names(file_lines):
    """Return (index, name) tuples for the data file's columns.

    Mutates *file_lines* by popping its first element (the header row),
    which is split on tabs after stripping trailing whitespace.

    :param file_lines: list of raw file lines; the header is consumed.
    :return: list of (index, column_name) tuples.
    """
    header = file_lines.pop(0)
    return list(enumerate(header.rstrip().split("\t")))
178b4229bf77fe84d37f3d09668f2c5851ca43db
695,438
import io


def parse_aliases_file(file_path):
    """Parse an emoji aliases text file.

    Blank lines and lines starting with '#' are skipped; trailing in-line
    comments are stripped.  Each remaining line is split on ';'.

    :param file_path: path of the aliases file (UTF-8).
    :return: list of tuples of the form ('src_name', 'dst_name').
    """
    aliases = []
    with io.open(file_path, encoding='utf-8') as fp:
        for raw in fp.read().splitlines():
            entry = raw.strip()
            if not entry or entry.startswith('#'):
                continue
            # strip in-line comments (a leading '#' was already handled)
            hash_pos = entry.find('#')
            if hash_pos > 0:
                entry = entry[:hash_pos].strip()
            aliases.append(tuple(entry.split(';')))
    return aliases
0ab98e0921655d7582c8a6261660da84f9287395
695,439
def make_goal(func):
    """Decorator that turns ``f(Substitution, ...)`` into a goal factory.

    For example:

        @make_goal
        def same(s, u, v):
            ...

    is equivalent to

        def same(u, v):
            def goal(s):
                ...
            return goal

    If the wrapped function has a docstring, the factory's docstring
    becomes "produce a " + that docstring.
    """
    def factory(*args, **kwargs):
        def goal(s):
            return func(s, *args, **kwargs)
        return goal
    if func.__doc__ is not None:
        factory.__doc__ = "produce a " + func.__doc__
    return factory
d5320d46d45fa749b7f3e6cea173fd41acca29b0
695,440
def _bb_has_tight_loop(f, bb): """ parse tight loops, true if last instruction in basic block branches to bb start """ return bb.offset in f.blockrefs[bb.offset] if bb.offset in f.blockrefs else False
05b703f487b7cf8a0a828f03fd78ab53a34d87da
695,442
from platform import python_compiler


def python_version(name):
    """Version of the compiler used to build the running Python.

    Parameters
    ----------
    name : str
        Compiler name (matched case-insensitively against the compiler
        string, e.g. "gcc").

    Returns
    -------
    float
        major.minor version, or 0.0 when Python was not built with
        that compiler.
    """
    compiler = python_compiler()
    if name not in compiler.lower():
        return 0.0
    # compiler string looks like "GCC 9.4.0": take the second word and
    # drop the patch component
    version = compiler.split(" ", 2)[1]
    return float(version.rsplit(".", 1)[0])
2353ca4d9156560e7109d83673a0c485c2b27437
695,443
def get_table_rows(soup):
    """Collect the <td> cells of every row in the first <tbody> of *soup*.

    :param soup: parsed document exposing ``find``/``find_all``.
    :return: list of rows, each a list of td elements.
    """
    body = soup.find('tbody')
    rows = []
    for row in body.find_all('tr'):
        rows.append(row.find_all('td'))
    return rows
b93e2968ff289c1380ee0f0be571897d9db06437
695,444
import re


def slug_to_name(slug):
    """Transform a slug into a display name (dashes become spaces).

    args:
        slug -- the slug string to be converted
    """
    dash = re.compile('-')
    return dash.sub(' ', slug)
4603d5d4e1206ced28d8d24c13c91030970fc8c7
695,445
def topleft2corner(topleft):
    """Convert boxes from (x, y, w, h) to (x1, y1, x2, y2).

    Args:
        topleft: indexable of four components (scalars or np.array rows,
            4 * N layout).
    Return:
        tuple (x1, y1, x2, y2) in the same component type.
    """
    left, top, width, height = topleft[0], topleft[1], topleft[2], topleft[3]
    return left, top, left + width, top + height
094b12c4c112e906714e717c7fce79321610f69c
695,446
def crop(im, slices=(slice(100, -100), slice(250, -300))):
    """Crop an image to contain only the plate interior.

    Parameters
    ----------
    im : array
        The image to be cropped.
    slices : tuple of slice objects, optional
        The per-axis slices defining the crop.  The defaults suit
        stitched images from the Marcelle screen.

    Returns
    -------
    imc : array
        The cropped image (a view of *im*).
    """
    return im[slices]
e3e7c2f737b0e589e6491cba44eb3c3aaee930d0
695,448
import importlib


def internal_load_class(data: dict):
    """For internal use only.

    Resolve the class named by ``data["type"]`` from the matching RDS
    module, chosen by the type-name suffix (Token / Service / User).

    :param data: dict that must contain a "type" key.
    :return: the resolved class object.
    :raises ValueError: if *data* is not a dict, has no "type" key, or
        the type name matches no known suffix.
    """
    if not isinstance(data, dict):
        raise ValueError("Given parameter not a dict object.")
    if "type" in data:
        type_name = data["type"]
        # first matching suffix wins, mirroring the original ordering
        for suffix, module_name in (("Token", "RDS.Token"),
                                    ("Service", "RDS.Service"),
                                    ("User", "RDS.User")):
            if type_name.endswith(suffix):
                mod = importlib.import_module(module_name)
                return getattr(mod, type_name)
        raise ValueError("given parameter is not a valid class.")
    raise ValueError("Type not specified in dict.")
169761bd81d590e2a51137f5f1732c72a463d75f
695,450
def _get_groupings(input_df):
    """Return groupings of table data based on filled/empty columns.

    Rows are grouped by their blank/non-blank *pattern* across columns:
    each distinct combination of empty vs. filled cells becomes one group,
    with a row count and a stable group index.

    :param input_df: pandas DataFrame that must contain a non-blank,
        constant "TenantId" column (used as the count column).
    :return: DataFrame with one row per blank-pattern group, boolean
        columns for each original column (True = blank), plus
        "group_index" and "count" columns.
    """
    input_cols = list(input_df.columns)
    # change any NAs to blank strings (this might cause TypeErrors !)
    input_df = input_df.fillna("")
    # Create a DF with True if a col value is blank
    blank_cols = input_df == ""
    # save the index as "orig_index" column
    blank_cols = blank_cols.reset_index().rename(columns={"index": "orig_index"})
    # remove one col - since we need something to count() in our group_by
    # statement
    # TenantId is a good choice since we know it's always non-blank
    # and the same value
    input_cols.remove("TenantId")
    # group the bool DF by all the cols
    return (
        blank_cols
        .groupby(list(input_cols))
        .count()
        .reset_index()  # reset index to get back a DF
        .reset_index()  # reset again to create an "index" column
        .rename(columns={"TenantId": "count", "index": "group_index"})
        .drop(columns=["orig_index"])
    )
84d6a714b0835beead7f857bdf05a0ed21e4e721
695,452
def build_speech_response(title, ssml_output, plain_output):
    """Build a speech JSON representation of the title, output text,
    and end of session.

    In this app, the session always ends after a single response.
    """
    speech = {'type': 'SSML', 'ssml': ssml_output}
    card = {'type': 'Simple', 'title': title, 'content': plain_output}
    return {'outputSpeech': speech, 'card': card, 'shouldEndSession': True}
2d38b9d0d8a261c6011eec3416227e972329de86
695,454
def bin2hex(s):
    """Convert a bit string to hexadecimal via ``hex(int(str, 2))``.

    The bit string is right-padded with zeros to a multiple of 8 bits
    (decoding proceeds front-to-back), and the leading '0x' prefix is
    removed from the result.

    :param s: string of '0'/'1' characters.
    :return: hexadecimal string without the '0x' prefix.
    """
    pad = (8 - len(s) % 8) % 8
    padded = s + '0' * pad
    return hex(int(padded, 2))[2:].rstrip('L')
02e9f0753aff776278eb53a9667683418b6fb425
695,455
def check_input(fit_function):  # pylint: disable=no-self-argument
    """Decorator validating fit input args.

    ``pk`` and ``k`` are run through the object's ``check_pk`` /
    ``check_k`` validators before the wrapped function executes.
    """
    def wrapper(obj, pk, k=None):
        checked_pk = obj.check_pk(pk)
        checked_k = obj.check_k(k)
        return fit_function(obj, checked_pk, k=checked_k)
    return wrapper
c8bf35dbea4f5db1f4764a311946f37697ef6865
695,456
import copy


def nondimensionalise_parameters(params):
    """Nondimensionalise the model parameters.

    :param params: object whose ``param`` attribute is a dict of the
        dimensional quantities: characteristic radius ``rc``, flow ``qc``,
        density ``rho``, radii ``Ru``/``Rd``/``R_term``, length ``L``,
        stiffness constants ``k1``/``k2``/``k3``, viscosity ``nu``,
        pressures ``p0``/``p_term``, Windkessel values ``R1``/``R2``/``CT``.
        (The old docstring listed these as direct arguments, which was
        wrong.)
    :return: deep-copied dict with those quantities scaled by ``rc``,
        ``qc`` and ``rho``, plus the Reynolds number ``Re``.
    """
    param = params.param
    # NOTE: the previous version also read params.solution / params.geo
    # into unused locals; removed as dead code.
    nondim = copy.deepcopy(param)
    rc = param['rc']
    rho = param['rho']
    qc = param['qc']
    nondim['Ru'] = param['Ru']/rc
    nondim['Rd'] = param['Rd']/rc
    nondim['R_term'] = param['R_term']/rc
    nondim['L'] = param['L']/rc
    nondim['k1'] = param['k1']*rc**4/rho/qc**2
    nondim['k2'] = param['k2']*rc
    nondim['k3'] = param['k3']*rc**4/rho/qc**2
    nondim['Re'] = param['qc']/param['nu']/rc
    nondim['nu'] = param['nu']*rc/qc
    nondim['p0'] = param['p0']*rc**4/rho/qc**2
    nondim['p_term'] = param['p_term']*rc**4/rho/qc**2
    nondim['R1'] = param['R1']*rc**4/rho/qc
    nondim['R2'] = param['R2']*rc**4/rho/qc
    nondim['CT'] = param['CT']*rho*qc**2/rc**7
    return nondim
d12200d10b4ee25bf3bfdec7f461e00e431cd7e0
695,457
def qtr_offset(qtr_string, delta=-1):
    """Offset a quarter string by *delta* quarters.

    :param qtr_string: quarter in 'YYYYQn' form (e.g. '2005Q1').
    :param delta: number of quarters to shift by (default -1, i.e. the
        previous quarter).
    :return: the shifted quarter string, e.g. '2004Q4'.
    """
    year, quarter = map(int, qtr_string.split('Q'))
    quarter -= 1  # work zero-based for the modular arithmetic
    new_quarter = (quarter + delta) % 4 + 1
    new_year = year + (quarter + delta) // 4
    # (removed dead code: the old `if new_q == 0: new_q = 4` branch could
    # never fire, since the expression above always yields a value in 1..4)
    return '{:.0f}Q{:d}'.format(new_year, new_quarter)
e14465ad5ab600a809592a4a3a8d2aa624035515
695,458
from zlib import decompress
from base64 import b64decode


def decompress_string(string: str) -> str:
    """Decompress a UTF-8 string produced by ``compress_string``.

    The input is base64-decoded, zlib-decompressed, and decoded as UTF-8.

    :param string: base64-encoded string to be decompressed.
    :return: the original string.
    """
    compressed = b64decode(string.encode('utf-8'))
    return decompress(compressed).decode('utf-8')
e9c8cfd4f226e4bae5d00e32428c8c028b03797c
695,459
def create_dense_state_space_columns(optim_paras):
    """Create internal column names for the dense state space.

    Observable names always appear; a "type" column is appended only when
    the model has two or more unobserved types.
    """
    names = list(optim_paras["observables"])
    if optim_paras["n_types"] >= 2:
        names.append("type")
    return names
979a17c32dbe9a31e52b2dfb16d9771bd7a4746b
695,460
def calculate_overall_score(
        google_gaps_percentage: float,
        transcript_gaps_percentage: float,
        google_confidence: float,
        alignment_score: float,
        weight_google_gaps: float,
        weight_transcript_gaps: float,
        weight_google_confidence: float,
        weight_alignment_score: float
) -> float:
    """Score predicting whether an alignment is "good".

    A weighted sum of the four quality signals.

    :param google_gaps_percentage: Percentage of gaps added to google's STT output
    :param transcript_gaps_percentage: Percentage of gaps added to the transcript
    :param google_confidence: Confidence of google's STT
    :param alignment_score: Final score of the alignment algorithm
    :param weight_google_gaps: Weight for weighted sum
    :param weight_transcript_gaps: Weight for weighted sum
    :param weight_google_confidence: Weight for weighted sum
    :param weight_alignment_score: Weight for weighted sum
    :return: Score between 0 and 1
    """
    score = weight_google_gaps * google_gaps_percentage
    score += weight_transcript_gaps * transcript_gaps_percentage
    score += weight_google_confidence * google_confidence
    score += weight_alignment_score * alignment_score
    return score
ea7d92c694dd477b9cd801eb8c91f969a8d757e1
695,461
def toIndex(coord, size):
    """Flatten a 3-D coordinate into a linear index for a size^3 cube."""
    x, y, z = coord[0], coord[1], coord[2]
    return (x * size + y) * size + z
82ff848d4c728129d0ad6691d3d960dff12ef388
695,462
def scal(u, v):
    """Return the dot product u.v of the two vectors."""
    total = 0
    for i in range(len(u)):
        total += u[i] * v[i]
    return total
f35c7ab378e809db8e7f13432a62da3310ad7611
695,463
def recursive_thue_morse(n):
    """Return the n-th term (0 or 1) of the Thue-Morse sequence.

    The first few terms are: 0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 0 ...

    Uses integer floor division; the old code used ``/``, which produced
    floats in the recursion and would lose precision for n > 2**53.

    :param n: non-negative integer index.
    :return: 0 or 1.
    """
    if n == 0:
        return 0
    if n % 2 == 0:
        return recursive_thue_morse(n // 2)
    return 1 - recursive_thue_morse((n - 1) // 2)
8fba270d9a62bf3ea4a2b320a19876228255bd0f
695,464
from typing import Dict


def add_information(statistic_dict: Dict, statistic_information: Dict):
    """Append one measurement to the running statistics dict.

    Arguments:
        statistic_dict {Dict} -- Existing dict of lists (dates,
            people_total, people_with_mask, people_without_mask,
            mask_percentage, visible_people).
        statistic_information {Dict} -- New data point with keys
            datetime, people_total, people_with_mask, people_without_mask.

    Returns:
        Dict: the mutated *statistic_dict*, for chaining.
    """
    total = statistic_information["people_total"]
    masked = statistic_information["people_with_mask"]
    unmasked = statistic_information["people_without_mask"]
    statistic_dict["dates"].append(statistic_information["datetime"])
    statistic_dict["people_total"].append(total)
    statistic_dict["people_with_mask"].append(masked)
    statistic_dict["people_without_mask"].append(unmasked)
    # guard against division by zero when nobody was detected
    statistic_dict["mask_percentage"].append(masked * 100 / total if total != 0 else 0)
    statistic_dict["visible_people"].append(masked + unmasked)
    return statistic_dict
3b45591d4fe96a5cd93552b322869495a007fe86
695,465
def flip_edges(adj, edges):
    """Flip the given edges in the graph (A_ij=1 becomes 0 and vice versa).

    Parameters
    ----------
    adj : sp.spmatrix, shape [n, n]
        Sparse adjacency matrix.
    edges : np.ndarray, shape [?, 2]
        Edges to flip.

    Returns
    -------
    adj_flipped : sp.spmatrix, shape [n, n]
        Sparse adjacency matrix with flipped edges.
    """
    flipped = adj.copy().tolil()
    if len(edges) > 0:
        rows, cols = edges[:, 0], edges[:, 1]
        flipped[rows, cols] = 1 - adj[rows, cols]
    return flipped
0e163616ddb645e636424d673500f07aedabf336
695,466
import socket def _check_ip_and_port(ip_address, port, timeout): """Helper function to check if a port is open. Args: ip_address(str): The IP address to be checked. port(int): The port to be checked. timeout(float): The timeout to use. Returns: bool: True if a connection can be made. """ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as socket_: socket_.settimeout(timeout) return not bool(socket_.connect_ex((ip_address, port)))
f97ceaba05c54c4bb70e8731462469e4d89e1bbf
695,467
def pretty_eqdmr(eqdmr):
    """Pretty-print an executable QDMR program, one nesting level per tab.

    Opens a new indented line after '(' and ',', and dedents before ')'.

    Parameters
    ----------
    eqdmr : str
        string representation of the executable QDMR

    Returns
    -------
    str
        the indented, multi-line representation
    """
    depth = 0
    pretty = ''
    for ch in eqdmr:
        if ch == '(':
            depth += 1
            pretty += '(\n' + '\t' * depth
        elif ch == ',':
            pretty += ',\n' + '\t' * depth
        elif ch == ')':
            depth -= 1
            pretty += '\n' + '\t' * depth + ')'
        else:
            pretty += ch
    return pretty
d2d53d49a94f94e718a0aecc1af3ce320a3e95f4
695,468
def _add_thumb(s): """ Modifies a string (filename, URL) containing an image filename, to insert '.thumb' """ parts = s.split(".") parts.insert(-1, "thumb") if parts[-1].lower() not in ['jpeg', 'jpg']: parts[-1] = 'jpg' return ".".join(parts)
9fe7ba9d1e739828471e07091008cb8de47ea312
695,469
from typing import Any
import asyncio
import inspect


def is_coroutine(obj: Any) -> bool:
    """Check whether an object is really an asyncio coroutine function.

    Also accepts plain generator functions (pre-async/await coroutines).

    :param Any obj: any object.
    :return: `True` or `False`.
    """
    if asyncio.iscoroutinefunction(obj):
        return True
    return inspect.isgeneratorfunction(obj)
595582f9bd8fae930532cea4a0aa7e3b05e010be
695,470
import csv


def recuperer_donnee_csv(fichier, separateur=','):
    """Read a CSV file into a list of row lists.

    Parameters
    ----------
    fichier : string
        path of the CSV file to read.
    separateur : string, optional
        field delimiter used in the file. The default is ','.

    Returns
    -------
    data : list of lists
        one inner list of string fields per row.
    """
    with open(fichier, newline='', encoding='utf-8') as handle:
        return [row for row in csv.reader(handle, delimiter=separateur)]
a16ebe71654688d8963dfe0930a035b8953a6aef
695,471
import os


def get_connection_file():
    """Get (and create, if necessary) the auto-connection file path.

    If the 'VERTICAPY_CONNECTIONS' environment variable is set, it is
    taken as the full path to the auto-connection file.  Otherwise
    '~/.vertica/connections.verticapy' is used, creating the folder
    (mode 0700) when needed.

    Returns
    -------
    string
        the full path to the auto-connection file.
    """
    override = os.environ.get('VERTICAPY_CONNECTIONS')
    if override is not None:
        return override
    folder = os.path.join(os.path.expanduser('~'), '.vertica')
    os.makedirs(folder, 0o700, exist_ok=True)
    return os.path.join(folder, 'connections.verticapy')
80a5d9adeb5a9b671f57377897cd35685589375d
695,472
import torch


def compute_hessian(f, params):
    """Compute the Hessian matrix of *f* with respect to *params*.

    :param f: scalar tensor built from *params* with grad tracking.
    :param params: list of scalar tensors requiring grad.
    :return: tensor of shape (len(params), len(params)) whose [i, j]
        entry is d2f / (d params[i] d params[j]).
    """
    rows = []
    for p_i in params:
        row = []
        for p_j in params:
            first = torch.autograd.grad(f, p_j, create_graph=True)
            second = torch.autograd.grad(first, p_i, allow_unused=True,
                                         retain_graph=True)
            if second[0] is None:
                # no interaction between this pair: second derivative is 0
                second = (torch.tensor(0.),)
            row.append(second[0])
        rows.append(torch.stack(row))
    return torch.stack(rows).reshape((len(params), len(params)))
5dba4f34846f25b268e9156902abdee732c8a7b0
695,473
import os


def makedoedict(str1):
    """Parse '..'-terminated DOE blocks into two dicts.

    Each block's first line is expected to look like ``NAME = ...`` and
    its second line like ``... = BELONGS``.

    :param str1: text of blocks separated/terminated by '..'.
    :return: [blockdict, belongsdict] — blockdict maps the block name to
        its stripped text (with the '..' terminator restored), and
        belongsdict maps the name to its 'belongs' value.
    """
    blocks = str1.split('..')[:-1]  # drop the empty item after the last '..'
    blockdict = {}
    belongsdict = {}
    for raw in blocks:
        text = raw.strip()
        lines = text.split(os.linesep)
        name = lines[0].split('=')[0].strip()
        belongs = lines[1].split('=')[-1].strip()
        # put the '..' terminator back into the stored block
        blockdict[name] = text + os.linesep + '..' + os.linesep + os.linesep
        belongsdict[name] = belongs
    return [blockdict, belongsdict]
fca4884f36181072e173ed2941fbf2a2b5a48ec2
695,474
import re


def format_docstring(docstring):
    """Formats a docstring.

    Any leading run of '#' characters at the start of a line is converted
    to '#### ' for formatting consistency.

    # args
    - docstring -str: the docstring to be formatted; non-strings yield None.
    """
    if not isinstance(docstring, str):
        return None
    return re.sub(r'^#+ ?', '#### ', docstring, flags=re.MULTILINE)
62a6c9d470be454b4c9dc2e386cd9be81450df4e
695,475
def get_id_name(module, id_to_check):
    """Return the ID name if defined, otherwise the numbered ID as a string.

    :param module: object with a ``symbol_name_to_id`` mapping.
    :param id_to_check: numeric id to look up.
    """
    for symbol_name, symbol_id in module.symbol_name_to_id.items():
        if symbol_id == id_to_check:
            return symbol_name
    return str(id_to_check)
5647b32d86bee44d302e551ffc0e8c290254ea10
695,476
def rect_radius(ellipsoid):
    """Computes the Rectifying Radius of an Ellipsoid with specified
    Inverse Flattening (See Ref 2 Equation 3).

    :param ellipsoid: Ellipsoid object with ``semimaj`` and ``inversef``.
    :return: Ellipsoid Rectifying Radius
    """
    flattening = 1 / float(ellipsoid.inversef)
    # third flattening n = f / (2 - f)
    n = flattening / (2 - flattening)
    n2 = n ** 2
    # truncated series in powers of n^2 (Horner form)
    series = (n2 * (n2 * (n2 * (25 * n2 + 64) + 256) + 4096) + 16384) / 16384.
    return ellipsoid.semimaj / (1 + n) * series
525e6428de9a34f5f1f10bdb9bc1b0943f435e70
695,477
import os


def normalizePath(path):
    """Normalize *path* and recover the on-disk casing of each component.

    Slashes are normalized and the path is made absolute. If the path does
    not refer to an existing file it is returned as-is (this also covers
    wrong-cased paths on case-sensitive file systems). Otherwise each
    component is matched case-insensitively against the directory listing
    so the real on-disk casing is reconstructed. Returns the (possibly
    unmodified) path, or the original normalized path on ambiguity.
    """
    path = os.path.normpath(os.path.abspath(path))
    # Nothing to recover for non-files; return the normalized form as-is.
    if not os.path.isfile(path):
        return path
    drive, rest = os.path.splitdrive(path)
    fullpath = drive.upper() + os.sep
    for part in rest.lower().split(os.sep):
        if not part:
            continue
        matches = [entry for entry in os.listdir(fullpath)
                   if entry.lower() == part]
        if len(matches) > 1:
            # two entries differ only by case -- cannot decide
            print("Error normalizing path: Ambiguous path names!")
            return path
        if not matches:
            print("Invalid path (part %s) in %s" % (part, fullpath))
            return path
        fullpath = os.path.join(fullpath, matches[0])
    return fullpath
633e5518f8716443770e7570c495f7ba4a541a01
695,478
def endgame_score_connectfour_faster(board, is_current_player_maximizer):
    """Given an endgame board, returns an endgame score with abs(score) >= 1000.

    Wins reached with fewer pieces on the board score a larger absolute
    value, rewarding faster wins (and slower losses).
    """
    def has_winning_chain(chains):
        # a chain of four or more pieces is a win
        return any(len(chain) >= 4 for chain in chains)

    maximizer_chains = board.get_all_chains(
        current_player=is_current_player_maximizer)
    minimizer_chains = board.get_all_chains(
        current_player=not is_current_player_maximizer)
    if has_winning_chain(maximizer_chains):
        return 1100 - board.count_pieces()
    if has_winning_chain(minimizer_chains):
        return -1100 + board.count_pieces()
    return 0
24f22b4d76edcdae5918c4327855aa432f404a68
695,479
def ip_as_int(ip_address):
    """Convert a dotted-quad IPv4 string (e.g. ``"1.2.3.4"``) to an int."""
    octets = [int(chunk) for chunk in ip_address.split(".")]
    # most-significant octet first: a.b.c.d -> a<<24 | b<<16 | c<<8 | d
    value = octets[3]
    value += octets[2] << 8
    value += octets[1] << 16
    value += octets[0] << 24
    return value
fd2c83b3b279bbdce240cbb11acf7334c8e2d553
695,480
import types


def dump_packages():
    """Return ``{module_name: version}`` for versioned modules in globals().

    Only module objects found in this module's globals are considered;
    ``sys``, ``__builtins__``, ``types`` and ``os`` are skipped, as are
    modules that expose no ``__version__`` attribute.
    """
    skip_list = ["sys", "__builtins__", "types", "os"]
    versions = {}
    for value in globals().values():
        if not isinstance(value, types.ModuleType):
            continue
        if value.__name__ in skip_list:
            continue
        try:
            versions[value.__name__] = value.__version__
        except AttributeError:
            # module has no version metadata; ignore it
            pass
    return versions
f6f4cf9c86962f68c998f2e3b127326837c12ac0
695,481
def padding_size(length: int, block_size: int, ceil: bool = True) -> int:
    """Round *length* to a multiple of *block_size*.

    With ``ceil=True`` (default), return the smallest multiple >= length;
    otherwise the largest multiple <= length.

    Args:
        length: the length in bytes
        block_size: the block size in bytes (not bits)
        ceil: round up when True, down when False
    """
    remainder = length % block_size
    if not ceil:
        return length - remainder
    if remainder:
        return length + block_size - remainder
    return length
3f87de905962e70f2514617bd10472a4087ad9f3
695,482
from typing import Any def _is_simplekv_key_value_store(obj: Any) -> bool: """ Check whether ``obj`` is the ``simplekv.KeyValueStore``-like class. simplekv uses duck-typing, e.g. for decorators. Therefore, avoid `isinstance(store, KeyValueStore)`, as it would be unreliable. Instead, only roughly verify that `store` looks like a KeyValueStore. """ return hasattr(obj, "iter_prefixes")
1529b4531aabc50163cd00ebbb6b4aef1f99e5bd
695,483
import typing


def list_difference(list_a, list_b) -> typing.List:
    """Return the elements of *list_a* that do not appear in *list_b*.

    Note this is one-sided (not a symmetric difference) and preserves
    the order and multiplicity of *list_a*.
    """
    excluded = list_b
    return [item for item in list_a if item not in excluded]
0746a34c4f24ae2f46d5c176568e733d54ba7dea
695,484
def check_ranges(cpe_item, version):
    """Check a version against CPE range keywords.

    CPE may express affected versions via
    ``versionStart/End`` + ``Including/Excluding`` keys (e.g. everything
    from 4.0.0 up to 4.5.0).

    :param cpe_item: cpe data (dict possibly containing range keys)
    :param version: version string to check
    :return: True if the version falls inside every specified bound
    """
    # (key, predicate-that-must-hold) pairs for each possible bound
    bounds = (
        ("versionStartIncluding", lambda v, limit: v >= limit),
        ("versionStartExcluding", lambda v, limit: v > limit),
        ("versionEndIncluding", lambda v, limit: v <= limit),
        ("versionEndExcluding", lambda v, limit: v < limit),
    )
    for key, holds in bounds:
        if key in cpe_item and not holds(version, cpe_item[key]):
            return False
    return True
614148e671b7d6c526badf02f784bd7669b37ec0
695,486
def CStringIo_to_String(string_io_object):
    """Return the full buffered contents of a StringIO-like object.

    Inverse of String_to_CStringIo.
    """
    contents = string_io_object.getvalue()
    return contents
21f2b027f1eb43063bc24df25db2c2098d894d46
695,488
def is_valid_response(resp):
    """Validate a Discovery response.

    A response is invalid only when it explicitly reports
    ``result == "failure"``; a missing ``result`` key counts as valid.
    """
    result = resp.get("result")
    return result != "failure"
e008cc34eb43906bc67f2ad645625777241b76e2
695,489
import struct def _pack(pv, chksum, box_data, party_data): """Pack Pokémon data into a PKM file. Keyword arguments: pv (int) -- the Pokémon's personality value chksum (int) -- the data's checksum data (string) -- the Pokémon data """ chunks = [ struct.pack('<L', pv), '\x00\x00', struct.pack('<H', chksum), box_data, party_data, ] return ''.join(chunks)
ebae4778b791ed99abb1dbb39f1fcfd6bec9ef33
695,490
def is_valid_provider(user_input: str, static_provider: str) -> bool:
    """Validate a user's provider input irrespective of case.

    Non-string inputs (e.g. None) are treated as invalid rather than
    raising.
    """
    try:
        normalized_input = user_input.lower()
        normalized_provider = static_provider.lower()
    except AttributeError:
        # one of the arguments is not a string
        return False
    return normalized_input == normalized_provider
7dad54bf8b2ba3a705f54915b094e3633a72ce2f
695,491
def print_diff(a1, a2):
    """Return elements that occur in only one of the two lists.

    Elements from *a1* absent from *a2* are kept with their original
    multiplicity; elements unique to *a2* are then appended at most once.
    """
    diff = [item for item in a1 if a2.count(item) == 0]
    for item in a2:
        if a1.count(item) == 0 and diff.count(item) == 0:
            diff.append(item)
    return diff
519ab38a193cf53fe022e8cba1627252abe6dc81
695,492
import sys
import inspect


def fident(f):
    """Get an identifier string for a function or method.

    Format is ``parent<sep>name:lineno`` where ``<sep>`` is '.' for
    (Python 2 style) bound methods carrying ``im_class`` and ':' for plain
    functions, whose parent is the last component of their module name.
    """
    if hasattr(f, "im_class"):
        joinchar = "."
        fparent = f.im_class.__name__
    else:
        joinchar = ":"
        fparent = f.__module__.split(".")[-1]
    # sometimes inspect.getsourcelines() segfaults on windows,
    # and frozen executables carry no source at all
    if getattr(sys, "frozen", False) or sys.platform == "win32":
        lineno = 0
    else:
        lineno = inspect.getsourcelines(f)[1]
    fullname = joinchar.join((fparent, f.__name__))
    return ":".join((fullname, str(lineno)))
448d5257996a6a14fb047aacd3392f194b89301b
695,495
import ntpath
import os


def split_path(filepath):
    """Split a file path into (folder path, file name, extension).

    Uses ntpath so both '/' and '\\' separators are handled.
    """
    folder, basename = ntpath.split(filepath)
    name, extension = os.path.splitext(basename)
    return folder, name, extension
5622cec9f3223f5d41c5e9e12c05f07169ab7db2
695,496
def rateTSCan(params: dict, states: dict) -> float:
    """Development rate of the plant: d(TSCan)/dt, in [oC].

    TSCan (temperature sum, [oC d]) starts negative in the vegetative
    phase, reaches 0 at first fruit set, and drives the carbohydrate
    distribution to the fruits linearly up to TSSumEnd, after which the
    distribution stays at its potential value.

    Parameters
    ----------
    params : dict
        ``tau``: time constant.
    states : dict
        ``TCan``: simulated or measured canopy temperature [oC].

    Returns
    -------
    float
        Time derivative of the canopy temperature sum.
    """
    inverse_tau = 1 / params['tau']
    return inverse_tau * states["TCan"]
7ab4c8242c26a846c719a304ead1cb7748513c0b
695,497
import requests


def get_from_api(url, *, verbose=False):
    """Perform a GET request to *url* with the ProPublica API key header.

    The key is read from the first line of ``APIKey.txt`` in the working
    directory. With ``verbose=True`` progress messages are printed.
    Returns the ``requests`` response object.
    """
    def vprint(*a, **kwa):
        if verbose:
            print(*a, **kwa)

    with open("APIKey.txt", "r") as keyFile:
        apiKey = keyFile.readline()
    # drop the trailing newline readline() keeps
    if apiKey[-1] == '\n':
        apiKey = apiKey[:-1]
    headers = {'X-API-Key': apiKey}
    vprint("getting", url, "with headers", headers, "...")
    r = requests.get(url, headers=headers)
    vprint("...done")
    return r
b0ac6365008e145d08376087b97d20a9df0f7d65
695,498
import json


def json_querystring(query_string, sort=None):
    """Generate the JSON body for an Elasticsearch query_string query.

    A truthy *sort* is attached under the ``sort`` key; falsy values
    (None, empty list, ...) are omitted.
    """
    payload = {'query': {'query_string': query_string}}
    if sort:
        payload['sort'] = sort
    return json.dumps(payload)
11c8119009994644e8b1e14d8282b7e3b1b04dd7
695,499
def get_total_timings(results, env, overhead_time):
    """Sum all timings across *results* and store the totals in *env*.

    If any result lacks detailed timings, *env* is returned untouched.
    ``overhead_time`` minus all accounted time becomes
    ``env['overhead_time']``.
    """
    detail_keys = ('framework_time', 'strategy_time',
                   'compile_time', 'verification_time')
    totals = dict.fromkeys(detail_keys, 0)
    total_kernel_time = 0
    if results:
        for result in results:
            if any(key not in result for key in detail_keys):
                # no detailed timings available; leave env untouched
                return env
            for key in detail_keys:
                totals[key] += result[key]
            total_kernel_time += sum(result['times']) if 'times' in result else 0

    # add the separate totals to the environment dict
    env['total_framework_time'] = totals['framework_time']
    env['total_strategy_time'] = totals['strategy_time']
    env['total_compile_time'] = totals['compile_time']
    env['total_verification_time'] = totals['verification_time']
    env['total_kernel_time'] = total_kernel_time
    accounted = (totals['framework_time'] + totals['strategy_time'] +
                 totals['compile_time'] + totals['verification_time'] +
                 total_kernel_time)
    env['overhead_time'] = overhead_time - accounted
    return env
b6d7fe606ca3241f3030cf306e585c806f5c5d5b
695,500
import re def _get_vfab_pprofile_index(vfab_id, pprofile, mac_address, running_config): """Gets the index for vfab pprofile.""" index = None match = re.search( r"^vfab\s+{vfab_id}\s+pprofile\s+(\d+)\s+" r"vsiid\s+mac\s+{mac}\s+{pid}\b".format( vfab_id=vfab_id, mac=mac_address, pid=pprofile), running_config, re.MULTILINE) if match: index = match.group(1) return index
45e8a03be840b0738f589aefefbd0ac4383f17c4
695,501
import re


def check_no_splcharacter(test):
    """Return True when *test* contains none of the special characters below.

    :param test: string to check
    :return: True when clean, False when a special character is present
    """
    # noinspection RegExpRedundantEscape
    special_chars = re.compile('[@!#$%^&*()<>?/\|}{~:]')
    return special_chars.search(test) is None
65eb3222d3bbbcc5fc876c2aecc47dddf1a12223
695,502
def _get_nc_attr(var, attr, default=""): """ _get_nc_attr This function, looks inside a netCDF 4 Dataset object for the attributes of a variable within the object. If the attribute specified for the variable exists then the attribute is returned. If the attribute is not associated with the variable then an empty string is returned as default :param var: variable name [string] :param attr: attribute name [string] :param default: Default value to be returned [string] :return: variable attribute (default is empty) [string] """ if attr in var.ncattrs(): return getattr(var, attr) else: return default
553c44a41a4bd1abf0dea721eff2bc6c19496a73
695,503
def shift_origin_dict(dictionary1, origin_list):
    """Shift all (x, y, z) keys of *dictionary1* by *origin_list*.

    Dictionary version of "new_origin": returns a new dict whose keys are
    translated by the offsets in ``origin_list[0:3]``; values are kept.
    """
    dx, dy, dz = origin_list[0], origin_list[1], origin_list[2]
    return {
        (x_co + dx, y_co + dy, z_co + dz): value
        for (x_co, y_co, z_co), value in dictionary1.items()
    }
bbc145ab8c942c958b8a7b0e8312920b8ec30972
695,504
def trivial(array: list, lenght: int) -> int:
    """Maximum over all length-`lenght` windows of each window's minimum.

    Brute-force O(n * k) solution: slide the window across *array*, take
    the minimum of each window, and return the largest such minimum.
    """
    window_minima = (
        min(array[start:start + lenght])
        for start in range(len(array) - lenght + 1)
    )
    return max(window_minima)
f3f43954e8d02db24e08f7a78b1599666882c28b
695,505
import argparse


def parseargs():
    """Parse the command-line arguments.

    Requires an input pcap file; output file is optional. ``--hex`` and
    ``--data`` are mutually exclusive and both set ``hex_mode``
    (default False, i.e. data mode).
    """
    parser = argparse.ArgumentParser(
        description='Decodes the content of a pcap file',
        epilog="Usually used to extract the file data from the a network trace (pcap) file. Remember last option wins.")
    parser.add_argument(
        '-i', '--in', dest='input_file', required=True,
        help='The pcap file to read')
    parser.add_argument(
        '-o', '--out', dest='output_file', required=False,
        help='The output file to save.')
    output_mode = parser.add_mutually_exclusive_group()
    output_mode.add_argument(
        '--hex', default=False, action='store_true', dest='hex_mode',
        help='Output Hex Dump. Overrides --data')
    output_mode.add_argument(
        '--data', action='store_false', dest='hex_mode',
        help='Output data. This is probably what you want. Overrides --hex')
    return parser.parse_args()
d64a6e4ccfde75c9ccc37602bfee452d55e2678c
695,507
def chr_or_input(m):
    """Return the printable ASCII char for a captured ordinal, else a marker.

    *m* is a regex match whose first group is a decimal ordinal. Ordinals
    in the printable range (32..127) map to their character; anything else
    becomes the placeholder ``__<n>__``.
    """
    code = int(m.groups()[0])
    return chr(code) if 31 < code < 128 else "__%d__" % code
d8cc94fe33e65a0d698f1828650406312530c813
695,508
import requests
import json


def yelp_search(api_key, params):
    """Make an authenticated business search request to the Yelp API.

    api_key: Yelp API key string (sent as a Bearer token)
    params: query parameters, e.g.
        term: keywords to search (tacos, etc.)
        location: location keywords (Seattle, etc.)

    Returns the parsed JSON response.
    """
    headers = {"Authorization": "Bearer %s" % api_key}
    response = requests.get(
        "https://api.yelp.com/v3/businesses/search",
        params=params, headers=headers)
    return json.loads(response.text)
cc4e79c72822f805c49f9be021297985cef396c3
695,509
def get_valid_image_ranges(experiments):
    """Extract valid image ranges from experiments.

    For each experiment with a scan, the scan's valid image ranges (keyed
    by the experiment identifier) are collected; scanless experiments
    contribute None, keeping the result aligned with the input.
    """
    ranges = []
    for experiment in experiments:
        scan = experiment.scan
        entry = scan.get_valid_image_ranges(experiment.identifier) if scan else None
        ranges.append(entry)
    return ranges
ddfcb091fabb0c70f7a6a3fce8b43396574a797c
695,510
def isc_250m_to_1km(i_sc_250m):
    """Return the 1 km grid cross-track index for a 250 m pixel index.

    Four 250 m pixels span one 1 km cell, so the index scales by 1/4;
    the result is a float (not truncated).
    """
    return i_sc_250m * 0.25
a183dbf59bad703e70ce9c7e09c2bac7a6417fc2
695,511
import re


def extract_fasta_id(seqIdString):
    """Extract the id from a FASTA description string.

    Takes the characters up to the first '|' or whitespace; returns None
    when no id can be found.
    """
    pattern = re.compile(r'^\s*?([^|]+?)(\s|\||$)')
    match = pattern.search(seqIdString)
    return match.group(1) if match else None
fa3cc6a57fc2ded2f1ee62a80254076eada1df63
695,512
def compile_vprint_function(verbose):
    """Compile a verbose print function.

    Args:
        verbose (bool): whether messages should actually be printed

    Returns:
        A callable ``(msg, *args) -> None`` that prints
        ``msg.format(*args)`` when verbose, and does nothing otherwise.
    """
    if not verbose:
        # silent variant: swallow everything
        return lambda *_: None

    def vprint(msg, *args):
        print(msg.format(*args))

    return vprint
d2fe3b93b09011f63df54eb162e270303c328cb9
695,513
from typing import Callable


def custom_splitter(separator: str) -> Callable:
    """Build a key splitter for :py:meth:`flatten_dict.unflatten` with a
    configurable separator.

    The returned callable splits a flat key on *separator* and returns the
    pieces as a tuple.
    """
    def split_flat_key(flat_key) -> tuple[str, ...]:
        return tuple(flat_key.split(separator))

    return split_flat_key
88417ecc172986e4cb8423f443da470e917406d2
695,515
import pathlib


def explore_directories(path_dict: dict):
    """Recursively expand directory-valued entries of *path_dict* in place.

    Every value that names an existing directory is replaced by a nested
    dict mapping each child's stem to its path; subdirectories are
    expanded recursively, while files stay as path objects. Values that
    are not directories are left untouched. Returns the mutated dict.
    """
    for key, value in path_dict.items():
        if not pathlib.Path(value).is_dir():
            continue
        # map each direct child by its stem, then recurse into the result
        children = {
            pathlib.Path(child).stem: child
            for child in pathlib.Path(value).glob("*")
        }
        explore_directories(children)
        path_dict[key] = children
    return path_dict
65abfdcdcae509785097a8f15c2a775249481c8d
695,516
def get_required_capacity_types_from_database(conn, scenario_id):
    """
    Get the required capacity-type modules for the specified scenario_id.

    Required modules are the unique set of generator capacity types in the
    scenario's project portfolio.

    :param conn: database connection
    :param scenario_id: int, user-specified scenario ID
    :return: list of the required capacity-type module names
    """
    c = conn.cursor()
    # Parameterized query instead of str.format-built SQL, so scenario_id
    # cannot corrupt or inject into the statement.
    project_portfolio_scenario_id = c.execute(
        """SELECT project_portfolio_scenario_id
        FROM scenarios
        WHERE scenario_id = ?""",
        (scenario_id,),
    ).fetchone()[0]

    required_capacity_type_modules = [
        p[0] for p in c.execute(
            """SELECT DISTINCT capacity_type
            FROM inputs_project_portfolios
            WHERE project_portfolio_scenario_id = ?""",
            (project_portfolio_scenario_id,),
        ).fetchall()
    ]

    return required_capacity_type_modules
bdd4f101465c55b712eb3f54797bbf213ed50b80
695,517
def radius_sonic_point(planet_mass, sound_speed_0):
    """
    Radius of the sonic point, where the wind speed matches the speed of
    sound.

    Parameters
    ----------
    planet_mass (``float``):
        Planetary mass in units of Jupiter mass.

    sound_speed_0 (``float``):
        Constant speed of sound in units of km / s.

    Returns
    -------
    radius_sonic_point (``float``):
        Radius of the sonic point in units of Jupiter radius.
    """
    # Gravitational constant in units of jupiterRad * km**2 / s**2 / jupiterMass
    grav = 1772.0378503888546
    # r_s = G M / (2 cs^2)
    return grav * planet_mass / 2 / sound_speed_0 ** 2
15965142ad7bd9ef3bdab1a215c0ed906b1e9102
695,518
def sampling_points_matsubara(b, whichl):
    """Compute "optimal" Matsubara-domain sampling points for basis *b*.

    Thin wrapper that delegates to the basis object's own
    ``sampling_points_matsubara`` method for the given l index.
    """
    points = b.sampling_points_matsubara(whichl)
    return points
6bd572e0502f44f28d9143ea1c39f858a47b6785
695,519
def _process_type(type_): """Process the SQLAlchemy Column Type ``type_``. Calls :meth:`sqlalchemy.sql.type_api.TypeEngine.compile` on ``type_`` to produce a string-compiled form of it. "string-compiled" meaning as it would be used for a SQL clause. """ return type_.compile()
ac9cc08faf958ad226da1bf08381b4bedd400e49
695,520
import pathlib import typing def _get_file_format_id(path: pathlib.Path, file_format: typing.Optional[str]) -> str: """Determine the file format for writing based on the arguments.""" formats = { "yaml": path.name.endswith((".yml", ".yaml")), "toml": path.name.endswith(".toml"), "json": path.name.endswith(".json"), } finder = (k for k, v in formats.items() if file_format == k or v) return next(finder, "json")
d2fc516ba1a1fae1c7d91e6bac351ca7bead5f04
695,521
from typing import Union
from pathlib import Path


def find_path_of_common_folder(files) -> Union[None, Path]:
    """Find the deepest filesystem folder shared by every path in *files*.

    Each entry is resolved to an absolute path and the leading components
    that all paths agree on are joined back into a Path. Returns None when
    the paths share no common prefix anchored at the filesystem root.

    Fixes the original implementation, which kept collecting "common"
    components even AFTER the paths had diverged, so e.g.
    ``/a/x/c`` and ``/a/y/c`` produced the bogus path ``/a/c`` instead of
    ``/a``. (It also carried an unused ``maxlen`` and dead commented code.)
    """
    parts_per_file = [Path(f).resolve().parts for f in files]
    depth = min(len(parts) for parts in parts_per_file)

    common_parts = []
    for i in range(depth):
        candidates = {parts[i] for parts in parts_per_file}
        if len(candidates) != 1:
            # stop at the first disagreement -- later matching components
            # are no longer part of a common *prefix*
            break
        common_parts.append(candidates.pop())

    if not common_parts or common_parts[0] != Path.home().root:
        return None
    common_path = Path(common_parts[0])
    for part in common_parts[1:]:
        common_path = common_path.joinpath(part)
    return common_path
89e8cff1b14597d6e58ab7c7f0452c3b18f5c428
695,522
def async_partial(coro, *added_args, **added_kwargs):
    """
    Like functools.partial(), but for coroutines.

    Positional arguments bound here are placed before call-time
    positionals; a keyword supplied both here and at call time raises
    TypeError, exactly as a duplicate keyword would.
    """
    async def bound(*call_args, **call_kwargs):
        return await coro(*added_args, *call_args,
                          **added_kwargs, **call_kwargs)

    return bound
4138d0a253e8713ecff9b96345794a90116ee7bb
695,523
def read_paralogs_log(paralog_log_file):
    """Read the set of paralogous gene-chains from a log file.

    One gene-chain per line; blank lines are skipped. Returns a set.
    """
    with open(paralog_log_file) as handle:
        lines = handle.read().split("\n")
    return {line for line in lines if line != ""}
c863f1b680854e8130fcad9e58b0b160cd39f735
695,524
def remove_common_elements(package_list, remove_set):
    """Drop requirements whose package name appears in *remove_set*.

    Note this is one-sided filtering, not an XOR: names present only in
    *remove_set* never appear in the result.

    Parameters
    ----------
    package_list : list
        Requirement strings, assumed to use "==" to pin versions
        (e.g. ``"numpy==1.21"``).
    remove_set : set
        Bare package names to exclude.

    Returns
    -------
    list
        The requirement strings whose name is not in *remove_set*.
    """
    return [
        requirement for requirement in package_list
        if requirement.split("==")[0].strip() not in remove_set
    ]
99e6fc3d7273de551d9fc8e4f8dd5b1628b93dda
695,525