content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import math


def mitzenmacher_theorem(alpha, fpr, fnr, b, zeta, m):
    """Space-saving criterion for a learned Bloom filter (Mitzenmacher).

    Args:
        alpha: usually predetermined (~0.6185); the FPR of a generic Bloom
            filter falls as alpha**b where b is bits per item.
        fpr: empirical false positive rate of the learned Bloom filter.
        fnr: empirical false negative rate of the learned Bloom filter.
        b: bits per item.
        zeta: size of the learned classifier.
        m: number of keys in the "in" set.

    Returns:
        Scalar; if it is >= 0 the LBF can potentially save space.
    """
    classifier_bits_per_key = zeta / float(m)
    effective_fpr = fpr + (1 - fpr) * alpha ** (b / fnr)
    bloom_equivalent_bits = math.log(effective_fpr, alpha) - b
    return bloom_equivalent_bits - classifier_bits_per_key
ca171bf1c7b7bad03f19d66a98ad86769d59f6d4
692,719
def get_module_name(name: str) -> str:
    """Return the cog name: the last dotted component of a module path."""
    return name.rsplit(".", 1)[-1]
dda70b4c2a6f1aa2666c0f1df01c6aa6aabde847
692,720
def assign_staging_jobs_for_missing_clusters(
    support_and_staging_matrix_jobs, prod_hub_matrix_jobs
):
    """Guarantee a support/staging job for every cluster with a prod hub job.

    Last-hope catch-all: no prod hub job may run without an associated
    support/staging job on the same cluster.

    Args:
        support_and_staging_matrix_jobs (list[dict]): jobs upgrading the
            support chart and staging hub on clusters that require it.
        prod_hub_matrix_jobs (list[dict]): jobs upgrading production hubs.

    Returns:
        list[dict]: support_and_staging_matrix_jobs, extended with a job for
        every cluster present in prod_hub_matrix_jobs but missing here.
    """
    prod_clusters = {job["cluster_name"] for job in prod_hub_matrix_jobs}
    covered_clusters = {
        job["cluster_name"] for job in support_and_staging_matrix_jobs
    }
    # Clusters with prod work but no support/staging job: assume neither the
    # support chart nor staging hub needed an upgrade (upgrade_support=False),
    # but since prod hubs will be deployed, staging must be upgraded first.
    for cluster in prod_clusters - covered_clusters:
        cluster_prod_jobs = [
            hub for hub in prod_hub_matrix_jobs
            if hub["cluster_name"] == cluster
        ]
        provider = cluster_prod_jobs[0]["provider"] if cluster_prod_jobs else None
        prod_hub_names = [hub["hub_name"] for hub in cluster_prod_jobs]
        support_and_staging_matrix_jobs.append(
            {
                "cluster_name": cluster,
                "provider": provider,
                "upgrade_support": False,
                "reason_for_support_redeploy": "",
                "upgrade_staging": True,
                "reason_for_staging_redeploy": (
                    "Following prod hubs require redeploy: "
                    + ", ".join(prod_hub_names)
                ),
            }
        )
    return support_and_staging_matrix_jobs
3e8809272046dde50cf148dd6dd9ada3e63df9d8
692,721
def winner_loser(D):
    """Compare the first two scores in ``D``.

    Args:
        D (dict): mapping with D["scores"][i]["value"] for i in (0, 1).

    Returns:
        tuple: (1, 0) if the first score wins, (0, 1) if the second wins,
        (0.5, 0.5) for a tie.
    """
    a = D["scores"][0]["value"]
    b = D["scores"][1]["value"]
    if a > b:
        return 1, 0
    if b > a:
        return 0, 1
    # BUG FIX: the docstring promised .5 for ties, but the tie case fell
    # through and returned None.
    return 0.5, 0.5
effb0c279f7cd83b7414681e1032531b16529b18
692,722
from typing import List
from collections import Counter


def delete_and_earn(numbers: List[int]) -> int:
    """https://leetcode.com/problems/delete-and-earn/

    House-robber DP over distinct values: taking a value earns
    value * count but forbids taking value - 1.
    """
    # BUG FIX: the original called typing.Counter at runtime; typing.Counter
    # is a deprecated generic alias — collections.Counter is the real class.
    counter = Counter(numbers)
    using, avoid = 0, 0
    previous_value = None
    for value in sorted(counter):
        best = max(avoid, using)
        if value - 1 == previous_value:
            # Adjacent value: taking it means we must have avoided value - 1.
            avoid, using = best, value * counter[value] + avoid
        else:
            avoid, using = best, value * counter[value] + best
        previous_value = value
    return max(avoid, using)
eca3931fb58b6fbe39a4180dd51aa8cbc0627676
692,724
def _getIfromRGB(rgb):
    """Pack a BGR-ordered color triple into a single integer.

    Parameters
    ----------
    rgb : sequence
        Color triple stored as (blue, green, red) — note the reversed order.

    Returns
    -------
    int
        (red << 16) + (green << 8) + blue.
    """
    blue, green, red = rgb[0], rgb[1], rgb[2]
    return (red << 16) + (green << 8) + blue
6ad071b7f5022efe412a2b121fa295c972d87855
692,725
def _search(text, font, deleting): """ >>> from fontTools.agl import AGL2UV >>> from defcon import Font >>> font = Font() >>> for glyphName, value in AGL2UV.items(): ... _ = font.newGlyph(glyphName) ... font[glyphName].unicodes = [value] ... _ = font.newGlyph(glyphName + ".alt") ... _ = font.newGlyph(glyphName + ".xxx") >>> _search("e", font, False) ('e', 'e') >>> _search("eg", font, False) ('eg', 'egrave') >>> _search("e.", font, False) ('e.', 'e.alt') >>> _search("eight.al", font, True) ('eight.al', 'eight') """ # no text if not text: return text, None glyphNames = font.keys() match = None # direct match if text in glyphNames: match = text # character entry elif len(text) == 1: uniValue = ord(text) match = font.unicodeData.glyphNameForUnicode(uniValue) if match is not None: text = "" # fallback. find closest match if match is None: glyphNames = list(sorted(glyphNames)) if not deleting: for glyphName in glyphNames: if glyphName.startswith(text): match = glyphName break else: matches = [] for glyphName in glyphNames: if text.startswith(glyphName): matches.append(glyphName) elif match is not None: break diff = None for m in matches: d = len(m) - len(text) if diff is None or d < diff: match = m return text, match
9acf3a5c309f4c680facb72e06946f866f933866
692,726
def _number_power(n, c=0):
    """
    This is an odd one, for sure. Given a number, we return a tuple that
    tells us the numerical format to use. It makes a little more sense if
    you think of the second number in the tuple as a pointer into the list:

        ('', 'hundred', 'thousand', 'million', ...)

    ...so, given the input, '100', we return (1, 1), which can be read as
    (1, 'hundred'). This could easily have been a lookup table of reasonable
    size, but what's the fun of that when we can use recursion?

    >>> _number_power('1')
    (1, 0)
    >>> _number_power('10')
    (10, 0)
    >>> _number_power('23')
    (10, 0)
    >>> _number_power('100')
    (1, 1)
    >>> _number_power('200')
    (1, 1)
    >>> _number_power('1000')
    (1, 2)
    >>> _number_power('1234')
    (1, 2)
    >>> _number_power('10000')
    (10, 2)
    >>> _number_power('100000')
    (100, 2)
    >>> _number_power('987654')
    (100, 2)
    >>> _number_power('1000000')
    (1, 3)
    >>> _number_power('10000000')
    (10, 3)
    >>> _number_power('100000000')
    (100, 3)
    >>> _number_power('1000000000')
    (1, 4)
    """
    # Only the magnitude matters: normalize to '1' followed by zeros, one
    # digit per digit of the input.
    digit_count = len(str(n))
    normalized = "1" + "0" * (digit_count - 1)
    value = int(normalized)
    if digit_count > 3:
        # Strip a thousands group and recurse one power level up.
        return _number_power(normalized[:-3], c + 1)
    if value == 100:
        return (100 if c > 0 else 1, c + 1)
    if value == 10:
        return (10, c + 1 if c > 0 else 0)
    return (1, c + 1 if c > 0 else 0)
67911888f928647de52f92e1d468fe89f7d4bdc1
692,727
from datetime import datetime


def convert_time(timestamp):
    """Convert a POSIX timestamp to a naive UTC datetime."""
    moment = datetime.utcfromtimestamp(timestamp)
    return moment
38ce9dce7cbebf99838f422b282629bfe7afc10d
692,729
def convert_value(val):
    """Normalize an operating-conditions dict to floats (None passes through)."""
    def _maybe_float(x):
        # min/max may legitimately be absent (None).
        return float(x) if x is not None else None

    return {
        'min_value': _maybe_float(val['min']),
        'max_value': _maybe_float(val['max']),
        'values': [float(x) for x in val['values']],
        'units': str(val['units']),
    }
0142ce6414d0e8c6a05eaa6064a558f8ff6d4cff
692,730
import click


def output_warning(text):
    """Return ``text`` styled as a bold, bright-yellow click warning string."""
    message = "WARNING: {}".format(text)
    return click.style(message, fg="bright_yellow", bold=True)
f03eda44854df3ac13569c80d82aafc05070df13
692,731
def score(x_pred, sparse_activity, sparse_dictionary):
    """Element-wise squared reconstruction error of a sparse code.

    NOTE: a weight_function would make this score time dependent (see the
    original design note).

    Args:
        x_pred: target signal.
        sparse_activity: activity coefficients (matmul-compatible with the
            dictionary).
        sparse_dictionary: dictionary of basis elements.

    Returns:
        Element-wise squared error (x_pred - activity @ dictionary) ** 2.
    """
    reconstruction = sparse_activity @ sparse_dictionary
    return (x_pred - reconstruction) ** 2
ee45e2ac2d7cf7958c4bf76577626af187a22100
692,732
import os


def _make_subtempdir(tmpdir):
    """Create and return a 'fixture_tmp' directory UNDERNEATH ``tmpdir``.

    Follows os.mkdir semantics: raises if the directory already exists.
    """
    child = os.path.join(str(tmpdir), 'fixture_tmp')
    os.mkdir(child)
    return child
45b3654d76ef46a6c78742fed1349701be4db04b
692,733
import networkx as nx
def to_nx(bdd, roots):
    """Convert functions in `roots` to `networkx.MultiDiGraph`.

    The resulting graph has:

      - nodes labeled with:
        - `level`: `int` from 0 to `len(bdd)`
      - edges labeled with:
        - `value`: `False` for low/"else", `True` for high/"then"
        - `complement`: `True` if target node is negated

    @type bdd: `BDD`
    @type roots: iterable of edges, each a signed `int`
    """
    g = nx.MultiDiGraph()
    for root in roots:
        # every root must refer to a node present in the BDD
        assert abs(root) in bdd, root
        # worklist of (possibly signed) node references still to expand
        Q = {root}
        while Q:
            u = Q.pop()
            # node identity ignores the complement sign of the edge
            u = abs(u)
            # NOTE(review): relies on the private `_succ` table of `bdd`;
            # assumed layout is (level, low, high) — confirm against the
            # BDD implementation.
            i, v, w = bdd._succ[u]
            assert u > 0, u
            g.add_node(u, level=i)
            # terminal ?
            if v is None or w is None:
                # a terminal must have BOTH successors absent
                assert w is None, w
                assert v is None, v
                continue
            # non-terminal: a negative low edge marks a complemented "else"
            r = (v < 0)
            v = abs(v)
            w = abs(w)
            # enqueue unseen successors for expansion
            if v not in g:
                Q.add(v)
            if w not in g:
                Q.add(w)
            assert v > 0, v
            assert w > 0, w
            # low/"else" edge carries the complement flag; high edge never does
            g.add_edge(u, v, value=False, complement=r)
            g.add_edge(u, w, value=True, complement=False)
    return g
4aa17c7ee6fa15a42ef27a7d80c574eb707f8f0e
692,735
import re


def parse_dsymutil(data, wanted_symbol):
    """Parse dsymutil symbol-table output and return a symbol's address.

    Args:
        data (str): full text output of dsymutil.
        wanted_symbol (str): symbol name to look up.

    Returns:
        int: the symbol's address, or 0 when not found (or address is 0).
    """
    # Expected line shape: "[...] (<type>) <hex> <dec> <hexaddr> '<name>'".
    # BUG FIX: the original pattern used [0-9A-Fa-z] — lowercase a-z — for
    # the first hex field; that is a typo for [0-9A-Fa-f]. Also compiled as
    # a raw string to avoid invalid-escape warnings, and dropped the unused
    # ``ents = line.split()``.
    symbol_re = re.compile(
        r"\[.*?\(([^\)]+)\)\s+[0-9A-Fa-f]+\s+\d+\s+([0-9A-Fa-f]+)\s'(\w+)'"
    )
    for line in data.splitlines():
        match = symbol_re.search(line)
        if not match:
            continue
        sym_type, addr, name = match.groups()
        addr = int(addr, 16)
        # skip null addresses and anonymous entries
        if addr == 0 or name == "":
            continue
        if name == wanted_symbol:
            return addr
    return 0
5505df4544f6821018ef9d54c4edb776d3e4ed5f
692,738
def _CalculateTau(alignment, order):
    """Calculate Kendall's Tau between ``order`` and alignment positions.

    Counts pairwise inversions of alignment positions along ``order``;
    returns 1.0 for zero or one element.
    """
    n = len(order)
    if n <= 1:
        return 1.0
    inversions = sum(
        1
        for i in range(n - 1)
        for j in range(i + 1, n)
        if alignment[order[i]] > alignment[order[j]]
    )
    return 1.0 - float(inversions) / (n * (n - 1) / 2)
055c9234212625ac768cb40a538c03ea59f4058a
692,739
def input_str(option1=None, option2=None):
    """Returns a string variable taken from the user, otherwise displays an
    error message. If option1 and option2 are specified, the function returns
    one of those two options, otherwise it displays an error message.

    Parameters:
        option1: First option to choose of type string.
        option2: Second option to choose of type string.
    """
    while True:
        input_data = input()
        # Free-form mode: accept any input that is alphabetic once all
        # whitespace is removed.
        if option1 is None and option2 is None:
            if ''.join(input_data.split()).isalpha():
                break
            else:
                print("You put invalid data, please try again")
        # Choice mode: normalize and require an exact match with one of the
        # two options. NOTE(review): options appear to be expected in upper
        # case, since the input is uppercased before comparing — confirm.
        if option1 is not None or option2 is not None:
            input_data = input_data.upper().strip()
            if input_data.isalpha() and (input_data == option1 or input_data == option2):
                break
            else:
                print("You put invalid data, please try again")
    return input_data
4c05036fc47bc8524704cd9773e1cc93afc15770
692,741
def count_trailing_newlines(s):
    """Count '\\n' characters in the trailing-whitespace tail of ``s``.

    Newlines separated by other trailing whitespace are included.
    """
    tail_start = len(s.rstrip())
    tail = s[tail_start:]
    return tail.count('\n')
349190d9e53ce6580d037b26f813cd228f92dc98
692,742
def read_pop(filename):
    """Read a population log file.

    Each non-empty line must be a Python literal tuple ``(x, fit, pop)``.

    Args:
        filename: path of the text file to read.

    Returns:
        tuple(list, list): the populations and fitness values, in file order.
    """
    # BUG FIX: the progress message printed a literal placeholder instead of
    # the file actually being read.
    print(f"Reading in {filename}")
    inlist = []
    fitness = []
    with open(filename, 'r') as myfile:
        for line in myfile:
            if len(line) > 1:
                # SECURITY: eval() executes arbitrary code — only ever run
                # this on trusted files.
                x, fit, pop = eval(line)
                inlist.append(pop)
                fitness.append(fit)
    return inlist, fitness
b2a8c58fce1b858731bda995eb78fa74162fa0c0
692,743
def in_ranges(value, ranges):
    """Return True if ``value`` lies within EVERY (low, high) range.

    NOTE(review): despite the name suggesting "any range", the code uses
    ``all`` — the value must fall inside all ranges simultaneously (and an
    empty ``ranges`` returns True). Confirm this is the intended semantics.
    """
    return all(low <= value <= high for low, high in ranges)
91a9b8fb3d225438ddcb21f3e9b4d981edeed29c
692,745
def effacer(ch, d=None, f=None):
    """Delete the characters of ``ch`` from index ``d`` (inclusive) to ``f``
    (exclusive).

    With neither index, the whole string is erased; with only ``d``, the tail
    from ``d`` is removed; with only ``f``, the head before ``f`` is removed.
    When ``f <= d`` the string is returned unchanged.

    Raises:
        TypeError: if ``ch`` is not a str.
    """
    if type(ch) != str:
        raise TypeError("ch doit être de type str")
    if d is None and f is None:
        return ""
    if d is None:
        return ch[f:]
    if f is None:
        return ch[:d]
    if f > d:
        return ch[:d] + ch[f:]
    return ch
8cc0f29b56af469c56241cb73c423d0ed05cd30b
692,746
def get_log(db, job_id):
    """Extract the log rows for a job as formatted strings.

    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param job_id: a job ID
    :returns: list of '[timestamp #job level] message' strings
    """
    rows = db('SELECT * FROM log WHERE job_id=?x ORDER BY id', job_id)
    return [
        '[%s #%d %s] %s' % (
            str(row.timestamp)[:-4],  # strip decimals
            job_id,
            row.level,
            row.message,
        )
        for row in rows
    ]
fec660a4464a8005a564bb1ef581061d416d2bf3
692,747
def __part1by1_64(n):
    """Spread the low 32 bits of ``n`` so bit i moves to bit 2*i.

    Classic Morton-encoding helper: interleaves a zero between every bit of
    the 32-bit input, producing a 64-bit result.
    """
    spread_steps = (
        (16, 0x0000FFFF0000FFFF),
        (8, 0x00FF00FF00FF00FF),
        (4, 0x0F0F0F0F0F0F0F0F),
        (2, 0x3333333333333333),
        (1, 0x5555555555555555),
    )
    n &= 0x00000000FFFFFFFF  # 64-bit mask: keep only the low 32 bits
    for shift, mask in spread_steps:
        n = (n | (n << shift)) & mask
    return n
c7a6cf274a88b0280d257594a00e04b433bd4f7b
692,748
def fill_with_zeroes(df, *cols):
    """Replace nulls with 0 in the named columns of ``df`` and return it.

    Columns are reassigned in place on the passed dataframe.
    """
    for name in cols:
        df[name] = df[name].fillna(0)
    return df
8ccb6d16298d771c4f9a724586ccc9a1efb2a2c5
692,749
import os
import sqlite3


def create_db(close_conn=True):
    """Open a connection to ./movies.db, creating the database if needed.

    Args:
        close_conn (bool): when True, close the connection and return None;
            when False, return the open connection.

    Returns:
        sqlite3.Connection | None
    """
    conn = None
    db_path = os.path.join(os.getcwd(), "movies.db")
    try:
        conn = sqlite3.connect(db_path)
        # BUG FIX: sqlite3.version (the DB-API module version) is deprecated
        # since Python 3.12; report the SQLite library version instead.
        print("SQLite3 -- {}".format(sqlite3.sqlite_version))
    except sqlite3.Error as e:
        print("@connect_db -- {}".format(e))
    # Restructured out of a ``finally: return`` (which would swallow
    # exceptions); behavior for callers is unchanged.
    if conn is not None and close_conn:
        conn.close()
        return None
    return conn
ee58a61798602317c6db04660d247376a63eb6c5
692,750
def normalize_3d_coordinate(p, padding=0):
    """Normalize coordinate to [0, 1] for unit cube experiments.
    Corresponds to our 3D model.

    Args:
        p (tensor): point
        padding (float): conventional padding parameter of ONet for unit
            cube, so [-0.5, 0.5] -> [-0.55, 0.55]

    Raises:
        NotImplementedError: always — this code path is intentionally
        disabled (the body below is kept for reference).
    """
    # BUG FIX: ``raise NotImplemented`` raises a TypeError because
    # NotImplemented is a constant, not an exception class; the intended
    # exception is NotImplementedError.
    raise NotImplementedError
    p_nor = p / (1 + padding + 10e-4)  # (-0.5, 0.5)
    p_nor = p_nor + 0.5  # range (0, 1)
    # if there are outliers out of the range, clamp them just inside [0, 1)
    if p_nor.max() >= 1:
        p_nor[p_nor >= 1] = 1 - 10e-4
    if p_nor.min() < 0:
        p_nor[p_nor < 0] = 0.0
    return p_nor
140f66be576b30b15b3a550012a3a0e7c94a1b63
692,751
def is_admissible(action: int, x: int) -> bool:
    """Determine whether ``action`` (0 → T0, 1 → T1) is feasible for ``x``.

    T0 is feasible iff ``x`` is even; T1 iff ``x`` is odd.
    """
    parity = x % 2
    return parity == action
b1d5ddc71a4ab3372a4c509f35c5dc1cd75cc5bf
692,752
def pretty_print_timediff(delta):
    """Split a duration in seconds into a readable (hours, minutes, seconds).

    Args:
        delta (float): duration in seconds.

    Returns:
        tuple(int, int, float): whole hours, whole minutes, and seconds
        including the fractional part.
    """
    fraction = delta - int(delta)
    whole_seconds = int(delta)
    # BUG FIX: ``/`` is true division in Python 3, which made hours and
    # minutes floats (e.g. 1.0169...); floor division restores the intent
    # of this (originally Python 2) code.
    hours = whole_seconds // 3600
    remainder = whole_seconds % 3600
    minutes = remainder // 60
    seconds = remainder % 60
    return (hours, minutes, float(seconds) + fraction)
544d0eaaf08e826cbbcd473d7b1426b9b1380d68
692,753
def format_size(size):
    """Convert a byte count into a human-readable string (B/KB/MB/GB).

    Values of 1 TB or more are still reported in GB.
    """
    units = ['B', 'KB', 'MB', 'GB']
    index = 0
    while size >= 1024 and index < len(units) - 1:
        size = size / 1024
        index += 1
    return f"{size:.2f} {units[index]}"
90bc93c9170fc89f7ac9e66d43eb28c04366f1c6
692,754
def recipelist(recipebook):
    """Render the recipes in ``recipebook`` as a numbered list for the CLI."""
    lines = ["Recipes:"]
    for index, recipe in enumerate(recipebook.recipes):
        lines.append("\n{}. {}".format(index, recipe.name))
    return "".join(lines)
e8b05d57a2219fc79c8d25f9d4d6d18780b79574
692,755
def handleYear(year):
    """Resolve two-digit years heuristically: 50-99 → 1900s, 0-49 → 2000s.

    Years above 999 pass through unchanged (no guessing needed). Might need
    rewriting in 2050, but that seems like a reasonable limitation.
    """
    year = int(year)
    if year > 999:
        return year
    return 2000 + year if year < 50 else 1900 + year
1c2eecb7cad7af16814f68dccd0f0a4b4aedd482
692,756
def _start_of_option(value: str) -> bool:
    """Return True if non-empty ``value`` begins with '-', like a CLI option."""
    if not value:
        return False
    return value[0] == "-"
0430d39de6260d3cc3d9e9725ef1cc4df8d202cb
692,757
def parse_bibfile_to_dict(bib_path):
    """Read a bibtex file into a dict keyed by lower-cased citekey.

    Returned dictionary has the form:
        {'alberga2018prediction': ['title={Prediction of ... }'],
         'hawkins2007comparison': ['title={Comparison of ... }'], ...}
    where each value holds the entry's non-blank lines (including the
    '@type{key,' header line).
    """
    entries_by_key = {}
    current_key = None
    current_lines = []
    with open(bib_path, 'r') as fh:
        for raw in fh:
            line = raw.strip()
            if not line:
                continue
            if line.startswith('@'):
                # flush the previous entry before starting a new one
                if current_key is not None:
                    entries_by_key[current_key] = current_lines
                    current_lines = []
                brace = line.find('{')
                comma = line.find(',')
                current_key = line[brace + 1:comma].lower()
            current_lines.append(line)
    if current_key is not None:
        entries_by_key[current_key] = current_lines
    return entries_by_key
1ef3500208432d81ba4e3b8de34ef829c8b15778
692,758
import re


def match_array_index(pattern, node):
    """Match a child selector of the form ``[N]`` (e.g. ``[0]``).

    Returns False for 'value' nodes; otherwise the ``re.Match`` for an
    index-like pattern, or None when the pattern is not an index.
    """
    if node.type == 'value':
        return False
    return re.match(r'\[(\d+)\]', pattern)
c896491092c4b332b259d9e9fd3b983bb6bfbb4b
692,759
def fmt(fmt_str):
    """Helper to convert a format string into a formatting function.

    :param fmt_str: string appropriate for use with str.format
    :return: a callable that applies its arguments to ``fmt_str``
    """
    formatter = fmt_str.format
    return formatter
64cd64a036372d754c46b625ca901665d4dd6d16
692,760
def get_post_gallery_images(context):
    """Return the post's images flagged as 'gallery', or [] when unavailable.

    Args:
        context (dict): template context expected to hold the post under
            the 'object' key.

    Returns:
        The filtered image queryset, or [] when the post is missing or has
        no image relation.
    """
    try:
        post = context['object']
        return post.image_set.filter(img_type='gallery')
    # BUG FIX: a bare ``except`` swallowed every error (including typos and
    # KeyboardInterrupt); catch only the expected lookup failures.
    except (KeyError, AttributeError):
        return []
d9ba16e99d55d893f696bfa92063536d8a402b9b
692,761
import tempfile


def io_error():
    """Demonstrate the base of all file system errors.

    AKA EnvironmentError / OSError: opening a path that does not exist
    raises IOError, which we catch and describe.
    """
    missing_path = tempfile.mktemp()  # a path guaranteed not to exist yet
    try:
        open(missing_path, 'r')
    except IOError:
        return "reading a missing file"
a79aca71ef68af0dfc65858bd44a6c68bb2f8f6c
692,762
def my_add(x, y):
    """Return the sum of ``x`` and ``y``."""
    total = x + y
    return total
983629b3c155259269a496100973bc993d8c6724
692,763
def looks_camel_case(x):
    """Return True when ``x`` contains a lowercase letter immediately
    followed by an uppercase letter (a lower→UPPER transition)."""
    for left, right in zip(x, x[1:]):
        if left.isalpha() and right.isalpha():
            if left.islower() and right.isupper():
                return True
    return False
e8997b86b6f4cea2eb4ac97d66b6b48b71d709a9
692,764
def dyck_words_str(n):
    """Yield all words consisting of n pairs of correctly matched parentheses."""
    def extend(word):
        # Prune any branch that is already too long or has an unmatched ')'.
        opens = word.count("(")
        closes = word.count(")")
        if len(word) > 2 * n or closes > opens:
            return
        if opens == closes == n:
            yield word
        else:
            yield from extend(word + "(")
            yield from extend(word + ")")

    yield from extend("")
4f70db40656ec7505f695918eb944a3bba511087
692,765
import os


def is_exist(directory: str) -> bool:
    """Return True if ``directory`` (resolved against the CWD) exists.

    NOTE: checks existence only — a regular file at the path also returns True.
    """
    candidate = os.path.join(os.getcwd(), directory)
    return os.path.exists(candidate)
7836bf1e096358d90b2148ec448747336741fd22
692,766
def f():
    """Print the integers 1..10 and return the fifth one. -> int"""
    numbers = range(1, 11)
    for value in numbers:
        print(value)
    return numbers[4]
30c97c4bdf95703d4b71148a4bad86c2cf7206a1
692,767
import os


def mkdir(dir):
    """Create directory ``dir`` (with parents) if it does not exist.

    Args:
        dir: directory path.

    Returns:
        The directory path on success (or when it already exists),
        False when creation fails.
    """
    if os.path.exists(dir):
        return dir
    try:
        os.makedirs(dir)
    except Exception:
        return False
    return dir
a75ba6c6622a5a2731250d478c464630216a6a7f
692,768
def create_or_update_user(
    self,
    new_user: bool,
    user_pk: str,
    first_name: str,
    last_name: str,
    phone: str,
    email: str,
    status: str,
    role: str,
    username: str,
    password: str,
    repeat_password: str,
    two_factor_email: bool,
    two_factor_app: bool,
    create_display_time: str = "",
) -> bool:
    """Create a new user or update an existing one.

    Swagger: user — POST — /users/{newUser}

    :param new_user: ``True`` to create a new user, ``False`` to update
    :param user_pk: primary key of the user object ("" when creating)
    :param first_name: first name of user
    :param last_name: last name of user
    :param phone: phone number for user
    :param email: email address for user
    :param status: ``Active`` or ``Inactive``
    :param role: ``Admin Manager`` for admin, ``Network Monitor`` for monitor
    :param username: username for user
    :param password: password for user
    :param repeat_password: confirmation of matching password
    :param two_factor_email: enable two-factor auth via email
    :param two_factor_app: enable two-factor auth via app
    :param create_display_time: description missing from Swagger
    :return: True/False based on successful call
    :rtype: bool
    """
    payload = {
        "userPk": user_pk,
        "firstName": first_name,
        "lastName": last_name,
        "phone": phone,
        "email": email,
        "createDisplayTime": create_display_time,
        "status": status,
        "role": role,
        "username": username,
        "password": password,
        "repeatPassword": repeat_password,
        "isTwoFactorEmail": two_factor_email,
        "isTwoFactorTime": two_factor_app,
    }
    return self._post(
        "/users/{}".format(new_user),
        data=payload,
        expected_status=[204],
        return_type="bool",
    )
06e4f0916d05a84db2ae555cec5c6b64fec73976
692,769
def map_from_mi(mi, lfrom, lto):
    """Create a mapping from one level of a MultiIndex to another."""
    source_values = mi.get_level_values(lfrom)
    target_values = mi.get_level_values(lto)
    return dict(zip(source_values, target_values))
ff66f03a684a659d1e22135dd268102991aea3a3
692,770
def isolate_column(df, column):
    """Split ``df`` into (features, target) float32 numpy arrays.

    Returns the dataframe without ``column`` and that column's values.
    """
    features = df.drop(columns=[column]).values.astype('float32')
    target = df[column].values.astype('float32')
    return features, target
434fd304de69fb748690b92da2f136a5c109f3a0
692,771
import os
def carry_over_to_files(carry_over, invalid_svg_dir, num_files):
    """Converts the carry_over to individual svg files placed in invalid_svg_dir.

    carry_over: string of the carry_over text (concatenated svg documents)
    invalid_svg_dir: path or name of desired directory to hold svg files;
        must already exist (os.rename below does not create it)
    num_files: number to keep track of how many files created

    Returns a (remaining_carry_over, next_file_number) tuple once no
    complete '</svg>' end tag remains.
    """
    # scan forward until the next '</svg>' end tag
    char_index = 0
    while carry_over[char_index: char_index+6] != "</svg>":
        char_index = char_index + 1
    index_end_of_file = char_index+6
    #the entire file is the carryover until the </svg> end tag
    file_content = carry_over[:index_end_of_file]
    carry_over = carry_over[index_end_of_file:]
    # write the extracted document into the current working directory first
    f = open(str(num_files) + ".svg","w+")
    f.write(file_content)
    f.close()
    #moving all the files into the invalid_svg_dir since none of them have been validated yet
    os.rename(str(num_files) + ".svg", invalid_svg_dir + "/" + str(num_files) + ".svg")
    #recursivley converts the carry_over to files until there is just carry_over without any </svg> end tags
    if "</svg>" in carry_over:
        return carry_over_to_files(carry_over, invalid_svg_dir, num_files+1)
    else:
        return (carry_over, num_files+1)
293bee66729b3cf9a8a9ab5d14557a4db9310375
692,772
import os
import argparse


def arg_dir(arg):
    """argparse type-checker: verify the argument names an existing directory.

    Raises:
        argparse.ArgumentTypeError: when ``arg`` is not a directory.
    """
    if os.path.isdir(arg):
        return arg
    raise argparse.ArgumentTypeError("not a directory: %s" % arg)
1418035e8e692d7f4480e24a314ad00083bb195c
692,773
def variable_om_cost_rule(mod, g, tmp):
    """Variable O&M cost: discharge power times the per-MWh cost rate.

    Applied only to storage discharge, i.e. when the project is providing
    power to the system.
    """
    discharge_mw = mod.Stor_Discharge_MW[g, tmp]
    cost_per_mwh = mod.variable_om_cost_per_mwh[g]
    return discharge_mw * cost_per_mwh
b174a88464bb94ad7b60eaf45fc8720111949cb4
692,774
def new_batch_order(self, batchOrders: list):
    """Place Multiple Orders (TRADE).

    :API endpoint: ``POST /dapi/v1/batchOrders``
    :API doc: https://binance-docs.github.io/apidocs/delivery/en/#place-multiple-orders-trade

    :parameter batchOrders: list of order parameter dicts (max 5 orders)

    **Notes**
        - Parameter rules are the same as New Order.
        - Batch orders are processed concurrently; matching order is not
          guaranteed, but the returned contents preserve list order.
    """
    return self.sign_request(
        "POST",
        "/dapi/v1/batchOrders",
        {"batchOrders": batchOrders},
        True,
    )
d8fc3c72dc6f6de507fa9b37818c9ef9b6e3177d
692,775
def get_donottrack(request):
    """Return True iff the request's ``HTTP_DNT`` header is '1'."""
    dnt_header = request.META.get('HTTP_DNT')
    return dnt_header == '1'
0f490a13bf48569022f276b222b1aa83adb78e41
692,776
def getCbsdsNotPartOfPpaCluster(cbsds, ppa_record):
    """Return the CBSDs that are not part of a PPA cluster list.

    Args:
        cbsds: list of CBSDData objects (dicts with an 'id' key).
        ppa_record: a PPA record dictionary.

    Returns:
        list: CBSDs whose id is absent from the PPA cluster list.
    """
    cluster_ids = ppa_record['ppaInfo']['cbsdReferenceId']
    return [cbsd for cbsd in cbsds if cbsd['id'] not in cluster_ids]
3593ed41851e5d63243dae4665ccf593eccc763e
692,777
def factorial_rec(n):
    """Recursively compute n! (n must be a non-negative integer)."""
    return 1 if n == 0 else n * factorial_rec(n - 1)
7276f99d82079d8b65e657813c6855d2f5290cd3
692,778
from typing import Dict


async def test_http() -> Dict[str, bool]:
    """Simple success payload for HTTP health-check requests."""
    response = {"success": True}
    return response
1ccc54c9e17cdfa6f48d21231f7d38b480b76df6
692,779
def format_decimal(number, digits=1):
    """Format ``number`` with space-separated thousands and a decimal comma.

    Returns 'bez dat' when ``number`` is None.
    """
    if number is None:
        return 'bez dat'
    grouped = '{:,}'.format(round(number, digits))
    return grouped.replace(',', ' ').replace('.', ',')
95a0b4644aa57df608062fd7f32318fa48d42581
692,780
def reverse(arr):
    """Return a reversed copy of the given sequence via slicing.

    Time complexity: O(n). Allocates a new sequence of the same type.
    """
    flipped = arr[::-1]
    return flipped
687721b15de3ec94f54443f3b7bf55022d561382
692,781
def dfs(graph, s, visited=None):
    """Depth-first traversal of an edge-list graph starting at node ``s``.

    Args:
        graph: iterable of (u, v) directed edges.
        s: start node.
        visited: accumulator list of visited nodes (created when None).

    Returns:
        list: nodes in DFS visit order.
    """
    # BUG FIX: the original used a mutable default argument (visited=[]),
    # which is shared across calls — a second call would start with the
    # first call's visited state.
    if visited is None:
        visited = []
    if s not in visited:
        # BUG FIX: ``visited += s`` extends with the *characters* of a string
        # node (and fails on non-iterable nodes); append the node itself.
        # Identical behavior for the common single-character node labels.
        visited.append(s)
        for v in [v for (u, v) in graph if u == s and v not in visited]:
            dfs(graph, v, visited)
    return visited
528d71566ca08ce8f66219d09da56c4a25bb1669
692,782
def phylogeny_paper():
    """Returns the 50 bacterial species in Table 2 (Zaharia et al., 2018)
    in phylogenetic order.

    :return: list of species under study in phylogenetic order
    """
    groups = [
        # Proteobacteria
        ['eco', 'ype', 'vco', 'spc', 'pae', 'xfa', 'rso', 'nme', 'afi',
         'ara', 'rrj', 'gsu'],
        # between Proteobacteria and Terrabacteria
        ['nde', 'aca', 'din', 'fnu', 'dap', 'tid', 'aae'],
        # Terrabacteria: Firmicutes, Cyanobacteria, Chloroflexi,
        # Actinobacteria, Thermi, Armatimonadetes
        ['bsu', 'lmo', 'sau', 'lac', 'snd', 'cpe', 'mpn',
         'syn', 'pma',
         'cau',
         'bbv', 'cgl', 'mtv', 'sco',
         'dra', 'tth',
         'fgi'],
        # between Terrabacteria and FCB
        ['amo', 'tmm', 'cex', 'dth'],
        # FCB group
        ['fsu', 'gau', 'cph', 'bfr'],
        # PVC group
        ['rba', 'cpn', 'ote'],
        # after PVC
        ['bbn', 'emi', 'heo'],
    ]
    return [species for group in groups for species in group]
8958e0282d22ca8ac6a0e63e0543c15255b527d5
692,783
def countSolutionsLogfile(logfile_path):
    """Count the number of solutions in a CryptoMiniSat logfile.

    Args:
        logfile_path: path to the log file.

    Returns:
        int: number of lines containing 's SATISFIABLE'.
    """
    # BUG FIX: the original had an unreachable ``return -1`` after the
    # ``with`` block — the function always returned inside it, so the
    # "error" path could never trigger.
    with open(logfile_path, "r") as logfile:
        return sum(1 for line in logfile if "s SATISFIABLE" in line)
83a054be17b829aba11d9340a0873f54da275988
692,785
import torch
def load_state_dicts(checkpoint_file, map_location=None, **kwargs):
    """Load torch items from saved state_dictionaries.

    Each keyword argument is an object with a ``load_state_dict`` method
    (model, optimizer, ...), restored from the checkpoint entry of the
    same name.
    """
    if map_location is None:
        checkpoint = torch.load(checkpoint_file)
    else:
        checkpoint = torch.load(checkpoint_file, map_location=map_location)
    # restore every passed object in place from its checkpoint entry
    for key, value in kwargs.items():
        value.load_state_dict(checkpoint[key])
    epoch = checkpoint.get('epoch')
    # NOTE(review): a falsy epoch (missing OR epoch == 0) returns None —
    # confirm that epoch numbering starts at 1, otherwise epoch 0 is lost.
    if epoch:
        return epoch
7c636d433d883c91de55191bf9a764cabe86a4a7
692,786
def get_case_id(line):
    """Return the case id (the 11th comma-separated field) of a csv line.

    :param str line: the csv line in question
    :return str: the case_id string, or "" (after printing the error) when
        the line is malformed
    """
    try:
        fields = line.strip().split(",")
        return fields[10]
    except Exception as e:
        print("Error: " + str(e))
        return ""
ab9f1e0bd4e45565ff54302bdb17361c68e27d08
692,788
def _get_parameters_string(**kwargs):
    """Build a deterministic '_key=value_key=value' suffix for output names.

    Returns "" when no keyword arguments are given; keys are sorted so the
    identifier is stable.
    """
    if not kwargs:
        return ""
    parts = ['{}={}'.format(key, val) for key, val in sorted(kwargs.items())]
    return "_" + "_".join(parts)
553019b82ece4275ed300c926a4cfb95b6d27f9b
692,789
import io


def load(file_name):
    """Read ``file_name`` (UTF-8) and return its lines.

    Opens the .opam file in read mode and returns each line with the
    trailing newline stripped.
    """
    with io.open(file_name, "r", encoding="utf-8") as handle:
        return [line.rstrip('\n') for line in handle]
8c2c3f53b6689a9f328784c0447e7abd4d4f439b
692,790
def field_mapping(some_class):
    """Map normalized 'data_lookup' values to model field names.

    :param some_class: a django model class whose fields may carry an
        ``extra['data_lookup']`` property
    :return: dict mapping lowercased, stripped lookup values to field names
    """
    field_mapping_dict = {}
    for field in some_class._meta.get_fields():
        try:
            lookup = field.extra['data_lookup'].lower().strip()
        # BUG FIX: a bare ``except`` silently hid all errors; only skip
        # fields that lack the extra/data_lookup metadata.
        except (AttributeError, KeyError, TypeError):
            continue
        field_mapping_dict[lookup] = field.name
    return field_mapping_dict
cda1dc9777cecb6ea14207220996e22ea123172e
692,791
def same_types(obj1, obj2):
    """
    Recursively check that obj1 and obj2 are of the same types. Better
    than type(obj1) == type(obj2) because it recursively checks inside
    lists, sets, dicts, and tuples
    """
    t = type(obj1)
    # exact type identity, not isinstance: int vs bool etc. stay distinct
    if t is not type(obj2):
        return False
    if t in {list, set, dict}:
        # For dicts, two passes: one over the keys (iterating a dict yields
        # keys) and one over the values; for lists/sets a single pass.
        for iterables in ([(obj1, obj2), (obj1.values(), obj2.values())] if t is dict else [(obj1, obj2)]):
            # Pool the elements of both containers and require every element
            # to share the type of the first pooled element — i.e. each
            # container must be homogeneous AND consistent with the other.
            lst = [i for o in iterables for i in o]
            if not all(same_types(lst[0], o) for o in lst[1:]):
                return False
    if t is tuple:
        # tuples are positional records: compare element-by-element
        return len(obj1) == len(obj2) and all(same_types(o1, o2) for o1, o2 in zip(obj1, obj2))
    return True
0c95c45ab01c950b40ecb009cc623213afecdbf1
692,792
def rm_docstring_from_source(source):
    """
    Remove the docstring from the source code of a function or a class

    **Parameters**
    > **source:** `str` -- Source code of a function or a class

    **Returns**
    > `str` -- Source code of a class without docstring
    """
    # Splitting on the triple-quote delimiter leaves the docstring body at
    # index 1; deleting it drops the FIRST triple-quoted string only.
    # NOTE(review): assumes the docstring uses double quotes (''' is not
    # handled) and that no earlier string literal contains three quotes.
    source = source.split('"""')
    if len(source) > 1:
        del source[1]  # remove docstring
    source = "".join(source)
    # to handle indentation inside functions and classes:
    # strip the first line's leading whitespace from every line, then
    # re-indent the whole block with a single tab.
    source = source.split("\n")
    nb_indent = len(source[0]) - len(source[0].lstrip())
    for i in range(len(source)):
        source[i] = "\t" + source[i][nb_indent:]
    source = "\n".join(source)
    return source
f379c2511bdec13235ee28650218ace99350fd18
692,793
import os


def getImgPath(base_from_path, image_dir):
    """Build the relative and absolute image paths for a job directory.

    Args:
        base_from_path: absolute path; its last component is dropped to
            obtain the job directory.
        image_dir: name of the image subdirectory.

    Returns:
        tuple(str, str): ('images/<image_dir>',
        '<job_dir>/images/<image_dir>').
    """
    job_dir = os.path.sep.join(base_from_path.split(os.path.sep)[:-1])
    imgpath = os.path.join('images', image_dir)
    base_path_img = os.path.join(job_dir, imgpath)
    return imgpath, base_path_img
b15dd5cdf6b9b11d31e6a44de1522d5e4690225f
692,794
def getIpIntStr(ipInt):
    """Converts an IP address in host order integer to a string representation.

    :param ipInt: an IP address integer
    :rtype: str
    :return: dotted-quad string, least-significant byte first
    """
    octets = [str((ipInt >> shift) & 0xFF) for shift in (0, 8, 16, 24)]
    return ".".join(octets)
c833d0946524cde93aadb2a6b721a17e9c00ab2c
692,795
def upload_image_to(instance, filename):
    """Build the custom storage path for an article image.

    Args:
        instance: model instance exposing a ``title`` attribute.
        filename: name of the uploaded file.

    Returns:
        str: 'article/<title>/images/<filename>'.
    """
    # BUG FIX: ``filename`` was unused and the path ended in a literal
    # placeholder; include the uploaded file's name in the asset path.
    asset_path = f'article/{str(instance.title)}/images/{filename}'
    return asset_path
5dcdf6e5d80cc67678e0cfae990885c9e68d6733
692,796
def hasspec(value, specs):
    """Check whether any of the keys in ``specs`` are present in ``value``."""
    return any(s in value for s in specs)
15866fc140d394169d5a7f1932977031a7cd6832
692,797
def presentation_transform(row):
    """Normalize column 2 of a frontend_presentation row.

    Maps the database flag 't' to 'true' and anything else to 'false' so
    the row matches our statistics schema.
    """
    row[2] = 'true' if row[2] == 't' else 'false'
    return row
56b87e15ed60d7a87364c72e830064341e05d1a6
692,798
def ipkg_meta_from_pkg(pkg):
    """Return meta dict for Installed pkg from a PackageDescription instance."""
    fields = ("name", "version", "summary", "url", "author", "author_email",
              "license", "download_url", "description", "platforms",
              "classifiers", "install_requires", "top_levels")
    return {field: getattr(pkg, field) for field in fields}
7c73546854fe022005bb7cd65711d850fc744645
692,799
import subprocess def parse_cmd(cmd, split=True): """Parse the output of a shell command... and if split set to true: split into a list of strings, one per line of output. Args: cmd (str): the shell command to be executed. split (bool): whether to split the output per line Returns: (list[str]): the strings from each output line. """ output = subprocess.check_output(cmd, shell=True).decode("utf-8") if split: output = [x for x in output.split("\n") if x] return output
f443b863d9d6da6635243004a9c8cad672a38637
692,800
def getDictFromTuple(values: tuple, keys: list, includeNone: bool = True):
    """returns a dict based on the tuple values and assigns the values to the
    keys provided\n
    for instance, values=(1, "bill", 5} and keys=["id", "name", "age"] returns
    {"id": 1, "name": "bill", "age": 5}

    When includeNone is False, entries whose value is None are skipped.
    """
    result = {}
    for idx, val in enumerate(values):
        if includeNone or (val is not None):
            result[keys[idx]] = val
    return result
b4a182ee561d2640004aa57b6c75f669af9261b3
692,801
def prune_existing_records(db, records_to_be_saved):
    """
    Return the subset of *records_to_be_saved* whose (asx_code, fetch_date)
    pair is not already present in db.asx_prices.
    """
    # Collect "code-date" keys already stored for every fetch date involved.
    seen_keys = set()
    for fetch_date in {rec['fetch_date'] for rec in records_to_be_saved}:
        for code in db.asx_prices.distinct('asx_code', {'fetch_date': fetch_date}):
            seen_keys.add("{}-{}".format(code, fetch_date))
    return [rec for rec in records_to_be_saved
            if "{}-{}".format(rec['asx_code'], rec['fetch_date']) not in seen_keys]
d7c05b2d98701a84a041b0a84c802db75a53841b
692,802
def get_bundle_id(issn_id, year, volume=None, number=None, supplement=None):
    """
    Generate the id used by the migration tool to register a documentsbundle.

    When volume, number and supplement are all None the bundle is an
    ahead-of-print one and the id is "<issn_id>-aop".
    """
    if volume is None and number is None and supplement is None:
        return issn_id + "-aop"

    parts = []
    for value in (issn_id, year):
        if value:
            parts.append(value)

    # Digit-only values are normalised (leading zeros stripped) and prefixed.
    for prefix, value in (("v", volume), ("n", number), ("s", supplement)):
        if value:
            parts.append(prefix + (str(int(value)) if value.isdigit() else value))

    return "-".join(parts)
3db7f8e252410fadf2e9a5ce1663ddb31b7a2dc0
692,803
import re def filter_per_field_fail(field, filename, filename_filter_per_field): """Check if filename fails the criteria in the field-specific filename filter""" if filename_filter_per_field is None: return False for filter_field, filename_filter in filename_filter_per_field.iteritems(): if filter_field != field: continue if not re.search(filename_filter, filename): return True return False
4984a6e8be16d6ee6d66d703b15bc490fd2a0b73
692,805
import pathlib import venv def create_venv(lib_name: str, py_version: str) -> pathlib.Path: """ creates the new virtual environment :param lib_name: name of library :param py_version: string representation of two-digit python version (ie 37) :return: path to venv """ venv_name = f"{lib_name}-go-{py_version}" venv_path = pathlib.Path(f"~/venvs/{venv_name}").expanduser() try: venv_path.mkdir(parents=True, exist_ok=False) except FileExistsError as error: raise error venv.create(env_dir=str(venv_path), with_pip=True, system_site_packages=True) return venv_path
c1cc853f121011805b801d35aa272c6b4477a8dc
692,807
def _get_fpath_to_parsed_file_map(parsed_files): """Creates a map: filepath -> Parser from the given list of Parser """ fpath_to_file = dict() for f in parsed_files: fpath_to_file[f.filepath] = f return fpath_to_file
5ecc68ba5d9918ef4171abc94de87d56b2af8e59
692,808
def to_kebab(value: str) -> str:
    """Convert a snake_case string to kebab-case.

    The original wrapped ``str.replace`` (which cannot fail for a str input)
    in ``try/except Exception`` and re-raised a bare ``Exception``, destroying
    the real exception type for non-string inputs; the wrapper is removed.
    """
    return value.replace('_', '-')
42b18fa6ec2d483a5c12064016190d500837b6fc
692,809
def select_all(_):
    """Predicate that accepts every particle unconditionally."""
    return True
34e277c1ae59a9032e5d09e45cf27732185d9c49
692,810
def list_numbering_start(attrs):
    """extract numbering from list element attributes

    Returns the zero-based start index (0 when no 'start' attribute exists).
    """
    if 'start' not in attrs:
        return 0
    return int(attrs['start']) - 1
0b771944904dcb4bdcb442f8f88e9a415cc9171d
692,811
def append_slash(url, append=True):
    """Append a slash to a URL, checking if it already has one.

    With append=False the trailing slash is removed instead.
    """
    has_slash = url.endswith("/")
    if append:
        return url if has_slash else url + "/"
    return url[:-1] if has_slash else url
d3bcb71674fca2e984b9c9104bfab70c434ce324
692,812
def load_header(filename):
    """Load header lines from a WFDB ``.hea`` file.

    :param filename: path of the record without the ``.hea`` extension.
    :return: list of header lines (each retaining its trailing newline).
    """
    # Use a context manager so the file is closed even if readlines() raises;
    # the original left the handle open on error.
    with open(filename + ".hea", 'r') as header_file:
        return header_file.readlines()
fc21158d0e45f810c2ff4c7af2689329253d7885
692,814
import sqlite3 from typing import Optional def execute_query( connection: sqlite3.Connection, query: str, args: Optional[dict] = None ) -> list: """Given sqlite3.Connection and a string query (and optionally necessary query args as a dict), Attempt to execute query with cursor, commit transaction, and return fetched rows""" cur = connection.cursor() if args is not None: cur.execute(query, args) else: cur.execute(query) connection.commit() results = cur.fetchall() cur.close() return results
d0f89247281d672cd74ffcd71fa6c401064512d8
692,815
import fnmatch def matchPattern(string, pattern): """ > matchPattern("nameTest1", "nameTest") False > matchPattern("nameTest1", "nameTest*") True """ return fnmatch.fnmatch(string, pattern)
ff8bf4ee28af701139e9e4b900171338c6a354d1
692,816
def pad4(seq):
    """ Pad each string in seq with zeros up to four places. Note that there
    is no reason to actually write this function; Python already does this
    sort of thing much better. It's just an example! """
    # rjust pads on the left with '0' and leaves longer strings untouched,
    # exactly like the manual "0" * (4 - len(s)) prefix.
    return [item.rjust(4, "0") for item in seq]
0b14d8051f82139f53844bb376b45eb25c6599c8
692,817
def extract_tables_from_query(sql_query: str):
    """ return a list of table_names

    A token counts as a table name when it has exactly three dot-separated
    parts (project.dataset.table style).
    """
    tables = []
    for token in sql_query.split(" "):
        if len(token.split(".")) == 3:
            tables.append(token)
    return tables
bdf577a5987b4b55a6b676947be5e66b4f06c346
692,818
def is_scalar(value):
    """Checks if the supplied value can be converted to a scalar."""
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False
3e19932bdce589bee65947096f594dc856ed22e7
692,819
import torch from typing import Optional def alpha_blending( foreground: torch.Tensor, background: Optional[torch.Tensor] = None ) -> torch.Tensor: """ Here you can find all formulas https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending http://web.cse.ohio-state.edu/~parent.1/classes/581/Lectures/13.TransparencyHandout.pdf Arguments: foreground (torch.Tensor): foreground.shape == [H * W, 4], where 4 is decomposed to RGBA background (Optional[torch.Tensor]): same as foreground Outputs: output (torch.Tensor): output.shape == [H * W, 3], where 3 is decomposed to RGB """ # Wea assume that the first 3 is RGB and last 4'th is alpha foreground_rgb, foreground_alpha = foreground.split([3, 1], dim=-1) # In this case we suggest that background is white and fully opaque # and thus each pixel has color (1.0, 1.0, 1.0) and 1.0 alpha if background is None: return foreground_rgb * foreground_alpha + (1 - foreground_alpha) # Otherwise we apply premultiply alpha blending procedure background_rgb, background_alpha = foreground.split([3, 1], dim=-1) image = foreground_rgb * foreground_alpha + background_rgb * background_alpha * (1 - foreground_alpha) image /= foreground_alpha + background_alpha * (1 - foreground_alpha) return image
8b22aeee804b6ca8e3df13aecd6a48e98b7836dc
692,820
def unrecoverable_error_should_crash():
    """Test helper: signals that an UnrecoverableError should crash."""
    return True
4530490f6d93baefba639a480d9a7fa0686b6b85
692,821
import os def paramImport(paramfile, path, param_no=3): """Importer for Device Parameter File""" params = [] for i in range(param_no): params.append({}) with open(os.path.join(path, paramfile), 'r') as pfile: for line in pfile: splitline = line.strip().split('\t') name, values = splitline[0], splitline[1:] for i in range(param_no): params[i][name] = float(values[i]) return params
547212b5454accc55a6f2cedb9a83b2df68dad85
692,822
from typing import Generator import os def get_subdirectories(directory: str) -> Generator[str, None, None]: """Returns a list of subdirectories of the given directory.""" return (f.path for f in os.scandir(directory) if f.is_dir())
9cf9bd13d1a63c7b3bec641824affa53abce90de
692,823
def linear_model(x, a, b, c, d, e):
    """Evaluate a linear model over the first four features of *x*.

    Args:
        x: sequence with at least four feature values.
        a, b, c, d: coefficients applied to x[0]..x[3] respectively.
        e: intercept term.

    Returns:
        e + a*x[0] + b*x[1] + c*x[2] + d*x[3]
    """
    total = e
    total += a * x[0]
    total += b * x[1]
    total += c * x[2]
    total += d * x[3]
    return total
22507cd17aac3befb3c5910d92fb67bf2300d565
692,824
import os def _get_ycsb_file_paths(directory_path): """ Recursively search the directory tree starting at `directory_path` for files whose name starts with "test_screen_capture.log" and return a list of their fully qualified paths. """ file_paths = [] for sub_directory_path, _, filenames in os.walk(directory_path): for filename in filenames: if filename.startswith("test_output.log"): file_paths.append(os.path.join(sub_directory_path, filename)) return file_paths
a644e812827e187aa58cf476be855eea39b8ad0e
692,825
def _compute_diffs(instrs, fill_bytes): """ compute the diffs needed to replace the displaced bytes """ i = 0 addr = instrs[0].addr diffs = [] for ins in instrs: bytes = ins.bytes if hasattr(ins, 'bytes_before'): bytes = ins.bytes_before for b in bytes: if b!=fill_bytes[i]: diffs.append((addr+i, b, fill_bytes[i])) i += 1 return diffs
ccb7536d59d897cdc1617d248585cb1e07b75c1c
692,826
import string import random def generate_unique_name(rootname, suffix='', n=6): """Generate a new Random name given a rootname and, optionally a suffix Parameters ---------- rootname : Root name to which add 6 Random chars suffix : Suffix to be added (Default value = '') n : Number of random characters in the name, defaults to 6 Returns ------- """ char_set = string.ascii_uppercase + string.digits uName = ''.join(random.choice(char_set) for _ in range(n)) unique_name = rootname + "_" + uName if suffix: unique_name += "_" + suffix return unique_name
457bb75d90a7e052d1961f475aa48575e9fe0270
692,827
import six def force_unicode(s, encoding='utf-8'): """Convert a given string into a Unicode (decoded) string if it isn't already. Args: s (:obj:`str`): String object to convert. encoding (:obj:`str`, optional): The encoding of **s** if it is encoded. Defaults to 'utf-8'. Returns: :obj:`str`: A Unicode (decoded) version of **s**. """ if s is None or isinstance(s, six.text_type): return s return s.decode(encoding)
8a14522150d6a184006c528369f86f716438d46a
692,828
from dateutil.parser import parse def timestamp(date_time): """Convert a date string to number of seconds since 1 Jan 1970 00:00 UTC date: e.g. "2016-01-27 12:24:06.302724692-08" """ t0 = parse("1970-01-01 00:00:00+0000") t = parse(date_time) return (t-t0).total_seconds()
a736d92f09325252639c0505a894550dd55121f9
692,829
import math def polysum(n: int, s: int) -> float: """ Computes the sum of the area + perimeter squared of a regular polygon. The area of a regular polygon is : 0.25∗𝑛∗𝑠2𝑡𝑎𝑛(𝜋/𝑛) The perimeter of a polygon is : length of the boundary of the polygon --------------------------------------------------------- Input: n : int the number of sides of the polygon s : int the length of each side of the polygon --------------------------------------------------------- Returns : float """ area = (0.25 * n * s**2)/(math.tan(math.pi/n)) perimeter = n * s return round(area + perimeter**2, 4)
b481a17be80075b417748986ae9de892541d335c
692,830