content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os
import getpass


def get_auth(s):
    """Split a ``USER[:PASS]`` string into a (user, password) pair.

    When no password is embedded in *s*, fall back to the ``PASS``
    environment variable, and finally to an interactive prompt.
    """
    user, _, password = s.partition(':')
    if not password:
        password = os.environ.get('PASS') or getpass.getpass()
    return user, password
98b20551077e515d08b5e2042bb4d722b7877358
694,512
def _R2FromGaussian(sigmax, sigmay, pixel=0.1): """ R2. """ return (sigmax*pixel)**2 + (sigmay*pixel)**2
90a2db0d8c7f9c29870308e83afa23ac8747af61
694,513
import logging
import sys


def init_logger(quiet_mode, log_level=logging.INFO):
    """Initialize the Python logging module for DVR-Scan.

    The logger instance used is 'dvr_scan'.

    Args:
        quiet_mode: When True, strip every handler from the logger so
            nothing is emitted, and return None (matches original API).
        log_level: Level applied to the logger and its stdout handler.

    Returns:
        The configured Logger, or None in quiet mode.
    """
    logger = logging.getLogger('dvr_scan')
    logger.setLevel(log_level)
    if quiet_mode:
        # Iterate over a copy: removing handlers from the live
        # ``logger.handlers`` list while iterating it skips every
        # other handler (original bug).
        for handler in list(logger.handlers):
            logger.removeHandler(handler)
        return
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setLevel(log_level)
    handler.setFormatter(logging.Formatter(fmt='[DVR-Scan] %(message)s'))
    logger.addHandler(handler)
    return logger
51273ed7e1c82a96fd800eac14d801caf92df41e
694,514
import glob
import os


def FindFilesIn(directory, pattern):
    """List every file in *directory* whose name matches the glob *pattern*."""
    search_expr = os.path.join(directory, pattern)
    return glob.glob(search_expr)
bac4bd208c4cda0558791e691ede52c5c479a45b
694,515
import math


def freqToBark(freq):
    """Map a frequency in Hz onto the Bark psychoacoustic scale."""
    low_term = 13 * math.atan(0.00076 * freq)
    high_term = 3.5 * math.atan((freq / 7500) ** 2)
    return low_term + high_term
0d2a4b9fbb0398f1e1452e861058f30eec50be79
694,517
def merge_nested_dicts(base, more):
    """Recursively merge the mapping *more* into *base*, in place.

    Plain values in *more* overwrite duplicates in *base*; nested dicts
    are merged rather than replaced.  Returns *base* for convenience.
    """
    for key, val in more.items():
        if not isinstance(val, dict):
            base[key] = val
            continue
        base_val = base.setdefault(key, {})
        if not isinstance(base_val, dict):
            raise Exception('trying to merge a dictionary named "%s" into a non-dictionary %r' % (key, base_val))
        merge_nested_dicts(base_val, val)
    return base
d6a2d835dfcb56739dd10554c4138f747b4b1f0e
694,518
def convert(number):
    """Translate *number* into raindrop sounds.

    "Pling"/"Plang"/"Plong" are appended for divisibility by 3/5/7
    respectively; when none apply the number itself is returned as a
    string.
    """
    sounds = [word for divisor, word in ((3, "Pling"), (5, "Plang"), (7, "Plong"))
              if number % divisor == 0]
    return "".join(sounds) if sounds else str(number)
671804dbe1122cdc86e3aac16ee1415b00b3b274
694,519
from pathlib import Path


def validate_path(path, allow_none=True):
    """Validate *path* and return it as a :class:`Path`.

    :param path: Path to a directory or file.
    :type path: str
    :param allow_none: When True, a falsy *path* simply yields None.
        Otherwise (or when *path* is non-empty) the path must exist on
        disk, enforced via an assertion.
    :type allow_none: Bool
    :return: ``Path(path)`` for an existing path, or None when allowed.
    """
    if allow_none and not path:
        return None
    assert path and Path(path).exists(), f"Path is invalid: {path}"
    return Path(path)
7cfffa844438b76ee00a69b30b5ba8347c48615c
694,520
def _xor(mat,other,obj,m):
    """
    Can only be used with '^' operator

    Element-wise / row-wise XOR between a matrix and another matrix,
    list, or scalar.

    :param mat: left-hand matrix wrapper; provides BOOL_MAT flag, dim,
        DEFAULT_BOOL mapping, features and index metadata.
    :param other: right-hand operand (same matrix type, list, or scalar).
    :param obj: the matrix class, used both for isinstance checks and to
        construct the result.
    :param m: raw row data of ``mat`` (list of rows).
    :return: a new ``obj`` with BOOL_MAT=True.

    NOTE(review): when ``mat.BOOL_MAT`` is set but ``other`` is not an
    ``obj`` instance, no branch matches and the function implicitly
    returns None — confirm callers never hit that combination.
    """
    if mat.BOOL_MAT:
        if isinstance(other,obj):
            if mat.dim!=other.dim:
                raise ValueError("Dimensions of the matrices don't match")
            if not other.BOOL_MAT:
                raise TypeError("Can't compare bool matrix to non-bool matrix")
            d0,d1 = mat.dim
            o = other.matrix
            true,false = mat.DEFAULT_BOOL[True],mat.DEFAULT_BOOL[False]
            data = []
            #Reduce multiple columns into one
            #Remove rows with false boolean values
            for i in range(d0):
                mrow,orow = m[i],o[i]
                # Row-level XOR: the row is "true" only when exactly one
                # of the two rows contains a false value.
                false_mrow = false in mrow
                false_orow = false in orow
                if false_mrow == false_orow:
                    data.append([false])
                    continue
                data.append([true])
            return obj(dim=[d0,1],
                       data=data,
                       features=mat.features[:1],
                       index=mat.index[:],
                       implicit=True,BOOL_MAT=True,DEFAULT_BOOL={True:true,False:false})
    else:
        d0,d1 = mat.dim
        true,false = mat.DEFAULT_BOOL[True],mat.DEFAULT_BOOL[False]
        data = []
        if isinstance(other,obj):
            # Matrix ^ matrix: element-wise boolean XOR.
            if mat.dim!=other.dim:
                raise ValueError("Dimensions of the matrices don't match")
            if other.BOOL_MAT:
                raise TypeError("Can't compare non-bool matrix to bool matrix")
            o = other.matrix
            for i in range(d0):
                mrow,orow = m[i],o[i]
                data.append([true if (bool(mrow[j]) != bool(orow[j])) else false for j in range(d1)])
        elif isinstance(other,list):
            # Matrix ^ list: XOR every row against the same list.
            if mat.d1!=len(other):
                raise ValueError("Length of the list doesn't match matrix's column amount")
            for i in range(d0):
                mrow = m[i]
                data.append([true if (bool(mrow[j]) != bool(other[j])) else false for j in range(d1)])
        else:
            # Matrix ^ scalar: XOR every element against one value.
            for i in range(d0):
                mrow = m[i]
                data.append([true if (bool(mrow[j]) != bool(other)) else false for j in range(d1)])
        return obj(dim=[d0,d1],
                   data=data,
                   features=mat.features[:],
                   index=mat.index[:],
                   implicit=True,BOOL_MAT=True,DEFAULT_BOOL={True:true,False:false})
8e3f08236897c91edabf6a80383f0b886c35cfb6
694,521
def _default_config() -> dict: """ Creates a default configuration, used if none was provided or if the provided configuration did not cover all values. Please be careful with the spelling of the dictionary. :return: The default configuration of the program. """ default_config = {} # Program Wide Attributes default_config["seed"] = 11 default_config["transformations"] = 50 # Supported are "global" and "per_class" (Spelling is important!) default_config["transformationscope"] = "global" # Transformer Related Attributes default_config["AddUnusedVariableTransformer"] = True default_config["UnusedVariableStringRandomness"] = "full" default_config["AddCommentTransformer"] = True default_config["AddCommentStringRandomness"] = "full" default_config["RenameParameterTransformer"] = True default_config["RenameParameterStringRandomness"] = "full" default_config["RenameVariableTransformer"] = True default_config["RenameVariableStringRandomness"] = "full" default_config["AddNeutralElementTransformer"] = True default_config["LambdaIdentityTransformer"] = True return default_config
80e3c380153976ffb50fd71f969810fe93fa5c68
694,522
def expandBox(box, facs):
    """Scale ``box`` about its center by ``(x-factor, y-factor)``.

    The box is given as ``(x0, y0, x1, y1)``; a list in the same layout
    is returned.
    """
    x0, y0, x1, y1 = box
    cx = (x0 + x1) / 2.0
    cy = (y0 + y1) / 2.0
    half_w = (x1 - x0) * facs[0] / 2.0
    half_h = (y1 - y0) * facs[1] / 2.0
    return [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
4d80e5d6e0b1db31cc615f1b1a6fdb265623775f
694,523
def sum_hex_digits(ascii_hex_str):
    """Sum the byte values encoded in an ASCII hex string.

    :param ascii_hex_str: The ascii hex string to sum (two chars/byte).
    :return: The summation formatted by ``hex()``.
    :raises ValueError: when the string has an odd number of characters.
    """
    if len(ascii_hex_str) % 2:
        raise ValueError("The ASCII Hex string is not divisible by 2.")
    total = sum(int(ascii_hex_str[pos:pos + 2], 16)
                for pos in range(0, len(ascii_hex_str), 2))
    return hex(total)
2976111c89477aa8e34b6b89124f3f73c89a02c7
694,524
def get_unique_name(name, elems):
    """Make *name* unique with respect to *elems*.

    The trailing numeral of *name* (0 when absent) is incremented
    repeatedly until the resulting name no longer appears in *elems*.
    """
    stem = name.rstrip('0123456789')
    counter = int(name[len(stem):] or 0)
    while True:
        counter += 1
        candidate = stem + str(counter)
        if candidate not in elems:
            return candidate
ead72253480ae774b830b82119e91db848504348
694,525
def remove_comment(stream):
    """ Remove the tokens until the end of the comment is reached. Assumes the comment *is* there.

    :param stream: A token stream.
    :type stream: shlex.shlex
    :returns: new_stream
    :rtype: shlex.shlex
    """
    # Discard the token that opened the comment.
    stream.get_token()
    for token in stream:
        if token == "`":
            # Put the closing backtick back so the caller can see it.
            stream.push_token(token)
            break
        # End of file
        # NOTE(review): shlex iteration normally stops at EOF on its own;
        # this guard handles a stream whose eof token is "" — confirm the
        # stream's configuration actually yields it.
        elif token == "":
            break
    return stream
cd0394a1c526dee46add38d2d9972903579f1739
694,526
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter


def create_parser():
    """Build the command line parser for the LayerIndex transform tool."""
    description = '''Trigger build on Jenkins using a configuration from yaml files'''
    parser = ArgumentParser(description=description,
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument('--base_url', dest='base_url', required=False,
                        help='Base URL to use for Layerindex transform.')
    parser.add_argument('--branch', dest='branch', required=True,
                        help='Branch to use from LayerIndex.')
    parser.add_argument('--input', dest='input', required=True,
                        choices=['restapi-web', 'restapi-files'],
                        help='Format of the LayerIndex input.')
    parser.add_argument('--output', dest='output', required=True,
                        help='Which directory to place the transformed output into.')
    parser.add_argument('--source', dest='source', required=True,
                        help='Where to get the LayerIndex source. If input format is restapi-web,\n'
                             'then source is http link to a LayerIndex instance. If the input format is\n'
                             'restapi-files, then source is a local directory.')
    parser.add_argument("--output_format", dest="output_format", required=False,
                        default='django', choices=['django', 'restapi'],
                        help="Format of the transform output.")
    parser.add_argument("--split", dest="split", required=False,
                        action='store_true', default=False,
                        help="Whether to split the transform output.")
    return parser
4c8518d90bac2ffdcad92e682e54612dbc79bb00
694,527
import os


def get_api_key() -> str:
    """Fetch the Riot API key from the ``RIOT_API_KEY`` environment variable.

    Returns an empty string when the variable is unset.  In the future
    this could also read the key from disk.
    """
    return os.environ.get("RIOT_API_KEY", "")
e9f6700f893028ea9305e9226872da3ea7522ccc
694,528
import re


def flair_template_checker(input_text):
    """Check whether *input_text* is a valid Reddit post flair template ID.

    Valid IDs look like a lowercase hex UUID (8-4-4-4-12 groups of
    ``[a-z0-9]``).  Non-string input yields False.
    """
    regex_pattern = r"^[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}$"
    try:
        return re.search(regex_pattern, input_text) is not None
    except TypeError:
        return False
242717f499a632d57765754f9872728ae6433fcc
694,529
def _index_to_timestamp(index): """Convert a pandas index to timestamps if needed. Needed to parse pandas PeriodIndex to pyplot plotting functions.""" return index.to_timestamp() if hasattr(index, 'to_timestamp') else index
93fdd7bff3e32247b9b4648c75e75e63244ad17c
694,530
import time


def gen_timestamp() -> int:
    """gen_timestamp.

    Generate the current Unix timestamp in milliseconds.

    Returns:
        int: Timestamp in integer representation, rounded to the
        nearest millisecond. Use ``str()`` to transform to string.
    """
    # The original computed int((time.time() + 0.5) * 1000), which adds
    # 0.5 *seconds* (500 ms) instead of rounding to the nearest
    # millisecond; add 0.5 after scaling so it truly rounds.
    return int(time.time() * 1000 + 0.5)
1197eee6d349d6c1e3e5fd017920fa56f15a0f0b
694,531
def read_rttm(rttm, lines):
    """Collect SPEAKER lines from an RTTM file into *lines*.

    *lines* maps a file name (second whitespace-separated field) to the
    list of raw SPEAKER lines gathered for it; the updated dict is
    returned.
    """
    with open(rttm) as f:
        for line in f:
            if "SPEAKER" not in line:
                continue
            fname = line.split()[1]
            lines.setdefault(fname, []).append(line)
    return lines
d45a836a9c04918e97dbe4c715dfe42336fd1dd4
694,532
def receive_data_in_chunks(sock, buffersize):
    """Receive data in chunks of size buffersize from the socket

    Reads repeatedly until a chunk arrives that is empty or contains
    only whitespace, then returns everything received so far with
    surrounding whitespace stripped.

    NOTE(review): a mid-message chunk that happens to be all whitespace
    ends the read early, and ``recv`` may legally return fewer than
    *buffersize* bytes at any time — confirm the peer's framing matches
    this termination convention.

    :param sock: a connected socket object providing ``recv``.
    :param buffersize: maximum bytes requested per ``recv`` call.
    :return: the concatenated payload as ``bytes``.
    """
    chunk = sock.recv(buffersize)
    chunks = [chunk]
    # keep reading until chunks are available
    while len(chunk.strip()):
        chunk = sock.recv(buffersize)
        chunks.append(chunk)
    data = b''.join(chunks).strip()
    return data
a3953a9240c021b41495f58e06ae77d2f5e0d9c9
694,534
def italic(s):
    """Wrap *s* in a LaTeX ``\\textit{...}`` command.

    Source: http://stackoverflow.com/a/16264094/2570866

    :param s: text to italicize
    :type s: str
    :return: LaTeX markup
    :rtype: str
    """
    return ''.join((r'\textit{', s, '}'))
333685c359212d4db1177a951e873f7652faa65d
694,535
def pilatus_300K_mask():
    """Return the hard-coded mask regions for a Pilatus 300K instrument.

    Two rectangular regions are returned; presumably these cover the
    module gaps of the detector — confirm against instrument docs.
    """
    first_region = [1, 487, 196, 212]
    second_region = [1, 487, 408, 424]
    return [first_region, second_region]
2bad00c5ec3c1fd2bcb0685e7fc2416d3d8d0f9a
694,536
def _nt_sum(cobj, prop, theta): """ Create sum expressions in n-t forms (sum(n[i]*theta**t[i])) Args: cobj: Component object that will contain the parameters prop: name of property parameters are associated with theta: expression or variable to use for theta in expression Returns: Pyomo expression of sum term """ # Build sum term i = 1 s = 0 while True: try: ni = getattr(cobj, f"{prop}_coeff_n{i}") ti = getattr(cobj, f"{prop}_coeff_t{i}") s += ni * theta**ti i += 1 except AttributeError: break return s
7ef4674b27069d2e254ef2cb1839fbc67c571029
694,537
def normalize(f, means, stddevs):
    """Scale *f* from the 0-255 range to 0-1, then standardize it with
    the given *means* and *stddevs*."""
    scaled = f / 255
    return (scaled - means) / stddevs
67000bbc2ce8e9bf0c1dadd15c52a395460478ee
694,538
def progress_sidebar_dialog():
    """
    Generate the HTML to display analyze progress in a sidebar.

    Returns:
        dict: an action payload ``{'action': 'showProgressbar',
        'html': <markup>}``; presumably consumed by a Google Apps
        Script front end (the JS calls ``google.script.run``) — confirm
        against the caller.
    """
    # The embedded JS runs a self-correcting setTimeout loop (interval
    # 1250 ms, drift-compensated) that polls getAnalyzeProgress() and
    # rewrites the #progressTable element with the percentage.
    # NOTE(review): 'stype="width:100%"' in the <table> tag looks like a
    # typo for 'style' — left untouched here since it is runtime markup.
    html_message = '''
    <script>
    var interval = 1250; // ms
    var expected = Date.now() + interval;
    setTimeout(progressUpdate, 10);
    function progressUpdate() {
        var dt = Date.now() - expected; // the drift (positive for overshooting)
        if (dt > interval) {
            // something really bad happened. Maybe the browser (tab) was inactive?
            // possibly special handling to avoid futile "catch up" run
        }
        google.script.run.withSuccessHandler(refreshProgress).getAnalyzeProgress();
        expected += interval;
        setTimeout(progressUpdate, Math.max(0, interval - dt)); // take into account drift
    }
    function refreshProgress(prog) {
        var table = document.getElementById('progressTable')
        table.innerHTML = '<i>Analyzing, ' + prog + '% complete</i>'
    }
    var table = document.getElementById('progressTable')
    table.innerHTML = '<i>Analyzing, 0% complete</i>'
    </script>
    <center>
    <table stype="width:100%" id="progressTable">
    </table>
    </center>
    '''
    action = {'action': 'showProgressbar', 'html': html_message}
    return action
07f4ed824e9a67936414400d5c1ba30caea13cba
694,539
def calc_max_length(tensor):
    """Return the length of the longest element in *tensor*."""
    return max(map(len, tensor))
21ad43f14d8952261a45b8efcd927b82eadc83bd
694,540
def make_tuple(t):
    """Return *t* unchanged when it is already a tuple; otherwise return
    the pair ``(t, t)``."""
    if isinstance(t, tuple):
        return t
    return (t, t)
70fd74c76db30f866b3d248d6444c2d02b31f36c
694,541
def intersection(v1, v2):
    """ Returns the intersection of v1 and v2.

    Note however that these are not 'bezier lines', x1, y1, x3, and y3
    are all *changes* in x, not describing a point. So, rather than
    x0 + (x1 - x0)*t, its just x0 + x1*t. It just made the algebra
    slightly easier.

    list v1 = [x0, x1, y0, y1], list v2 = [x2, x3, y2, y3]

    :return: [x, y] point where line 1 (evaluated at its parameter t1)
        meets line 2.

    NOTE(review): the x[3]==0 branch divides by x[1]; if both lines have
    zero x-change this still divides by zero. Parallel lines also make
    the general-case denominator zero. Confirm callers exclude those.
    """
    # x/y gather the x- and y-components of both lines:
    # x = [x0, x1, x2, x3], y = [y0, y1, y2, y3].
    x = v1[0:2] + v2[0:2]
    y = v1[2:4] + v2[2:4]
    if( x[3] == 0 ):
        #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]
        t1 = (x[2] - x[0])/\
             (x[1])
        return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]
    else:
        # General case: equate the two parametric forms and solve for
        # line 1's parameter t1.
        t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\
             ( (y[3]*x[1])/x[3] - y[1] )
        return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]
159015b0c46a1079712f89ba7105eee034bb691c
694,542
def char_array_to_string(arr):
    """
    Converts a NumPy array of byte-long ASCII codes into an ASCII string.

    e.g. [65, 67, 71, 84] becomes "ACGT".
    """
    # ndarray.tostring() was deprecated in NumPy 1.19 and removed in
    # NumPy 2.0; tobytes() is the supported, byte-identical replacement.
    return arr.tobytes().decode("ascii")
b5bc74ad96a34d619311ca6226075a3378989f3d
694,544
def get_column_headers(worksheet) -> list:
    """Collect the values of row 1 across every column of the Excel sheet."""
    return [
        worksheet.cell(row=1, column=col).value
        for col in range(1, worksheet.max_column + 1)
    ]
3181f8e9d3e9fa1e0967f4edd1090032e0f773ed
694,545
def combine_atomco_dict(dict_1, dict_2):
    """Combine 2 atomco dicts into a new dict.

    Keys present in both inputs get their coordinate lists concatenated
    (dict_1's entries first); keys unique to either input are carried
    over as-is.

    >>> a.atomco_dict
    >>> {'C': [['2.01115823704755', '2.33265069974919', '10.54948252493041']],
         'Co': [['0.28355818414485', '2.31976779057375', '2.34330019781397'],
                ['2.76900337448991', '0.88479534087197', '2.34330019781397']]}
    """
    combined = {}
    for atom, coords in dict_1.items():
        if atom in dict_2:
            combined[atom] = coords + dict_2[atom]
        else:
            combined[atom] = coords
    for atom, coords in dict_2.items():
        if atom not in combined:
            combined[atom] = coords
    return combined
d892443e28f69493e4ded5bc50a42fa74fcc2395
694,546
def unpacked_properties(full_prop_name, count=2):
    """Return *count* properties that each expose one slot of a tuple property.

    For example, ``x, y = unpacked_properties('pos')`` in a class body
    makes ``obj.x`` read ``obj.pos[0]``, and assigning ``obj.x`` rebuild
    ``obj.pos`` as (new x, old y).
    """
    def make_property(index):
        def read(self):
            return getattr(self, full_prop_name)[index]

        def write(self, value):
            current = getattr(self, full_prop_name)
            updated = tuple(
                value if pos == index else item
                for pos, item in enumerate(current)
            )
            setattr(self, full_prop_name, updated)

        doc = 'Getter/setter for self.{0}[{1}]'.format(full_prop_name, index)
        return property(read, write, doc=doc)

    return [make_property(i) for i in range(count)]
cfd911afc1a313d8a5fda7d82e2a3825566ea59a
694,547
import re


def get_coffecient_3d(equation):
    """
    Returns the x, y and z coefficients plus the intercept parsed from a
    linear equation string.

    :param : eg: 2x+3y=7
    :return: eg: (2,3,7)

    NOTE(review): the original docstring said "2nd degree term", but the
    regexes only match linear terms like ``2x``/``-3.5Y`` — confirm the
    intended scope. The bare ``except`` clauses, and the final
    ``findall(...)[0]`` (IndexError when the constant has no sign),
    make this parser fragile for unexpected input.
    """
    # Each coefficient is the text before the variable letter; missing
    # terms fall back to 0.0 via the bare except.
    try:
        coef_x = re.findall('-?[0-9.]*[Xx]', equation)[0][:-1]
    except:
        coef_x = 0.0
    try:
        coef_y = re.findall('-?[0-9.]*[Yy]', equation)[0][:-1]
    except:
        coef_y = 0.0
    try:
        coef_z = re.findall('-?[0-9.]*[Zz]', equation)[0][:-1]
    except:
        coef_z = 0.0
    # Strip every variable term, then pull the signed constant out of
    # what remains.
    intercept = re.sub("[+-]?\d+[XxYyZz]|[+-]?\d+\.\d+[XxYyZz]", "", equation)
    intercept = re.findall('[+-]+\d+', intercept)[0]
    # A bare variable ('x') or negated variable ('-x') means 1 or -1.
    if coef_x == '':
        coef_x = 1.0
    elif coef_x == '-':
        coef_x = -1.0
    if coef_y == '':
        coef_y = 1.0
    elif coef_y == '-':
        coef_y = -1.0
    if coef_z == '':
        coef_z = 1.0
    elif coef_z == '-':
        coef_z = -1.0
    return [float(coef_x), float(coef_y), float(coef_z), float(intercept)]
89ebb176941f10a659ad03f789a0ccd1617f44c6
694,548
import os


def find_packages():
    """adapted from IPython's setupbase.find_packages()

    Walk the ``zmq`` directory tree and return dotted package names for
    every directory that contains an ``__init__.py``.
    """
    packages = []
    for dirpath, _subdirs, filenames in os.walk('zmq'):
        if '__init__.py' not in filenames:
            # not a package
            continue
        packages.append(dirpath.replace(os.path.sep, '.'))
    return packages
61ea0267f414f0e2769c0ca24f363e5ac1a37790
694,550
def SetWriterMolProps(Writer, Mol):
    """Register *Mol*'s property names on *Writer* so they are written out.

    Arguments:
        Writer (object): RDKit writer object.
        Mol (object): RDKit molecule object.

    Returns:
        object : Writer object (the same instance, for chaining).
    """
    prop_names = list(Mol.GetPropNames())
    if prop_names:
        Writer.SetProps(prop_names)
    return Writer
2e4553c99fbd9c82e1ef451530d8bc10d7e18cf7
694,551
def retrive_experiments(experiment_paths):
    """Gather experiment files from every path in *experiment_paths*.

    Recursively collects ``*.dat`` files, ``structure.oebin`` info
    files and ``sync_messages.txt`` recording-message files, and
    returns them as three lists in that order.
    """
    all_dat_files = []
    all_info_files = []
    all_recording_message_files = []
    for experiment_loc in experiment_paths:
        all_dat_files.extend(experiment_loc.glob("**/*.dat"))
        all_info_files.extend(experiment_loc.glob("**/structure.oebin"))
        all_recording_message_files.extend(
            experiment_loc.glob("**/sync_messages.txt"))
    return all_dat_files, all_info_files, all_recording_message_files
41a92787732f78539dd4f94b7aa2373e783c5e44
694,552
def apply_noun_verb(initial_program, noun, verb):
    """Return a copy of *initial_program* with positions 1 and 2 replaced
    by *noun* and *verb*.

    >>> apply_noun_verb([1, 2, 3, 4], 12, 2)
    [1, 12, 2, 4]
    """
    head = initial_program[:1]
    tail = initial_program[3:]
    return head + [noun, verb] + tail
fe3c400ecb053b9d6e60e8c82657b32914562e71
694,553
def init_task(parts, name='init-pdpart'):
    """Create a doit task to initialize a Partitioned directory.

    Parameters
    ----------
    parts : pdpart.Partitioned
        Partitioned object to be initialized
    name : str
        name of task, defaults to 'init-pdpart'
    """
    def _action():
        """Swallow init_dir's return value for compatibility with doit."""
        parts.init_dir()

    return dict(
        name=name,
        actions=[(_action, [], {})],
        file_dep=[],
        targets=[parts.fn_meta],
        uptodate=[True],
    )
a8c1cb27461b48b5c9f0a8733b058a24403a5227
694,554
def check_created_project_structure(context):
    """Behave step: assert that ``kedro new`` created README.md, src and
    data under the project root."""
    root = context.root_project_dir
    for name in ("README.md", "src", "data"):
        # Each expected entry must exist under the new project root.
        assert (root / name).exists()
1e92ec7bf16d2c2cfb40a6be43c60b835765e757
694,555
def partition(l, size):
    """Split *l* into consecutive sub-lists of length *size*.

    The last sub-list is shorter when ``len(l)`` is not evenly divisible
    by *size*.

    :param l: the list to partition
    :param size: the size of each sub-list
    :return: a list of sub-lists
    """
    chunks = []
    for start in range(0, len(l), size):
        chunks.append(l[start:start + size])
    return chunks
6d24bdf1b8e46450b7070c2819180cf40fd418b3
694,556
from datetime import datetime


def time_fin(start_time):
    """Return the elapsed time since *start_time* as a string.

    Pairs with a ``time_ini()`` helper that captured
    ``datetime.now()`` at the start of the script.
    """
    elapsed = datetime.now() - start_time
    return str(elapsed)
1a9f6c1f665edb86425f57006c24b2cb71011f2e
694,558
def is_even(x):
    """Return whether the integer ``x`` is even (divisible by 2).

    Objects exposing an ``is_even`` method (e.g. Sage integers) are
    asked directly; everything else falls back to a modulo check.

    EXAMPLES::

        sage: is_even(-1)
        False
        sage: is_even(4)
        True
        sage: is_even(-2)
        True
    """
    try:
        return x.is_even()
    except AttributeError:
        return x % 2 == 0
6149a4e266070d6d0dd0b7f03df30e3ee1685edf
694,559
import logging
import sys


def get_logger(name='default'):
    """Return a logger, defaulting to the calling module's logger.

    :param name: Explicit logger name, or the sentinel 'default' to use
        the caller's ``__name__``.

    The original implementation did ``locals().get('__name__')``, which
    is always None inside this function — so the 'default' case silently
    returned the root logger instead of the caller's module logger.
    """
    if name == 'default':
        # Read ``__name__`` from the caller's frame: this is what the
        # broken locals() lookup was trying to achieve.
        caller_globals = sys._getframe(1).f_globals
        name = caller_globals.get('__name__', 'default')
    return logging.getLogger(name)
f73f507b588e085dd1d261cef6f72a9d390b3285
694,561
def drop(n):
    """Drop n items from collection (first in dropped).

    Returns a generator function: applied to a collection it lazily
    yields every item after the first *n*.
    """
    def generator(coll):
        iterator = iter(coll)
        for _ in range(n):
            next(iterator, None)
        yield from iterator
    return generator
1852389372010ba6652e653e9c605a94859a24ca
694,562
def key_set(d):
    """A set of all the keys of a dict.

    Iterating a dict yields its keys, so ``set(d)`` works on both
    Python 2 and 3 — the original used ``dict.iterkeys``, which was
    removed in Python 3.
    """
    return set(d)
e5e6ad8d1ad25003689d5a1135c2bd9919ae7729
694,563
def gimmick(message):
    """Detect if a message is the result of a gib gimmick.

    True when the lowercased text contains "oob" more than 3 times,
    "ob" more than 4 times, or the message is entirely uppercase.
    None is never a gimmick.
    """
    if message is None:
        return False
    lowered = message.lower()
    return (
        lowered.count("oob") > 3
        or lowered.count("ob") > 4
        or message.isupper()
    )
91f8ffa77e8ddddeb7b7de2b39809b964ffb502b
694,564
import string


def get_first_link(links: str):
    """Return the first link packed in *links*.

    Links are separated by non-printable characters, so the first link
    is the leading run of printable characters. The original fell off
    the end of its loop and returned None when the whole string was
    printable — i.e. when it contained exactly one link; now the full
    string is returned in that case.
    """
    for index, char in enumerate(links):
        if char not in string.printable:
            return links[:index]
    return links
c250098043cb1393732a73946aa3f40056803e0b
694,565
import logging


def find_files(wavdir, txtdir):
    """Pair each ``.wav`` under *wavdir* with the same-stem ``.txt`` under *txtdir*.

    Returns a dict mapping stem -> (wav_path, txt_path).  A stem that
    matches two text files raises ValueError; a stem with no text file
    is logged as an error and skipped.
    """
    files_dict = {}
    all_txt = list(txtdir.glob("**/*.txt"))
    for wav in wavdir.glob("**/*.wav"):
        stem = wav.stem
        matches = [candidate for candidate in all_txt if candidate.stem == stem]
        if len(matches) > 1:
            raise ValueError(f"Duplicate found: {stem}")
        if not matches:
            logging.error(f"No text found for {stem}.wav")
        else:
            files_dict[stem] = (wav, matches[0])
    return files_dict
80eec845ae0df97a750e3114e2ed527483abd226
694,566
def print_results(content: dict):
    """
    Print the "result" entries of *content* as a numbered list, then
    prompt the user for a selection.

    :param content: mapping with a "result" list of dicts, each of
        which carries a "key" entry to display.
    :return: the integer typed by the user (0 means quit).
    :raises ValueError: if the typed input is not an integer.
    """
    print("[+] Listing results ({}) :".format(len(content["result"])))
    for index, elt in enumerate(content["result"]):
        # 1-based numbering for human-friendly selection.
        print("[-] {}-) {}".format(index + 1, elt["key"]))
    return int(input("\n[?] Your choice (0 to quit):"))
bb96e7aec631c18304cd865add191160a9491fa5
694,567
import yaml


def load_yaml(data):
    """Parse a unicode string containing yaml.

    This calls ``yaml.load(data)`` but makes sure unicode is handled
    correctly.

    See :func:`yaml.load`.

    :raises yaml.YAMLError: if parsing fails"""
    class MyLoader(yaml.SafeLoader):
        def construct_yaml_str(self, node):
            # Override the default string handling function
            # to always return unicode objects
            return self.construct_scalar(node)
    # Route every YAML string scalar through the override above.
    MyLoader.add_constructor(
        'tag:yaml.org,2002:str', MyLoader.construct_yaml_str)
    return yaml.load(data, Loader=MyLoader)
f55e144582707d603d263066ef3555049d2ea377
694,568
def getReadTimeout():
    """Returns the read timeout in milliseconds for all client-to-gateway
    communication. This is the maximum amount of time allowed for a
    communication operation to complete.

    Returns:
        int: The current read timeout, in milliseconds. Default is
        60,000 ms (one minute).
    """
    # Stub implementation: always reports the default of one minute.
    return 60000
107d30b1c95fad3db963fa87eba9e1071c93c0bf
694,569
def convert_number(n):
    """Format *n* with comma thousands separators.

    Ex: 123456 -> '123,456'. None becomes '0'; any fractional part is
    kept verbatim after the decimal point.

    :param n: number (or numeric string) to format
    :return: formatted string
    """
    if n is None:
        return '0'
    text = str(n)
    if '.' in text:
        whole, frac = text.split('.')
    else:
        whole, frac = text, None
    # Walk the integer part right-to-left, dropping a comma before
    # every group of three digits.
    pieces = []
    for pos, ch in enumerate(reversed(whole)):
        if pos and pos % 3 == 0:
            pieces.append(',')
        pieces.append(ch)
    formatted = ''.join(reversed(pieces))
    if frac:
        formatted += '.' + frac
    return formatted
4c722bb1a0ca65d956ef8fa3118acd7c23359e5e
694,570
def _serialize_agent(controlamp): """ Serialize a connected ``ControlAMP`` to the address of its peer. :return: A string representation of the Twisted address object describing the remote address of the connection of the given protocol. :rtype str: """ return str(controlamp.transport.getPeer())
f3f7382ed9a117e3575349c7067215964dfa33a2
694,571
def merge(l_arr, r_arr):
    """Merge two already-sorted lists into one sorted list.

    :param l_arr: sorted left input
    :param r_arr: sorted right input
    :return: new merged, sorted list (equal elements take the right
        list's item first, matching the original comparison)
    """
    merged = []
    i = j = 0
    while i < len(l_arr) and j < len(r_arr):
        if l_arr[i] < r_arr[j]:
            merged.append(l_arr[i])
            i += 1
        else:
            merged.append(r_arr[j])
            j += 1
    merged.extend(l_arr[i:])
    merged.extend(r_arr[j:])
    return merged
c55a548a56eb3a996029dbe26506e483b49354a6
694,572
from typing import Dict
from typing import List


def process_group_mappings(
    group_mapping_config: Dict[str, dict],
    sso_attributes: dict,
    groups: List[str]
):
    """Translate Keycloak groups into Discourse-compatible SSO attributes.

    Args:
        group_mapping_config (Dict[str, dict]): Predefined mapping rules
            for the keycloak group tree, keyed by group name; each rule
            carries "name" plus optional "isMod"/"isAdmin" flags.
        sso_attributes (dict): SSO attributes as they will be processed
            by the bridge; mutated in place and returned.
        groups (List[str]): Groups from the userinfo endpoint.

    Admin/moderator are pinned to "false" when unset so a group change
    actually resets those permissions.
    """
    mapped_names = []
    for group in groups:
        rule = group_mapping_config.get(group)
        if rule is None:
            # Group has no mapping rule: ignore it.
            continue
        if rule.get("isAdmin", False):
            sso_attributes["admin"] = "true"
        elif rule.get("isMod", False):
            sso_attributes["moderator"] = "true"
        else:
            mapped_names.append(rule["name"])
    # Make sure the mod and admin privileges are pinned to false to
    # trigger permission reset on group change.
    sso_attributes.setdefault("admin", "false")
    sso_attributes.setdefault("moderator", "false")
    sso_attributes["groups"] = ",".join(mapped_names)
    return sso_attributes
53643ab55607837ce04ce4a7a3637b558780fd08
694,573
import hashlib def _plan_hash_str(fn): """returns a hash digest for a file, ideally a super-unique one""" # from: http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file afile = open(fn, 'rb') hasher = hashlib.sha256() blocksize = 65536 buf = afile.read(blocksize) while len(buf) > 0: hasher.update(buf) buf = afile.read(blocksize) ret = hasher.hexdigest() afile.close() return ret
62841a3dc2ca44b9e7f1902175462c884494b758
694,574
def get_literal_type(value):
    """Get the data type

    :param value: The data
    :return: The data type as a string
    :rtype: str
    """
    return value.__class__.__name__
ed0fe2541f2439f98be14345bc622c9d49b3165b
694,575
import yaml


def parsed_site_config(site_config_yml):
    """Fixture that returns the parsed contents of the example site
    config YAML file in the resource directory."""
    loader = yaml.SafeLoader
    return yaml.load(site_config_yml, Loader=loader)
71c692dc80cb631d813bf9e53d98b0e1cd9e5a9d
694,576
import numpy


def _centers_dense(X, sample_weight, labels, n_clusters, distances, X_sort_index):
    """
    M step of the K-means EM algorithm.
    Computation of cluster centers / means.

    :param X: array-like, shape (n_samples, n_features)
    :param sample_weight: array-like, shape (n_samples,)
        The weights for each observation in X.
    :param labels: array of integers, shape (n_samples)
        Current label assignment
    :param n_clusters: int
        Number of desired clusters
    :param distances: array-like, shape (n_samples)
        Distance to closest cluster for each sample.
    :param X_sort_index: array-like, shape (n_samples, n_features)
        index of each feature in all features
    :return: centers, array, shape (n_clusters, n_features)
        The resulting centers

    NOTE(review): despite the "means" wording above, the uniform-weight
    branch computes the per-cluster *median* (k-medians style M step) —
    confirm which is intended. ``X_sort_index`` is accepted but never
    used in this body.
    """
    dtype = X.dtype
    n_features = X.shape[1]
    n_samples = X.shape[0]
    centers = numpy.zeros((n_clusters, n_features), dtype=dtype)
    weight_in_cluster = numpy.zeros((n_clusters,), dtype=dtype)
    # Accumulate the total sample weight assigned to each cluster.
    for i in range(n_samples):
        c = labels[i]
        weight_in_cluster[c] += sample_weight[i]
    empty_clusters = numpy.where(weight_in_cluster == 0)[0]
    if len(empty_clusters) > 0:  # pragma: no cover
        # find points to reassign empty clusters to
        # Farthest points (by current distance) seed the empty clusters.
        far_from_centers = distances.argsort()[::-1]
        for i, cluster_id in enumerate(empty_clusters):
            far_index = far_from_centers[i]
            new_center = X[far_index] * sample_weight[far_index]
            centers[cluster_id] = new_center
            weight_in_cluster[cluster_id] = sample_weight[far_index]
    if sample_weight.min() == sample_weight.max():
        # to optimize
        # Uniform weights: the weighted median degenerates to the plain
        # per-cluster median.
        for i in range(n_clusters):
            sub = X[labels == i]
            med = numpy.median(sub, axis=0)
            centers[i, :] = med
    else:
        raise NotImplementedError(  # pragma: no cover
            "Non uniform weights are not implemented yet as "
            "the cost would be very high. "
            "See https://en.wikipedia.org/wiki/Weighted_median#Algorithm.")
    return centers
509e0e64f6a4cebac1ec6fc4f78857becee73c64
694,578
import inspect
import six


def _GetArgSpecFnInfo(fn):
    """Gives information pertaining to computing the ArgSpec of fn.

    Determines if the first arg is supplied automatically when fn is
    called. This arg will be supplied automatically if fn is a bound
    method or a class with an __init__ method. Also returns the function
    who's ArgSpec should be used for determining the calling parameters
    for fn. This may be different from fn itself if fn is a class with
    an __init__ method.

    Args:
        fn: The function or class of interest.

    Returns:
        A tuple with the following two items:
            fn: The function to use for determing the arg spec of this
                function.
            skip_arg: Whether the first argument will be supplied
                automatically, and hence should be skipped when
                supplying args from a Fire command.
    """
    skip_arg = False
    if inspect.isclass(fn):
        # If the function is a class, we try to use it's init method.
        skip_arg = True
        # Only on Python 2 is __init__ substituted; on Python 3 the
        # class object itself is inspectable.
        if six.PY2 and hasattr(fn, '__init__'):
            fn = fn.__init__
    else:
        # If the function is a bound method, we skip the `self` argument.
        is_method = inspect.ismethod(fn)
        skip_arg = is_method and fn.__self__ is not None
    return fn, skip_arg
939d7e0a5f49f317d881638fd77be26f379f5e9a
694,579
def parse_raw_data_catalog(raw_data_catalog):
    """ Clean up the raw data catalog dataframe

    Selects the 21 columns of interest from the raw catalog, renames
    them to the project's canonical names (same order), and tags cabled
    array sub-regions in ``array_name``.

    :param raw_data_catalog: pandas DataFrame holding the raw catalog;
        presumably sourced from the OOI data catalog — confirm upstream.
    :return: cleaned copy of the catalog DataFrame.
    """
    # Column selection and the rename list below are positional — the
    # two lists must stay the same length and order.
    filtered_data_catalog = raw_data_catalog[
        [
            "tocL1", "tocL2", "tocL3",
            "reference_designator",
            "platform_code", "mooring_code", "instrument_code",
            "beginTime", "endTime",
            "method", "stream_content", "stream_type", "stream_rd",
            "instrument", "manufacturer", "model",
            "parameter_rd", "standard_name", "unit",
            "display_name", "description",
        ]
    ]
    filtered_data_catalog.columns = [
        "array_name", "site_name", "infrastructure_name",  # noqa
        "reference_designator", "site_rd", "infrastructure_rd",  # noqa
        "instrument_rd", "begin_date", "end_date",
        "stream_method", "stream_content", "stream_type",  # noqa
        "stream_rd", "instrument_name", "instrument_manufacturer",  # noqa
        "instrument_model", "parameter_rd", "standard_name",  # noqa
        "unit", "display_name", "description",
    ]
    final_data_catalog = filtered_data_catalog.copy()
    # Ensure that cabled array sub regions are highlighted
    final_data_catalog.loc[
        :, "array_name"
    ] = filtered_data_catalog.array_name.apply(  # noqa
        lambda row: f"{row} (Cabled Array)"
        if row in ["Cabled Continental Margin", "Cabled Axial Seamount"]  # noqa
        else row
    )  # noqa
    # Drop the intermediate frame promptly to reduce peak memory.
    del filtered_data_catalog
    return final_data_catalog
adc0fcc2da25ccaa899f5f833dc56b339d1d838b
694,580
from typing import Any, Dict, Tuple
import os


def check_status(data: Dict[str, Any]) -> Tuple[int, str]:
    """Check if the process is alive.

    Return (pid, sockname) on success.
    Raise SystemExit(<message>) if something's wrong.
    """
    try:
        pid = data['pid']
    except KeyError:
        raise SystemExit("Invalid status file (no pid field)")
    if not isinstance(pid, int):
        raise SystemExit("pid field is not an int")
    try:
        # Signal 0 performs existence/permission checks without signalling.
        os.kill(pid, 0)
    except OSError:
        raise SystemExit("Daemon has died")
    try:
        sockname = data['sockname']
    except KeyError:
        raise SystemExit("Invalid status file (no sockname field)")
    if not isinstance(sockname, str):
        raise SystemExit("sockname field is not a string")
    return pid, sockname
e5f243885b9bdcb8ab92be2c925eeb6b4996bf5e
694,581
import torch


def sample_gumbel(score, sampling_number=5, eps=1e-10):
    """Add Gumbel(0, 1) noise to rescaled scores for stochastic sampling.

    Args:
        score: [batch x num_tasks] tensor of (unnormalized) scores.
        sampling_number: number of independent noise draws per batch row.
        eps: small constant keeping the double logarithm numerically stable.

    Returns:
        Tensor of shape [batch x sampling_number x num_tasks] equal to the
        rescaled score plus -log(-log(U)) noise, U ~ Uniform(0, 1).
    """
    # Rescale each row to a comparable range before adding noise.
    # NOTE(review): if every score in a row is equal, (tmax - tmin) is zero
    # and this division yields inf/nan — confirm inputs never degenerate.
    tmax, _ = torch.max(score, dim=-1)
    tmin, _ = torch.min(score, dim=-1)
    score = score / (tmax - tmin).unsqueeze(1)
    score = score * score.size(1)
    batch_size = score.size(0)
    num_tasks = score.size(1)
    # U ~ Uniform(0, 1); -log(-log(U)) is a standard Gumbel(0, 1) sample.
    U = torch.rand([batch_size, sampling_number, num_tasks])
    return score.unsqueeze(1) - torch.log(-torch.log(U + eps) + eps)
61df970ab25aa9c32d539252f60738c526823617
694,582
def to_str(name=None, email=None):
    """Build a display string from an optional name and email.

    Produces "name", "<email>", "name <email>", or "" depending on which
    arguments are truthy.
    """
    parts = []
    if name:
        parts.append(name)
    if email:
        parts.append("<" + email + ">")
    return " ".join(parts)
1cc8a6a5dc68f05d8193ce2c41e88d76a012a2f6
694,583
def gradezero(evtdata):
    """Only accept counts with GRADE==0.

    Parameters
    ----------
    evtdata: FITS data class
        This should be an hdu.data structure from a NuSTAR FITS file.

    Returns
    -------
    goodinds: iterable
        Index of evtdata that passes the filtering.
    """
    # First element of nonzero() is the array of row indices.
    goodinds = (evtdata['GRADE'] == 0).nonzero()[0]
    return goodinds
370bbf16b25d9543b7e485f0e366b709d6261dfb
694,584
def unicode_replacements(latex):
    """Replace specific unicode characters / UTF-8 byte runs in *latex*
    with their LaTeX command equivalents.

    Replacements are applied in order, so multi-character sequences are
    handled before any of their sub-sequences could interfere.
    """
    replacements = (
        # unicode operators
        ("\xe2\x88\x92", '-'),
        ("\xc3\x97", r'\times'),
        # unicode superscripts
        ("\xc2\xb0", r'\text{$^\circ$}'),
        ("\xe2\x81\xbb\xc2\xb9", r'^{-1}'),
        ("\xc2\xb2", r'^{2}'),
        ("\xc2\xb3", r'^{3}'),
        ("\xe2\x84\x83", r'^{\circ}C'),
        # unicode punctuation / spacing
        ("\xc2\xa0", ' '),
        # unicode symbols
        ("\xce\xa9", r'\ensuremath{\Omega}'),
        ("\xe2\x82\xac", r'\euro'),
        # latex replacements
        ('\xc2\xb7', r'\ensuremath{\cdot}'),
        ("\xb7", r'\ensuremath{\cdot}'),
        ('\xb5', r'\ensuremath{\mu}'),
        ('\u03bc', r'\ensuremath{\mu}'),
        ('μ', r'\ensuremath{\mu}'),
        ('µ', r'\ensuremath{\mu}'),
    )
    for needle, command in replacements:
        latex = latex.replace(needle, command)
    return latex
0d0c7a00bd2d5bd327398639833d447d433f7156
694,585
def exists_db(connection, name):
    """Check whether a database exists.

    :param connection: A connection
    :param name: The name of the database
    :returns: Whether the db exists
    :rtype: bool
    """
    # Use a bound parameter rather than interpolating `name` into the SQL:
    # the original str.format produced an unquoted (and thus invalid) value
    # and was vulnerable to SQL injection.
    exists = connection.execute(
        "SELECT 1 FROM pg_database WHERE datname = %(datname)s",
        {"datname": name},
    ).first()
    connection.execute("COMMIT")
    return exists is not None
2a729b85cbf9e9cdc30970bcaa4258a7ff8a97fb
694,586
def _ggm_qcondwait_whitt_ds3(cs2): """ Return the approximate E(V^3)/(EV)^2 where V is a service time; based on either a hyperexponential or Erlang distribution. Used in approximation of conditional wait time CDF (conditional on W>0). Whitt refers to conditional wait as D in his paper: See Whitt, Ward. "Approximations for the GI/G/m queue" Production and Operations Management 2, 2 (Spring 1993): 114-161. This is Equation 4.3 on p146. Note that there is a typo in the original paper in which the first term for Case 1 is shown as cubed, whereas it should be squared. This can be confirmed by seeing Eq 51 in Whitt's paper on the QNA (Bell Systems Technical Journal, Nov 1983). Parameters ---------- cs2 : float squared coefficient of variation for service time distribution Returns ------- float mean wait time in queue """ if cs2 >= 1: ds3 = 3.0 * cs2 * (1.0 + cs2) else: ds3 = (2 * cs2 + 1.0) * (cs2 + 1.0) return ds3
fb6f4cadafeae2f3cb29200a4ae3402a0317c63a
694,587
def is_blank_line(p):
    """Return True if *p* is empty or consists entirely of whitespace.

    A string whose strip() is empty is falsy, so the truthiness test alone
    answers the question — no explicit True/False branches needed.
    """
    return not p.strip()
e0ecf3eba7b2a5a4dcc193150cc483457df557cc
694,589
import argparse def _get_parser(): """Returns an argument parser for this script.""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--output', required=True, help='The output file name') parser.add_argument('--name', required=True, help='The library name') parser.add_argument('--description', default='', help='The description of the library') parser.add_argument('--version', required=True, help='The version of the library') parser.add_argument('--requires', action='append', default=[], help='Packages for Required') parser.add_argument('--requires-private', action='append', default=[], help='Packages for Required.private') parser.add_argument('--libs', action='append', default=[], help='Libraries for Libs') parser.add_argument('--libs-private', action='append', default=[], help='Libraries for Libs.private') parser.add_argument('--cflags', action='append', default=[], help='Compiler flags') return parser
35cfc8ea92e7b97e0f1ebcd810860e793dc251a2
694,590
import re
import click


def validate_mfa_device_name(ctx, param, value):
    """Validates a virtual MFA device name.

    Re-prompts (recursively) until the supplied value contains only allowed
    characters and is 9-256 characters long.
    """
    try:
        # re.fullmatch anchors the pattern to the whole string; the original
        # re.match only required a *prefix* of allowed characters, so names
        # containing spaces or other disallowed characters later in the
        # string were incorrectly accepted.
        # NOTE(review): \w also matches non-ASCII word characters even though
        # the error message says ASCII — confirm whether re.ASCII is wanted.
        if not all([re.fullmatch(r"[\w+=/:,.@-]+", value), 9 <= len(value) <= 256]):
            raise ValueError(value)
        return value
    except ValueError:
        click.echo('Invalid device name given, only ASCII characters and + = / : , . @ - are allowed with a length '
                   'between 9 and 256 with no spaces.')
        value = click.prompt(param.name)
        return validate_mfa_device_name(ctx, param, value)
be3801383ebe2e4c2aed42b2e29601987dffc283
694,591
import re


def listWithRedshift(filenames):
    """Reorder the files by redshift and return a dictionary {z: filenames}.

    Filenames without a ``_z<float>`` label (e.g. 21cmFast initialisation
    boxes) are grouped under redshift 0.0.

    Note: the original implementation also built an unused flat list and had
    an unreachable second return statement; both removed.
    """
    grouped = {}
    for filename in filenames:
        match = re.search(r'_z([0-9.]+)', filename)
        # Initialisation boxes are labelled with z=0.0 in 21cmFast standard
        # output, or carry no redshift label at all.
        z = float(match.group(1)) if match else 0.0
        grouped.setdefault(z, []).append(filename)
    return grouped
90b928fa17a89a953d8e4f51d8f541d0a32339f1
694,592
import typing
from datetime import datetime


def create_timestamp(
    time: typing.Optional[typing.Union[datetime, int, float]] = None,
    style: typing.Optional[str] = None,
) -> str:
    """Creates a markdown timestamp from the given datetime object or unix
    timestamp.

    Parameters
    ----------
    time: Optional[Union[:class:`datetime.datetime`, :class:`builtins.int`, :class:`builtins.float`]]
        The timestamp to use. Defaults to "now"; a datetime is converted to
        its epoch seconds; a float is rounded to the nearest integer.
    style: :class:`builtins.str`
        Optional style suffix for the timestamp. Not validated here in case
        Discord adds new styles.

    Returns
    -------
    :class:`builtins.str`
        The created timestamp in proper format.
    """
    if time is None:
        epoch = round(datetime.now().timestamp())
    elif isinstance(time, datetime):
        epoch = round(time.timestamp())
    elif isinstance(time, float):
        epoch = round(time)
    else:
        # Already an integer epoch.
        epoch = time

    if style is None:
        return f"<t:{epoch}>"
    return f"<t:{epoch}:{style}>"
38a18e5f8f5d00092ff9bee5efa25534451667d7
694,593
def is_prime(n, s=None, ps=None):
    """
    Check whether a reasonably small number is prime or not.

    The user has the responsibility to provide a big enough list of primes.
    The function returns True if it can't find a prime that divides n —
    in particular, with no primes supplied every number looks prime.

    Parameters
    ----------
    n: int
        Number to check
    s: list, optional
        Pre-calculated sieve; when n indexes into it, s[n] is returned
        directly.
    ps: list, optional
        List of primes to use for divisibility checks.
        NOTE(review): if n itself appears in ps it is reported composite
        (n % n == 0) — callers are expected to pass primes smaller than n.
    """
    # None sentinels instead of mutable default arguments (the previous
    # `s=[], ps=[]` defaults were never mutated, but are a classic foot-gun
    # and flagged by every linter).
    if s is None:
        s = []
    if ps is None:
        ps = []
    if n < len(s) and len(s) > 0:
        return s[n]
    for p in ps:
        if n % p == 0:
            return False
    return True
4b6ff4891ea3eb59313dc849d383828ecc9876d2
694,594
import os


def file_status(filename):
    """Confirm that a raw file exists and isn't empty.

    Returns "ok" if the first line is non-empty, "bad" if the file is empty,
    and "not there" if the file does not exist.
    """
    if not os.path.exists(filename):
        return "not there"
    with open(filename, "r") as testfile:
        return "ok" if testfile.readline() else "bad"
f284a85cf5d5e3a3c51cf826f8cfd35142cf7304
694,595
def set_vacuum_chamber(the_line):
    """Apply the default physical aperture to every element of the line.

    Sets hmin/hmax/vmin/vmax to +/- 0.018 on each element in place
    (presumably metres, i.e. +/- 18 mm — TODO confirm units) and returns
    the same list for chaining.
    """
    # Iterate elements directly instead of indexing by range(len(...)).
    for element in the_line:
        element.hmin = -0.018
        element.hmax = +0.018
        element.vmin = -0.018
        element.vmax = +0.018
    return the_line
43b95d7e4110c4620e26f6fb56c23cd80331dba6
694,596
import json


def write_to_json_file(obj, filename):
    """
    Write the object to file *filename* in JSON format.

    :param obj: object to be written on the JSON file (must be JSON-serializable)
    :param filename: filename to be used to store the obj
    :return: success (bool) — False when serialization fails
    """
    # `with` guarantees the file is closed even if json.dump raises,
    # replacing the original manual try/finally/close.
    with open(filename, "w") as f:
        try:
            json.dump(obj, f)
        except Exception:
            # The original used a bare `except:`; narrowed to Exception so
            # KeyboardInterrupt/SystemExit are no longer swallowed, while
            # keeping the best-effort "return False on failure" contract.
            return False
    return True
c074e379c51b0cc3468f812552ee3176e64eda51
694,597
def orbital_eccentricity(t: float) -> float:
    """
    Calculate the eccentricity of earth's orbit around the sun.

    Parameters:
        t (float): The time in Julian Centuries (36525 days) since J2000.0

    Returns:
        float: The earth's orbital eccentricity at the given time
    """
    # t must be in the range from year 1901 thru year 2099.
    if t < -0.99 or t > 1.00:
        raise ValueError("t must be beteween -0.99 and 1.00: " + str(t))
    # Quadratic fit: e(t) = -1.267e-7 * t^2 - 4.2037e-5 * t + 0.016708634
    return -0.0000001267 * t * t - 0.000042037 * t + 0.016708634
d971c059259c875b00041a7cd558af628af1df09
694,598
def chain(*brules):
    """ Compose a sequence of brules so that they apply to the expr sequentially """
    def chain_brl(expr):
        # Base case: no rules left — the expression passes through unchanged.
        if not brules:
            yield expr
            return
        first, rest = brules[0], brules[1:]
        # Feed every result of the first rule through the composition of
        # the remaining rules.
        for intermediate in first(expr):
            yield from chain(*rest)(intermediate)
    return chain_brl
bec1967e391963bcf05e60d6ad4667de29e7c046
694,600
def tagsAssoPerson(cnx, person, domain):
    """ Function to determine tags related to a person in the given domain

    :param cnx: connection object
    :param person: user with whom the tags are associated
    :param domain: domain id of the desired topic
    :return: list of tag names associated with the user, or ["None Found"]
    """
    cursor = cnx.cursor()
    cursor2 = cnx.cursor()
    # Stored procedure returns the tag ids for this user/domain.
    cursor.callproc('associated_tags', [person, domain])
    result = []
    for result1 in cursor.stored_results():
        for row in result1.fetchall():
            # Parameterized query: the original interpolated the tag id
            # directly into the SQL string, which is injection-prone.
            cursor2.execute(
                "select tag_details from tags where tagId=%s", (row[0],))
            tagName = cursor2.fetchone()
            result.append(tagName[0])
    if result:
        return result
    return ["None Found"]
188363fdee34a1ccbc94941e5fdb60dcbc529b3a
694,601
def type_prefix(typ) -> bytes:
    """Return the single-byte prefix for the given type.

    The prefix identifies the kind of object expected; currently every type
    maps to the generic object prefix.
    """
    return b"o"
852d5802e912d0efd685e99c581e11670674d23e
694,602
import sys


def is_interactive() -> bool:
    """Determines whether shell is interactive.

    A shell is interactive if it is run from `python3` or `python3 -i`,
    in which case the interpreter defines ``sys.ps1``.
    """
    try:
        sys.ps1
    except AttributeError:
        return False
    return True
52d058166381b9900e8ee032fec585b9148ad0f0
694,603
def parseEntries(data, type):
    """Parse the results of a CEA/CPA/CTA task in both old and new notation.

    Returns the new notation, e.g. { key1: X, key2: x, mapped: y }; entries
    whose 'mapped' value is empty are dropped.
    """
    if not data:
        # No data at all.
        entries = []
    elif isinstance(data, list):
        # Already in the new notation.
        entries = data
    else:
        # Old notation: dict keyed by comma-separated ids.
        entries = []
        for key, value in data.items():
            ids = key.split(',')
            if type == 'CEA':
                entries.append({'row_id': ids[0], 'col_id': ids[1], 'mapped': value})
            elif type == 'CPA':
                entries.append({'sub_id': ids[0], 'obj_id': ids[1], 'mapped': value})
            else:
                # CTA
                entries.append({'col_id': key, 'mapped': value})
    # Remove empty entries.
    return [entry for entry in entries if entry['mapped']]
ac4d7cc03ae9708e255a9d4a5a469ff84eade2d5
694,604
import argparse


def create_arg_parser():
    """Create and return the ArgumentParser object for this converter."""
    parser = argparse.ArgumentParser(
        description='Converts Dataturks output JSON file for Image bounding box to Pascal VOC format.'
    )
    parser.add_argument(
        'dataturks_JSON_FilePath',
        help='Path to the JSON file downloaded from Dataturks.',
    )
    parser.add_argument(
        'image_download_dir',
        help='Path to the directory where images will be dowloaded (if not already found in the directory).',
    )
    return parser
65c835561719036dbc1a8daa944fdd73dabe6e51
694,605
def get_options_dict(activation, lstm_dims, lstm_layers, pos_dims):
    """Generates dictionary with all parser options.

    The four arguments override the tunable settings; every other option is
    a fixed default.
    """
    return dict(
        activation=activation,
        lstm_dims=lstm_dims,
        lstm_layers=lstm_layers,
        pembedding_dims=pos_dims,
        wembedding_dims=100,
        rembedding_dims=25,
        hidden_units=100,
        hidden2_units=0,
        learning_rate=0.1,
        blstmFlag=True,
        labelsFlag=True,
        bibiFlag=True,
        costaugFlag=True,
        seed=0,
        mem=0,
    )
3188383baf1023b2bd0bc4352dbad14e184192af
694,606
def adjust_learning_rate(optimizer, epoch, cfg):
    """Decay the base learning rate by 10x at epoch 20 and again at epoch 60.

    NOTE(review): the original docstring said "decrease the learning rate at
    200 and 300 epoch", which does not match the thresholds actually used
    below (20 and 60).

    Args:
        optimizer: optimizer whose param groups are updated in place.
        epoch: current epoch number.
        cfg: dict-like config providing the base learning rate under 'lr'.

    Returns:
        The learning rate that was applied to every param group.
    """
    lr = cfg['lr']
    if epoch >= 20:
        lr /= 10
    if epoch >= 60:
        lr /= 10
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
51f702c665bd933cf67faa933adb9fcc32e75031
694,607
def update_grad_w(grad, grad_old, grad_new):
    """Refresh the global gradient for W after a local update.

    Subtracts the stale local contribution and adds the fresh one, keeping
    the exact operation order of the original (grad - grad_old + grad_new).

    Parameters
    ----------
    grad: theano tensor
        The global gradient
    grad_old: theano tensor
        The previous value of the local gradient
    grad_new: theano tensor
        The new version of the local gradient

    Returns
    -------
    theano tensor
        New value of the global gradient
    """
    updated = grad - grad_old + grad_new
    return updated
74015b8987fa7af0bc6bbb4d9de3d96c1d83b5d8
694,608
def product(list):
    """Return the product of every element in *list* (1 for an empty list).

    :param list: iterable of numbers.
        NOTE(review): the parameter name shadows the builtin ``list``;
        kept unchanged for backward compatibility with keyword callers.
    :return: the running product of all elements
    """
    result = 1
    for factor in list:
        result *= factor
    return result
900ff8b30be04a0f6e440fa47ece772846c890e3
694,609
def fake_chat_dict():
    """Return a fake, minimalist Telegram chat as dict."""
    return dict(id=123, type='group')
012b4a07d02e46870a5cbd27e09f2a9a089de365
694,610
def map_indices_py(arr):
    """Return a dict mapping each element of *arr* to its index.

    Elements must be hashable; when an element occurs more than once, the
    index of its last occurrence wins.
    """
    # Dict comprehension instead of dict([...]) — clearer and avoids the
    # intermediate list (flake8-comprehensions C404).
    return {x: i for i, x in enumerate(arr)}
d045d57ec95be50f2c1ab3f46e22b93ab3568637
694,611
import re


def update_pr_links(repo_name, notes_list):
    """
    Replace the internal repo PR link with the external repo PR link in
    each release note.

    e.g. with repo_name "org/repo", "fix (#12)" becomes "fix (org/repo#12)".
    """
    # Matches a trailing "(#<number>)" PR reference, e.g. "desc (#12)".
    pr_pattern = re.compile(r".*\(#(?P<pr_number>[0-9]+)\)$")
    updated = []
    for note in notes_list:
        match = pr_pattern.match(note)
        if match is None:
            updated.append(note)
            continue
        internal = "#{}".format(match.groupdict()["pr_number"])
        external = "{}{}".format(repo_name, internal)
        updated.append(note.replace(internal, external))
    return updated
edb6e399af5a0aeded6c970c1602e6b742119055
694,612
import numpy


def _img_color_distance(img, color):
    """Calculate the Euclidean distance of each pixel to a given color.

    :param img: a numpy.array of shape (y, x, 3)
    :param color: a color tuple (r, g, b), broadcast against the last axis
    :return: a numpy.array of shape (y, x) of per-pixel Euclidean distances
        in RGB space.
        NOTE(review): the original docstring claimed values were "0-1", but
        no normalization is performed — for 0-255 channel values the maximum
        possible distance is sqrt(3) * 255.
    """
    return numpy.sqrt(numpy.sum((img - color)**2, 2))
af85f22d68d2e0b2a1d0ba0da8bb7c9452f11d09
694,613
def group_data_by_column(dataframe, columns=('label',)):
    """Group a dataframe's rows by the given columns.

    :param dataframe: pandas dataframe
    :param columns: column names to group by (any iterable of labels)
    :return: the resulting pandas GroupBy object
    """
    group_keys = list(columns)
    return dataframe.groupby(group_keys)
36596d116da94ae43844f7d29dd1600c93c23b2f
694,614
import pathlib
import os


def create_hugectr_backend_config(
        model_path,
        model_repository_path='/models'):
    """Creates configurations definition for HugeCTR backend.

    *model_path* is a local ``.../<model_name>/<version>`` directory; the
    emitted paths point at the corresponding location inside
    *model_repository_path*.
    """
    local_dir = pathlib.Path(model_path)
    model_version = local_dir.parts[-1]
    model_name = local_dir.parts[-2]
    repo_dir = os.path.join(model_repository_path, model_name, model_version)

    def _repo_paths(pattern):
        # Map local files matching *pattern* onto their repository locations.
        return [os.path.join(repo_dir, match.name)
                for match in local_dir.glob(pattern)]

    # Exactly one dense model file is expected (IndexError otherwise, as in
    # the original).
    dense_path = _repo_paths(f'{model_name}_dense_*.model')[0]
    sparse_paths = _repo_paths(f'{model_name}[0-9]_sparse_*.model')
    network_file = os.path.join(repo_dir, f'{model_name}.json')

    return {
        'supportlonglong': True,
        'models': [{
            'model': model_name,
            'sparse_files': sparse_paths,
            'dense_file': dense_path,
            'network_file': network_file,
        }],
    }
750c33370628a34824403431bcc1e8d1b70da816
694,615
import requests


def _download_collection(url):
    """
    Download all pages in a paginated mgnify data collection.

    Follows the ``links.next`` pointer until it is None and returns a single
    structure with every page's entries appended to result['data'].
    """
    all_entries = []
    next_url = url
    while next_url is not None:
        page = requests.get(next_url, params={'page_size': 100}).json()
        all_entries.extend(page['data'])
        next_url = page['links']['next']
    return {'data': all_entries}
31c5af3480a40f0cc700cfb81235cca7ab094469
694,616
def get_lineitems(actblue_values, mapping):
    """
    Parse information we need from ActBlue line items.

    Returns a list of dictionaries, one dict per line item, with the amount
    and entity id stored under the Knack field keys from *mapping*.
    """
    amount_key = mapping['lineitems#amount']
    entity_key = mapping['lineitems#entityId']
    return [
        {
            amount_key: lineitem.get('amount'),
            entity_key: lineitem.get('entityId'),
        }
        for lineitem in actblue_values['lineitems']
    ]
31e0a00635332a44b6815403e0c2dc3b51eb2461
694,617
def has_function(faker, key):
    """Helper: report whether *faker* exposes an attribute named *key*."""
    try:
        getattr(faker, key)
    except AttributeError:
        return False
    return True
c57020222be5f7a336f2773a2e80a85c4eac867f
694,619
def outlierStats(outlier_list):
    """Compute the mean and standard deviation of a list of outliers.

    Expects an object exposing ``mean()`` and ``std()`` (e.g. a numpy array
    or pandas Series). On TypeError an explanatory string is returned
    instead of raising, mirroring the original best-effort contract.
    """
    try:
        return outlier_list.mean(), outlier_list.std()
    except TypeError:
        return "Cannot compute statistics on a list of non-numerical elements."
98aeec98faa3eff4ce576e9ad4a3331f54d7a25d
694,620
import os.path


def get_c_sources_dir():
    """
    Return directory containing sources for building your own modules.

    This is simply the (absolute) directory in which this module file lives.
    """
    module_dir = os.path.dirname(__file__)
    return os.path.abspath(module_dir)
d9017fdd9b4baf5a9d0f65a1b288adbe05a331d6
694,621