content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def convert_phrase_to_url(phrase):
    """Build the English-Wikipedia article URL for *phrase*.

    Whitespace-separated words are joined with underscores, e.g.
    ``"word1 word2"`` -> ``http://en.wikipedia.org/wiki/word1_word2``.
    """
    base = 'http://en.wikipedia.org/wiki/'
    return base + '_'.join(phrase.split())
57633b17c06a4aad6e8ff3911a3e520390ad4952
692,617
def psudo_graph(transactions):
    """Build a pseudo-graph from ordered transactions.

    For every node the dict value holds:
      - "prefix": outgoing edges  {transaction_id: successor}
      - "postfix": incoming edges {transaction_id: predecessor}
    """
    graph = {}
    for tid, transaction in enumerate(transactions):
        # Walk consecutive (u, v) pairs; a single-item transaction adds nothing.
        for u, v in zip(transaction, transaction[1:]):
            node_u = graph.setdefault(u, {"prefix": {}, "postfix": {}})
            node_u["prefix"][tid] = v
            node_v = graph.setdefault(v, {"prefix": {}, "postfix": {}})
            node_v["postfix"][tid] = u
    return graph
9cc11be18fea3f0584e80c4fb9579ec6fa9faa1d
692,618
def serialize_sqla(data, serialize_date=True):
    """Recursively serialize dicts/lists/sqlalchemy-style objects to
    JSON-compatible structures.

    Resolution order: objects with ``to_dict`` delegate to it; date-like
    objects (having ``isoformat``) become ISO strings when
    ``serialize_date``; containers recurse; objects with ``__dict__``
    are serialized via their attribute dict; anything else passes through.
    """
    if hasattr(data, 'to_dict'):
        # Assumed to be a working project serializer.
        return data.to_dict(serialize_date=serialize_date)
    if hasattr(data, 'isoformat') and serialize_date:
        return str(data.isoformat())
    if isinstance(data, (list, tuple, set)):
        return [serialize_sqla(item, serialize_date=serialize_date)
                for item in data]
    if isinstance(data, dict):
        return {key: serialize_sqla(value, serialize_date=serialize_date)
                for key, value in data.items()}
    if hasattr(data, '__dict__'):
        return serialize_sqla(data.__dict__, serialize_date=serialize_date)
    # Just hope it is already JSON-safe.
    return data
2cc74c71f51794ad6252fa65eb0109e3c096c1bf
692,619
def mac_addr_is_unicast(mac_addr):
    """Returns True if mac_addr is a unicast Ethernet address.

    Args:
        mac_addr (str): MAC address.
    Returns:
        bool: True if a unicast Ethernet address.
    """
    first_octet = mac_addr.split(':', 1)[0]
    # Unicast iff the I/G bit (LSB of the first octet) is 0,
    # i.e. the low hex digit of the first octet is even.
    return first_octet[-1] in '02468aAcCeE'
443d349315e2d1ef9997ab023e2a5a243fd52150
692,620
def make_bare_labels(subsystem_count, *args):
    """Return the full-system bare-state label with unspecified
    subsystems in their ground states (label 0).

    Parameters
    ----------
    subsystem_count: int
        number of subsystems inside the Hilbert space
    *args: tuple(int, int)
        each argument is a tuple (subsys_index, label)

    Returns
    -------
    tuple
        e.g. with 5 subsystems and args (0, 3), (2, 1) the result is
        (3, 0, 1, 0, 0)
    """
    labels = [0 for _ in range(subsystem_count)]
    for subsys_index, label in args:
        labels[subsys_index] = label
    return tuple(labels)
30fb36fc230f2fa4e9dde55e535996ba7549ed7b
692,621
from collections import Counter


def get_most_freq_c(data, n):
    """Find the n most frequent names in data.

    Args:
        data: list of (name, sex) tuples.
        n: number of top entries to return.

    Returns:
        list of (name, count) tuples, most frequent first — the
        ``Counter.most_common`` format.  (The original docstring claimed
        a list of bare names, which did not match the return value.)
    """
    counts = Counter(name for name, _sex in data)
    return counts.most_common(n)
9cb32f9780afbd73e5f9cbfba8e4809d5b81768f
692,622
def _tablify_result(data): """Convert the JSON dict structure to a regular list.""" if isinstance(data, dict): keys = [i for i in list(data.keys()) if i != "_meta"] if len(keys) == 1: data = data[keys[0]] if not isinstance(data, list): data = [data] return data
c00312eedb015d11aa91f61fcbf9b17b0b0ee365
692,623
def fn_oostatus2bool(str_status):
    """Convert OmniOutliner checked/unchecked status to a boolean."""
    if str_status == 'checked':
        return True
    return False
beb11b8b8aca12f22bb9418bf9005bd5f9ad1b48
692,624
from typing import Callable

import click


def superuser_username_option(command: Callable[..., None],
                              ) -> Callable[..., None]:
    """
    An option decorator for a superuser username.
    """
    decorator = click.option(
        '--superuser-username',
        type=str,
        default='admin',
        help=(
            'The superuser username is needed only on DC/OS Enterprise '
            'clusters. '
            'By default, on a DC/OS Enterprise cluster, `admin` is used.'
        ),
    )
    decorated = decorator(command)  # type: Callable[..., None]
    return decorated
4ca1d661e5a44e9f293f7af411f6a2f85e18e51c
692,625
import torch


def get_onehot(data_list, categories) -> torch.Tensor:
    """Transform lists of labels into normalized one-hot rows.

    Args:
        data_list (list of list of int): source data; one label list
            per sample.
        categories (int): number of label classes.

    Returns:
        torch.Tensor: (len(data_list), categories) tensor where each
        listed label gets weight 1/len(label_list) in its row.
    """
    rows = []
    for label_list in data_list:
        row = torch.zeros(categories)
        for label in label_list:
            # Division stays inside the loop so an empty label list
            # simply yields a zero row (as the original did).
            row[label] = 1.0 / len(label_list)
        rows.append(row)
    return torch.stack(rows, dim=0)
ea7ed1c1e292dd3872000e4d16ecb1a23401b92a
692,626
import os
import subprocess

import click


def resample_mp3(inpath, outpath, bitrate='128'):
    """Resample input file with given bitrate to target basedir.

    Equivalent shell command:
        lame --mp3input -b 128 input.mp3 output.mp3

    Returns None when the destination already exists, otherwise the
    lame process return code (0 on success).
    """
    if not outpath.endswith('.mp3'):
        raise ValueError('Dest %s must end in .mp3' % outpath)
    if os.path.exists(outpath):
        click.secho('WARN: %s already exists, skipping' % outpath, fg='yellow')
        return None
    # ffmpeg alternative: ["ffmpeg", "-i", inpath, "-ab", "%sk" % bitrate, outpath]
    # lame is slightly slower (~100s vs ~90s) but preserves tags.
    command = ['lame', '--mp3input', '-b', bitrate, inpath, outpath]
    proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)  # noqa: S603
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        click.secho('Error: non-zero exit code: %i' % proc.returncode, fg='red')
        for stream in (stdout, stderr):
            if stream:
                click.secho(stream, fg='red')
    return proc.returncode
d4a4d838b68fbbd5be8451c34bb00a0f84b57e34
692,627
def filter_sentence(sentence):
    """Return True when a relation sentence should be filtered out.

    A sentence is abnormal when:
      - the head mention equals the tail mention, or
      - the head and tail mention spans overlap.

    Args:
        sentence: dict with keys "h" and "t", each holding
            {"pos": [[start, ..., end]], "name": str, ...}.

    Returns:
        True if abnormal, False otherwise.

    Raises:
        KeyError/IndexError if the sentence does not match the expected
        structure.
    """
    h_span = sentence["h"]["pos"][0]
    t_span = sentence["t"]["pos"][0]
    # Identical surface mentions.
    if sentence["h"]["name"] == sentence["t"]["name"]:
        return True
    # Overlap: either span's start falls inside the other span.
    if t_span[0] <= h_span[0] <= t_span[-1]:
        return True
    if h_span[0] <= t_span[0] <= h_span[-1]:
        return True
    return False
f65c57e05543a721b33baa5bc34c327381d12994
692,628
import traceback
import time


def repeat_if_failed(retry_interval=100, retry=5, f_is_failed=None, retry_message=None):
    """Decorator factory: re-run the wrapped function when it fails.

    A call "fails" when it raises an exception, or when ``f_is_failed``
    is given and ``f_is_failed(result)`` returns True.

    retry: retry times, -1 means always retry
    retry_interval: the waiting milliseconds between retries
    retry_message: printed to stdout before each retry; may use three
        positional format fields: 1. current retry count, 2. total
        retry count, 3. retry interval (milliseconds)

    NOTE(review): block structure reconstructed from whitespace-collapsed
    source — confirm nesting against the original file.
    """
    def _decrator(func):
        # Capture configuration in closure locals.
        _retry_interval = retry_interval / 1000.0  # milliseconds -> seconds
        _retry = retry
        _retry_message = retry_message
        _f_is_failed = f_is_failed
        _func = func

        def _wrapper(*args, **kwargs):
            times = 0
            while True:
                # can run up to retry times plus 1
                try:
                    result = _func(*args, **kwargs)
                    if _f_is_failed and _f_is_failed(result):
                        if times >= _retry and _retry >= 0:
                            # already retried the specified number of
                            # times; give up and return the failed
                            # result as-is
                            return result
                    else:
                        return result
                except KeyboardInterrupt:
                    # never swallow a user interrupt
                    raise
                except:
                    if times >= _retry and _retry >= 0:
                        # already retried the specified number of times
                        raise
                    traceback.print_exc()
                times += 1
                try:
                    if _retry_message:
                        print(_retry_message.format(times, _retry, int(_retry_interval * 1000)))
                    time.sleep(_retry_interval)
                except:
                    # interrupted while waiting
                    raise
        return _wrapper
    return _decrator
d8ab0a7f3d6c7bb3c9da729d4b08db7a8f6c959b
692,629
import os


def report_vcs(path):
    """Return the VCS type found at *path* ('git'), or None when no
    repository is found.

    @path Root directory to start searching for a vcs repository.
    """
    assert os.path.exists(path), 'bad arguments'
    git_marker = os.path.join(path, '.git')
    return 'git' if os.path.exists(git_marker) else None
e48ec39ce62c1794c33f805f3f7ea5abfb3b788a
692,630
def bellman_ford(g, source):
    """Return a dict mapping every vertex to its minimum distance from
    source.

    g is a Graph object (iterable of Vertex objects, with len()) which
    may contain negative edge weights.  source is a Vertex in g.
    """
    distance = {vertex: float('inf') for vertex in g}
    distance[source] = 0
    # Relax every edge |V| - 1 times.
    for _ in range(len(g) - 1):
        for u in g:
            for neighbour in u.get_neighbours():
                candidate = distance[u] + u.get_weight(neighbour)
                if candidate < distance[neighbour]:
                    distance[neighbour] = candidate
    return distance
45b8226148abe7417c7446655ddb66a334a72146
692,631
def ind_dict2list(dic):
    """Invert an item -> index mapping (indices starting from zero)
    into a list where result[index] == item.

    :param dic: dictionary from object to its index
    :return: list of objects ordered by index
    """
    # Seed with range values so any index gap keeps the original filler.
    result = list(range(len(dic)))
    for obj, idx in dic.items():
        result[idx] = obj
    return result
57bf53491d1f82dd08245d10fe4830d3c79d49aa
692,632
import torch


def sparsity_2D(tensor):
    """Return the fraction of all-zero 2D structures (rows) in the tensor.

    4D weights (#OFMs x #IFMs x K x K) are flattened so each kernel
    becomes a row of a (#OFMs*#IFMs) x K^2 matrix; 2D weights are used
    as-is (row structures).  A row is "sparse" when the sum of the
    absolute values of its elements is zero.  Other ranks return 0.
    """
    if tensor.dim() == 4:
        # Flatten each kernel into one row.
        rows = tensor.view(-1, tensor.size(2) * tensor.size(3))
    elif tensor.dim() == 2:
        # Only row structures are supported for 2D weights.
        rows = tensor
    else:
        return 0
    total_rows = rows.size()[0]
    nonzero_rows = len(torch.nonzero(rows.abs().sum(dim=1)))
    return 1 - nonzero_rows / total_rows
57dc9ee68d42e84db99cc25a48ae6843a88b8c3e
692,633
def toBeTested():
    """Print a marker message and return the constant 3."""
    marker = "i'm tested!"
    print(marker)
    return 3
e6e6e9a3fcfa75d89e3614ab7d2d25bda17b228a
692,634
from typing import List, Optional, Tuple, Type, TypeVar


def parse_hint(hint: Type) -> Tuple[Type, Optional[List]]:
    """Parse a typing hint into its type and arguments.

    For example:

        >>> parse_hint(Union[dict, list])
        (typing.Union, [<class 'dict'>, <class 'list'>])
        >>> parse_hint(int)
        (<class 'int'>, None)
    """
    if not hasattr(hint, "__origin__"):
        # Plain class (e.g. int, datetime) rather than a typing construct.
        return hint, None
    # Filter out TypeVars such as KT & VT_co — they generally indicate
    # that no explicit hint was given.
    args = [a for a in getattr(hint, "__args__", [])
            if not isinstance(a, TypeVar)]
    return hint.__origin__, args or None
4900f319240f5feeda8a9b0be768d794f8b24a70
692,635
def get_sequencing_data():
    """Return data/labels for the strain-sequencing pie chart."""
    counts = [120000, 7000, 7600, 750, 350, 1300]
    labels = [
        "archive",
        "gDNA extracted",
        "sent for sequencing",
        "sequenced - on progress",
        "sequenced - failed QC",
        "sequenced - annotated",
    ]
    return {"data": counts, "labels": labels}
364bf5c83f93a30e9bf3c2dac8bb44b077b929eb
692,636
import string


def digitsOnly(s):
    """Return True if s contains only ASCII digits, False otherwise.

    The empty string counts as digits-only (matches the original
    join-based check).
    """
    kept = ''.join(c for c in s if c in string.digits)
    return kept == s
0592f825ff014c25266d7b55774da7a55916a5e1
692,637
def transform_json_from_studio_to_vulcan(studio_json):
    """Transforms a json from the studio format to the vulcan format.

    Each studio image becomes one vulcan prediction whose labels are
    split into 'predicted' (score >= threshold) and 'discarded'
    (score < threshold), both sorted by descending score.
    """
    vulcan_json = []
    for image in studio_json['images']:
        prediction = {
            'outputs': [{'labels': {'discarded': [], 'predicted': []}}]
        }
        # Carry optional metadata through unchanged.
        for key in ['location', 'data']:
            if key in image:
                prediction[key] = image[key]
        kept, dropped = [], []
        for region in image['annotated_regions']:
            annotation = {
                'label_name': region['tags'][0],
                'score': region['score'],
                'threshold': region['threshold'],
            }
            # Only box regions carry a bounding box.
            if region['region_type'] == 'Box':
                annotation['roi'] = {
                    'bbox': {
                        'xmin': region['region']['xmin'],
                        'xmax': region['region']['xmax'],
                        'ymin': region['region']['ymin'],
                        'ymax': region['region']['ymax'],
                    }
                }
            bucket = kept if annotation['score'] >= annotation['threshold'] else dropped
            bucket.append(annotation)
        labels = prediction['outputs'][0]['labels']
        labels['predicted'] = sorted(kept, key=lambda a: a['score'], reverse=True)
        labels['discarded'] = sorted(dropped, key=lambda a: a['score'], reverse=True)
        vulcan_json.append(prediction)
    return vulcan_json
4e423817fef284ed7552e26a4836e351cfb661a9
692,638
def _initialize_headers(headers): """Creates a copy of the headers. Args: headers: dict, request headers to copy. Returns: dict, the copied headers or a new dictionary if the headers were None. """ return {} if headers is None else dict(headers)
61e348c065e1895321ca622572978e60a9dfed47
692,639
def linear_regression(x, y):
    """Calculate a least-squares linear regression for the data points.

    Args:
        x: 1-d numpy array of length N, x-coordinates of the samples
        y: 1-d numpy array of length N, y-coordinates of the samples

    Returns:
        (m, b): slope and y-intercept of the regression line (floats).
    """
    x_mean = x.mean()
    y_mean = y.mean()
    dx = x - x_mean
    m = (dx * (y - y_mean)).sum() / (dx ** 2).sum()
    b = y_mean - m * x_mean
    return (m, b)
33037a2d57172ff2eb386ed35787cda08eb8f11d
692,640
def ask_for_int(sentence: str) -> int:
    """Prompt the user with *sentence* until they enter a valid integer."""
    while True:
        raw = input(sentence)
        try:
            return int(raw)
        except ValueError:
            print("Invalid input. Please try again.")
a494b2d5c1a40948b04fe83c6d73c4d867b7db1f
692,641
def crop_image(img, crop_idx):
    """Return a cropped view of img using the provided crop indices.

    crop_idx is ((upper, lower), (left, right)); the result is
    img[upper:lower, left:right].
    """
    (top, bottom), (left, right) = crop_idx
    return img[top:bottom, left:right]
23062d1070053664fe8c1d88e6a04883bea33eb3
692,642
def guess_subsystem(host: str) -> str:
    """Guess the subsystem based on the host name.

    Known substrings win first; otherwise the second dash-separated
    token (upper-cased) is used, falling back to "PythonLogDaemon".
    """
    host = host.replace("_", "-").lower()
    markers = (
        ("-vac", "Vacuum"),
        ("-optics", "Optics"),
        ("-motion", "Motion"),
        ("-vonhamos", "Motion"),
        ("-sds", "SDS"),
    )
    for needle, subsystem in markers:
        if needle in host:
            return subsystem
    try:
        return host.split("-")[1].upper()
    except Exception:
        return "PythonLogDaemon"
7c0ecda32517d110c4fd5bceb177b0da77898268
692,643
def parse_search(data):
    """Parse a search result from tunein.

    Returns (query_title, results, error) where results is a list of
    {'text', 'img', 'url'} dicts for audio entries and error is a dict
    describing any failure (empty on clean success).
    """
    qr_name = data['head']['title']
    results = []
    error = {}
    if data['head']['status'] != "200":
        return qr_name, results, {'code': data['head']['status']}
    for entry in data['body']:
        if 'type' in entry:
            if entry['type'] == 'audio':
                results.append({
                    'text': entry['text'],
                    'img': entry['image'],
                    'url': entry['URL'],
                })
        elif 'children' in entry:
            error = {'text': 'No result'}
        else:
            error = {'text': entry['text']}
    return qr_name, results, error
af80ec062bca084afd174e8315447f02bedd0fa6
692,644
def uccsd_convert_amplitude_format(single_amplitudes, double_amplitudes):
    r"""Re-format single_amplitudes and double_amplitudes from ndarrays
    to lists.

    Args:
        single_amplitudes(ndarray): [NxN] array of single excitation
            amplitudes t[i,j] * (a_i^\dagger a_j - H.C.)
        double_amplitudes(ndarray): [NxNxNxN] array of double excitation
            amplitudes t[i,j,k,l] * (a_i^\dagger a_j a_k^\dagger a_l - H.C.)

    Returns:
        single_amplitudes_list(list): [[[i, j], t_ij], ...]
        double_amplitudes_list(list): [[[i, j, k, l], t_ijkl], ...]
    """
    singles = [[[i, j], single_amplitudes[i, j]]
               for i, j in zip(*single_amplitudes.nonzero())]
    doubles = [[[i, j, k, l], double_amplitudes[i, j, k, l]]
               for i, j, k, l in zip(*double_amplitudes.nonzero())]
    return singles, doubles
54ecedcda46950b81802f0ea23df54a7c7cddd7a
692,645
def transposition_string_012_format(move_sequence_string):
    """Return a Connect-Four transposition string.

    "0" separates the seven columns; within each column "1" marks disks
    of the beginning player and "2" disks of the other, described from
    the bottom up.  E.g. the move sequence "33" is represented as
    "00012000".
    """
    columns = [[] for _ in range(7)]
    for turn, move in enumerate(move_sequence_string):
        # Even turns belong to the beginning player.
        disk = "1" if turn % 2 == 0 else "2"
        columns[int(move)].append(disk)
    return "0".join("".join(column) for column in columns)
592ca9ad0c9742caac8b493fcead586bee072457
692,646
import string


def convert_to_valid_filename(topic):
    """Convert a topic string to a valid filename.

    :param topic: Topic to use
    :return: Valid filename, with all characters outside
        "-_.() ", ASCII letters and digits dropped
    """
    allowed = set("-_.() ") | set(string.ascii_letters) | set(string.digits)
    return ''.join(ch for ch in topic if ch in allowed)
b51dbeea60b7bc2ca7641695882e314c7f749855
692,647
from typing import Optional


def get_size_param(parameter: dict) -> Optional[str]:
    """Get the size value of the given parameter, or None if absent."""
    return parameter.get("size", {}).get("value", None)
ef70ffe2e0333c2765fcf9683547fd7c4ead783f
692,648
def schema_type_code_recoder():
    """Return the constant SCHEMA_TYPE_CODE value (CHAR(3)) for a row."""
    code = "MUD"
    return code
b324f3ab6ededcccab2fdd2cb74997c41d0b14fb
692,649
def tricky_tt(request):
    """Return tricky TT lines to parse (parametrized fixture value)."""
    value = request.param
    return value
46a0f99caab6aea29e1d353c6e57357c3b8e74b1
692,650
def guess_identifier_format(identifier_str):
    """Guess identifier format.

    :param str identifier_str: Chemical identifier string.
    :return: 'inchi' or 'smiles' string.
    :rtype: :py:class:`str`
    """
    return 'inchi' if identifier_str.startswith('InChI=') else 'smiles'
37128f15f16da64e533b1e7a878c288711016dd9
692,651
def get_domcfg_points():
    """The points are hard coded at hand to be sure to not introduce
    errors from the reading of the names.

    Returns
    -------
    dict
        Mapping of NEMO domain-config variable name to its Arakawa grid
        point ("T", "U", "V", "F", "W", "UW", "VW"); None marks scalar /
        non-gridded variables (dimensions, namelist flags).
    """
    domcfg_points = {
        # Coordinates and scalar configuration values.
        "nav_lon": "T",
        "nav_lat": "T",
        "jpiglo": None,
        "jpjglo": None,
        "jpkglo": None,
        "jperio": None,
        "ln_zco": None,
        "ln_zps": None,
        "ln_sco": None,
        "ln_isfcav": None,
        # Horizontal coordinates per grid point.
        "glamt": "T",
        "glamu": "U",
        "glamv": "V",
        "glamf": "F",
        "gphit": "T",
        "gphiu": "U",
        "gphiv": "V",
        "gphif": "F",
        # Horizontal scale factors.
        "e1t": "T",
        "e1u": "U",
        "e1v": "V",
        "e1f": "F",
        "e2t": "T",
        "e2u": "U",
        "e2v": "V",
        "e2f": "F",
        "ff_f": "F",
        "ff_t": "T",
        # Vertical scale factors.
        "e3t_1d": "T",
        "e3w_1d": "W",
        "e3t_0": "T",
        "e3u_0": "U",
        "e3v_0": "V",
        "e3f_0": "F",
        "e3w_0": "W",
        "e3uw_0": "UW",
        "e3vw_0": "VW",
        "top_level": "T",
        "bottom_level": "T",
        "stiffness": "T",
        # Depths and water-column heights.
        "gdept_0": "T",
        "gdepw_0": "W",
        "gdepu": "U",
        "gdepv": "V",
        "ht_0": "T",
        "hu_0": "U",
        "hv_0": "V",
        # Masks.
        "tmask": "T",
        "umask": "U",
        "vmask": "V",
        "fmask": "F",
        "tmaskutil": "T",
        "umaskutil": "U",
        "vmaskutil": "V",
        # 1D reference depths and bathymetry.
        "gdept_1d": "T",
        "gdepw_1d": "W",
        "mbathy": "T",
        "misf": "T",
        "isfdraft": "T",
    }
    return domcfg_points
5bd49353944a40ac4e274b43c9c07c30773d2d9d
692,652
def expected_da():
    """Expected serialization when danish chosen."""
    serialized = {}
    serialized['id'] = 'text'
    serialized['title_l10n'] = 'Tekst'
    serialized['description_l10n'] = 'Publikationer'
    serialized['icon'] = 'file-o'
    serialized['props'] = {}
    return serialized
8716922fabd89d14a343411fb2498d9a96816a3e
692,653
def extract_extension_attributes(schema: dict) -> dict:
    """Extract custom 'x-*' attributes from schema dictionary.

    Args:
        schema (dict): Schema dictionary

    Returns:
        dict: Dictionary with parsed attributes w/o the 'x-' prefix and
        with remaining dashes converted to underscores.
    """
    prefix = 'x-'
    # Strip the prefix exactly once from the front of the key.  The
    # original str.replace(prefix, '') also removed any later "x-"
    # occurrence, mangling keys such as "x-box-size" -> "bosize".
    return {
        key[len(prefix):].replace('-', '_'): value
        for key, value in schema.items()
        if key.startswith(prefix)
    }
80829a1e222b7e55d41483592e20b06bb63ea8a2
692,654
import os


def is_exe(fpath):
    """Is `fpath' executable?

    Arguments:
    - `fpath`: str

    Return: bool
    Exceptions: None
    """
    exists = os.path.exists(fpath)
    return exists and os.access(fpath, os.X_OK)
1898182d2df9102f7ae96b6d0345dc95a730843a
692,655
def max_contig_sum(L):
    """L, a list of integers, at least one positive
    Returns the maximum sum of a contiguous subsequence in L
    (Kadane's algorithm; a list with no positive numbers yields 0).
    """
    best = 0
    running = 0
    for item in L:
        # Drop any prefix whose total went negative.
        running = max(0, running + item)
        best = max(best, running)
    return best
33e73e4a98943adadfda75af103588e7caa2115f
692,656
import os


def get_data(dir_name):
    """Get data from an imagenet-style directory as a list of dicts.

    Expects ``dir_name/labels_map.txt`` with "<folder> <label>" lines
    and ``dir_name/images/<folder>/*.{JPEG,jpg}`` image files.

    Returns:
        list of dicts with keys "file_name" (str), "label" (int) and
        "data" (raw image bytes).
    """
    map_file = os.path.join(dir_name, "labels_map.txt")
    if not os.path.exists(map_file):
        raise Exception("map file {} not exists".format(map_file))

    # label -> folder name (e.g. n02087046).
    label_dict = {}
    with open(map_file) as fp:
        for line in fp:
            labels = line.split(" ")
            label_dict[labels[1]] = labels[0]

    # Resolve each label's image directory, skipping missing ones.
    image_dir = os.path.join(dir_name, "images")
    dir_paths = {}
    for label, folder in label_dict.items():
        real_path = os.path.join(image_dir, folder)
        if not os.path.isdir(real_path):
            print("warning: {} dir is not exist".format(real_path))
            continue
        dir_paths[label] = real_path
    if not dir_paths:
        raise Exception("not valid image dir in {}".format(image_dir))

    # Collect filename, label and image bytes for every JPEG/jpg file.
    data_list = []
    for label, folder_path in dir_paths.items():
        for item in os.listdir(folder_path):
            file_name = os.path.join(folder_path, item)
            if not (item.endswith("JPEG") or item.endswith("jpg")):
                print("warning: {} file is not suffix with JPEG/jpg, skip it.".format(file_name))
                continue
            with open(file_name, "rb") as image_file:
                image_bytes = image_file.read()
            data_list.append({
                "file_name": str(file_name),
                "label": int(label),
                "data": image_bytes,
            })
    return data_list
855994ff67080d2db6a11d0385ee17b096eda4b2
692,657
import argparse
import sys


def parse_args():
    """Parse input arguments.

    Prints the usage text and exits with status 1 when invoked with no
    command-line arguments at all.
    """
    parser = argparse.ArgumentParser(description='Func test ULPR')
    # (flag, dest, required, help, default, type) — declared in display order.
    options = [
        ('--data_dir', 'data_dir', True, 'data dir', None, str),
        ('--output_dir', 'output_dir', True, 'output dir', None, str),
        ('--batch_size', 'batch_size', False, 'batch size', 32, int),
        ('--lr', 'lr', False, 'learning rate', 0.01, float),
        ('--epoch', 'epoch', False, 'epoch', 10, int),
        ('--net', 'net', True, 'the net to be trained, (char, judge, mrcnn)', None, str),
        ('--gpu', 'gpu', False, 'which gpu to use', '0', str),
        ('--weights', 'pretrained_model', False, 'use pretrained model', None, str),
    ]
    for flag, dest, required, help_text, default, value_type in options:
        parser.add_argument(flag, dest=dest, required=required,
                            help=help_text, default=default, type=value_type)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
78a3e2539fb8120cf7d4d6b145b77fbb2c7f124e
692,658
def exact_value_scoring(values_list1, values_list2, values1, values2):
    """Score exact value matches between a pair of facets.

    values_list1/values_list2 are lists of values; values1/values2 map
    each value to its frequency.  The score weighs the proportion of
    matching attributes by the proportion of total frequency those
    matches account for.  Returns 0 when either list is empty.
    """
    if not values_list1 or not values_list2:
        return 0
    total_attributes = len(values_list1) + len(values_list2)
    matching_attributes = len(set(values_list1) & set(values_list2))
    # Iterate list1 (duplicates counted per occurrence, as before).
    match_freq = sum(values1.get(k) + values2.get(k)
                     for k in values_list1 if k in values_list2)
    total_freq = sum(values1.values()) + sum(values2.values())
    return (matching_attributes * 2 / total_attributes) * (match_freq / total_freq)
9afb685d6ce7556f66480d6c85c06ed8a2ec9f0d
692,659
import logging


def user_insert_cols(df_user, new_features_list):
    """Append zero-filled feature columns to the user dataframe so the
    features can be extracted and populated later.

    :param df_user: user-information dataframe
    :param new_features_list: names of the new feature columns
    :return: dataframe extended with the new columns (filled with 0)
    """
    logging.info("正在扩展用户新特征列...")
    extended_columns = list(df_user.columns) + new_features_list
    df_user = df_user.reindex(columns=extended_columns, fill_value=0)
    logging.info("用户新特征列扩展完成")
    return df_user
9e6792a5cacc438b2319f73d77138a24ec4222d3
692,660
def fixture_delete_tags_old_only():
    """Tags to delete, in date-descending order."""
    tags = ['old-staging', 'old-prod']
    return tags
0b2339311ee745a45eb765575f90c0e80104ac2b
692,661
def recompile(self):
    """Recompile all Python Scripts"""
    base = self.this()
    scripts = base.ZopeFind(base, obj_metatypes=('Script (Python)',),
                            search_sub=1)
    recompiled = []
    for name, script in scripts:
        if not script._v_change:
            continue
        recompiled.append(name)
        script._compile()
        # Mark the persistent object dirty so ZODB stores the result.
        script._p_changed = 1
    if recompiled:
        return 'The following Scripts were recompiled:\n' + '\n'.join(recompiled)
    return 'No Scripts were found that required recompilation.'
a3b10c4fed610818e9a76fd123a817185f8b2ef1
692,662
def is_palindrome(s):
    """
    Input: s, a string
    Returns True if s is a palindrome, False otherwise
    (case-insensitive; only ASCII letters are considered)
    """
    letters = [c for c in s.lower() if c in 'abcdefghijklmnopqrstuvwxyz']
    return letters == letters[::-1]
a956ee66f20d57eb58dae99c7108739b84bf313d
692,663
def exporttogeojson(geojsonfilename, geo_df):
    """Write a geopandas dataframe to a GeoJSON file.

    Keyword arguments:
    geojsonfilename -- geojson to create
    geo_df -- geopandas dataframe

    Returns the filename that was written.
    """
    geo_df.to_file(geojsonfilename, driver='GeoJSON')
    return geojsonfilename
5f083e58f6b56bda25daffb9ecfa9b3f47ae5091
692,664
def remove_metadata(module_data, start_line, start_col, end_line, end_col):
    """Remove a section of a module file.

    Lines/columns are 0-based; the span from (start_line, start_col) to
    (end_line, end_col) is cut out and the remaining text re-joined.
    """
    lines = module_data.split('\n')
    kept = list(lines[:start_line])
    if start_col != 0:
        # Keep the part of the first line before the cut.
        kept.append(lines[start_line][:start_col])
    tail_line = lines[end_line]
    if len(tail_line) - 1 != end_col:
        # Keep the part of the last line after the cut.
        kept.append(tail_line[end_col:])
    if len(lines) > end_line:
        kept.extend(lines[end_line + 1:])
    return '\n'.join(kept)
876c665ad7d7f9c3fe7e4f5b67752a706604f155
692,665
def sec0to1(val):
    """Clamp a system security value into [0.0, 1.0], rounding in-range
    values to one decimal place."""
    if val < 0:
        return 0.0
    if val > 1:
        return 1.0
    return round(val, 1)
69f63794a851a32ebc8a9f5c209a78354bce6586
692,666
import re


def filter_timestamps(in_str_list):
    """Filter out timestamps and core IDs in OpTiMSoC STDOUT/STM/CTM
    log files.

    The "[ <timestamp>, <core>] " prefixes depend on the compiler and
    other variables; functional tests care only about the payload.
    """
    prefix_re = re.compile(r'^\[\s*\d+, \d+\] ', flags=re.MULTILINE)
    cleaned = []
    for entry in in_str_list:
        cleaned.append(prefix_re.sub(repl='', string=entry))
    return cleaned
c43661b49b75c18df1a436d7a548c74284629fa3
692,667
def build_nyiso_url(month, data_type, zone):
    """Build the URL address for a NYISO data file.

    Args:
        month: timestamp-like object for the first day of the requested
            month (its str() must start with "YYYY-MM-DD").
        data_type: NYISO data type, e.g. "damlbmp" (day-ahead market
            location-based marginal price) or "outSched".
        zone: NYISO geographic zone; required when data_type is
            "damlbmp" (only its presence matters for the URL).

    Returns:
        URL string such as
        'http://mis.nyiso.com/public/csv/damlbmp/20180201damlbmp_zone_csv.zip'
    """
    if data_type == 'damlbmp' and zone is None:
        raise RuntimeError("Zone must be specified when data_type == 'damlbmp'")

    date_str = str(month)
    yyyymmdd = date_str[0:4] + date_str[5:7] + date_str[8:10]
    parts = ["http://mis.nyiso.com/public/csv/", data_type, "/",
             yyyymmdd, data_type]
    if zone is not None:
        parts.append("_zone")
    parts.append("_csv.zip")
    return "".join(parts)
e3f53df7b9136aaa00796247989e3cd5b01d1216
692,668
from collections import OrderedDict


def json_encoder(ob):
    """Transform an object into a representation that will not lose
    type information when saved as JSON.

    dicts, OrderedDicts, sets and tuples are wrapped in a marker dict
    carrying a "__type__" tag so they can be reconstructed on decode;
    lists are encoded element-wise; everything else passes through.
    """
    # OrderedDict is a dict subclass, so it MUST be tested before dict —
    # the original dict-first ordering made this branch unreachable and
    # tagged OrderedDicts as plain "__dict__".
    if isinstance(ob, OrderedDict):
        return {"__items__": [(json_encoder(k), json_encoder(v)) for k, v in ob.items()],
                "__type__": "__OrderedDict__"}
    elif isinstance(ob, dict):
        return {"__items__": [(json_encoder(k), json_encoder(v)) for k, v in ob.items()],
                "__type__": "__dict__"}
    elif isinstance(ob, set):
        return {"__items__": [json_encoder(v) for v in ob], "__type__": "__set__"}
    elif isinstance(ob, tuple):
        return {"__items__": [json_encoder(v) for v in ob], "__type__": "__tuple__"}
    elif isinstance(ob, list):
        return [json_encoder(v) for v in ob]
    else:
        return ob
da4d51dff98d54f989501c44bbb865775858552f
692,669
def POWER(number, power):
    """Raise a number to a given power.

    Parameters
    ----------
    number : float or int
        number you would like to raise a power to.
    power : float or int
        number that you would like the number argument raised to.

    Returns
    -------
    int or float
        The number raised to the power specified, or None (with a
        printed message) when either argument has the wrong type.
    """
    # Deliberately exact type checks (bool would pass isinstance).
    if type(number) not in (int, float):
        print('Invalid type: number must be int or float.')
        return None
    if type(power) not in (int, float):
        print('Invalid type: power must be int or float.')
        return None
    return number ** power
016c1a1fe70b5a164827a25f892d6f9cbb3eb99c
692,670
def np_chunk(tree):
    """Return a list of the noun-phrase subtrees of the sentence tree.

    Collects every subtree whose label is "NP".  NOTE(review): the
    original docstring claimed nested NPs are excluded, but the code
    returns ALL "NP" subtrees including nested ones — behavior kept
    as-is; confirm intent against callers.
    """
    return [subtree for subtree in tree.subtrees()
            if subtree.label() == 'NP']
9f80a677fe62377191985ed0690cf86f196721d6
692,671
def comp_width_opening(self):
    """Compute the average opening width of the Slot.

    Parameters
    ----------
    self : Slot
        A Slot object

    Returns
    -------
    W0: float
        Average opening width of the slot [m] — the distance between
        the first line's begin point and the last line's end point.
    """
    curve_list = self.build_geometry()
    begin = curve_list[0].get_begin()
    end = curve_list[-1].get_end()
    return abs(end - begin)
fa7252f17bbd1ea42a582ed665d57015fc7348a1
692,672
def arg_validate(args_array, valid_func):
    """Function:  arg_validate

    Description:  Validates data for certain options based on a dictionary
        list.

    Arguments:
        (input) args_array -> Array of command line options and values.
        (input) valid_func -> Dictionary list of options & functions.
        (output) status -> True|False - If format is valid.
    """
    args_array = dict(args_array)
    status = True
    # Only the options present in both mappings are validated.
    for option in set(valid_func.keys()) & set(args_array.keys()):
        valid = valid_func[option](args_array[option])
        if not valid:
            print("Error: Invalid format: {0} '{1}'".format(option, args_array[option]))
            status = False
    return status
cd00506c4f0ca923c34b76d3324bce745f6960d3
692,673
def protocol(request):
    """Protocol which is used on http(s) service/proxy/backend"""
    # Pytest-style parametrized fixture: simply hand back the current param.
    current = request.param
    return current
ca996ed85c7486704f98a2c507387a23592b2f96
692,674
import binascii

def bin2macaddress(data):
    """Convert a byte-string to a colon-separated MAC address string."""
    hex_digits = binascii.b2a_hex(data)
    # Split the hex representation into byte-sized (two hex digit) pairs.
    pairs = [hex_digits[i:i + 2] for i in range(0, len(hex_digits), 2)]
    return b":".join(pairs).decode()
2c3f6989810adb6257cd169b817ab3d06f58feff
692,675
from typing import List
import random

def create_session(attendees: List[list], room_size: int) -> dict:
    """Randomly assign attendees to rooms of a fixed size.

    Parameters:
        attendees : List - attendees for the session
        room_size : int  - number of people per room

    Return:
        dict - keys are room labels ("Room 1", ...), values are the lists of
        attendees; the last room is padded with "" when attendees run out.
    """
    pool = attendees[:]
    random.shuffle(pool)

    session = {}
    room_num = 1
    while pool:
        members = []
        for _ in range(room_size):
            # Pad with an empty string once the pool is exhausted.
            members.append(pool.pop() if pool else "")
        session[f"Room {room_num}"] = members
        room_num += 1
    return session
b7431afc17e95308e9d48c8f68d92d5f2564bb70
692,676
def bit_to_state_permutation(bit_permutation):
    """Lift a per-bit permutation into a register-state permutation.

    Args:
        bit_permutation (function(reg_sizes: tuple[int],
                                  bit_position: int,
                                  other_vals: tuple[int]) : int):
            Maps each bit position of the first register to a new position.

    Returns:
        function(reg_sizes: tuple[int], reg_vals: tuple[int]) : tuple[int]):
            Permutes the bits of the first register value; remaining register
            values pass through unchanged.
    """
    def permute(sizes, vals):
        first = vals[0]
        rest = vals[1:]
        shuffled = 0
        for bit in range(sizes[0]):
            shuffled |= ((first >> bit) & 1) << bit_permutation(sizes, bit, rest)
        return (shuffled,) + tuple(rest)
    return permute
cd3e589b6fcb363116577863b9ea5c0ceb899c06
692,677
def match_args(macro, args):
    """Pair the macro's declared argument names with the supplied values.

    Returns an empty dict when the macro declares no 'args' key; extra
    values beyond the declared names are silently dropped (zip semantics).
    """
    if 'args' not in macro:
        return {}
    names = macro['args']
    return dict(zip(names, args))
1360771bb397b62f849a3227ffcf82f78302143c
692,678
def _get_key(subspace):
    """Build a canonical comma-joined key for a subspace.

    Tokens sort by (token stripped of '-', '+', '|' markers, token length,
    raw token) so equivalent subspaces always produce the same key.
    """
    def sort_key(token):
        core = token.strip('-+|')
        return (core, len(token), token)

    return ','.join(sorted(subspace, key=sort_key))
2d164997af4264e14bf83e995f8ebf2de1b77f40
692,679
def geometric_pmi_score(pdict, wlist1, wlist2):
    """
    Calculate geometric mean of PMI over all word pairs in two word lists,
    given pre-computed PMI dictionary.

    - If geometric PMI is undefined, return -inf
    - The geometric mean is undefined if:
        - Any of the PMIs are non-positive
        - None of the word pairs have a defined PMI

    NOTE(review): pairs absent from pdict are skipped from the product, yet
    the final exponent uses the full len(wlist1)*len(wlist2) — confirm this
    partial-coverage behaviour is intended.
    """
    # None = "no defined pair seen yet"; 1 = multiplicative identity once
    # the first defined pair is found; -inf = a non-positive PMI was hit.
    product_pmi = None
    for word1 in wlist1:
        for word2 in wlist2:
            # Enforce alphabetical order in pair (pdict is keyed that way)
            pair = tuple(sorted([word1, word2]))
            wi, wj = pair
            if wi in pdict and wj in pdict[wi]:
                if product_pmi is None:
                    product_pmi = 1
                pmi = pdict[wi][wj]
                # Check if PMI is negative (pmi == 0 is treated the same way)
                if pmi > 0:
                    product_pmi *= pmi
                else:
                    product_pmi = float("-inf")
                    break
        # If PMI is negative, break out of the loop completely
        if product_pmi == float("-inf"):
            break
    if product_pmi is None:
        # None of the word pairs had a defined PMI
        return float("-inf")
    elif product_pmi == float("-inf"):
        # At least one word pair had a negative PMI
        return float("-inf")
    else:
        # Geometric mean: product^(1 / (|wlist1| * |wlist2|))
        return product_pmi ** (1/len(wlist1)/len(wlist2))
b513f0d643c062d91010b2c8d8c666373b0c86ad
692,680
def _multicast_groups(subpages, metadata):
    """Order the subpages of a multicast page into groups according to
    hints given in the metadata.

    Arguments:
        subpages(list): list of subpages of the multicast page
        metadata(dict): the metadata dictionary of the multicast page.
            The only control parameter so far is 'MC_PAGINATION': values
            below 1000 mean "items per page"; values >= 1000 mean
            "characters per page".

    Returns:
        a list of lists where each list represents one group of subpages
        that is to appear on one output page.

    Bug fix: in the size-based branch the final accumulated group was never
    appended, so the trailing subpages were silently dropped.
    """
    n = metadata.get('MC_PAGINATION', 1)
    if n < 1000:
        # interpret pagination parameter as number of items per page
        return [subpages[k:k + n] for k in range(0, len(subpages), n)]
    else:
        # interpret pagination parameter as size, i.e. number of characters
        # per page
        groups = []
        group = []
        cnt = 0
        for sp in subpages:
            size = len(sp)
            if len(group) == 0 or size + cnt <= n:
                group.append(sp)
                cnt += size
            else:
                groups.append(group)
                group = [sp]
                cnt = size
        # flush the last (possibly partial) group
        if group:
            groups.append(group)
        return groups
89032beaa5f3bd8a6a54db8b6edc2d6c7ef38a04
692,681
import re

def find_phone_number(text):
    """
    Return the first Spanish mobile phone number found in *text*, or None.

    Spanish mobile numbers have nine digits starting with 6 or 7, optionally
    preceded by a +34/0034/34 country prefix, with spaces or dashes allowed
    between groups.

    Bug fix: the original used re.findall on a pattern full of capturing
    groups, so it returned a list of group tuples instead of the documented
    phone-number string.

    Parameters
    ----------
    text: str
        Text selected to apply transformation

    Examples:
    ---------
    ```python
    find_phone_number("698887776 is a phone number of Mark from 210,North Avenue")
    >>> '698887776'
    ```
    """
    # Non-capturing groups so the whole match is the phone number itself.
    match = re.search(r"(?:\+34|0034|34)?[ -]*[67][ -]*(?:[0-9][ -]*){8}", text)
    if match is None:
        return None
    # The trailing [ -]* can swallow a following space/dash; trim it off.
    return match.group(0).strip(" -")
807536949e0fefe6bd0ab5c3c70b14903c19a078
692,682
import os

def _ParseDeps(base_dir):
    """Returns a tuple of (deps, hooks) parsed from <base_dir>/win32/DEPS.

    Bug fix: the file handle was previously leaked; it is now closed
    deterministically via a context manager.
    """
    deps_path = os.path.join(base_dir, "win32", "DEPS")
    global_context = {}
    local_context = {}
    # NOTE: exec runs arbitrary code from the DEPS file — only use this on
    # trusted source trees (standard gclient-style convention).
    with open(deps_path) as f:
        exec(f.read(), global_context, local_context)
    return local_context.get("deps", {}), local_context.get("hooks", [])
0e43585626ad8e048ec4b6194da095e61d8cf575
692,683
import os
from typing import Counter

def read_data(fname, count, word2idx):
    """Read a whitespace-tokenized corpus and encode it as word indices.

    Mutates *count* (list of [word, frequency], '<eos>' first) and
    *word2idx* ({word: index}) in place, so repeated calls accumulate a
    shared vocabulary.

    :param fname: path of the text file to read
    :param count: [] or previously accumulated [word, freq] pairs
    :param word2idx: {} or previously accumulated {word: index}
    :return: list of word indices; each line is terminated by the
        '<eos>' index (0)

    Bug fix: the original executed ``raise("...")``, which raises TypeError
    ("exceptions must derive from BaseException") instead of a meaningful
    error; the file handle is now also closed via ``with``.
    """
    if not os.path.isfile(fname):
        raise FileNotFoundError("[!] Data %s not found" % fname)
    with open(fname) as f:
        lines = f.readlines()

    words = []
    for line in lines:
        words.extend(line.split())

    # '<eos>' marks line ends; one per line.
    if len(count) == 0:
        count.append(['<eos>', 0])
    count[0][1] += len(lines)
    # word frequencies, most common first
    count.extend(Counter(words).most_common())

    if len(word2idx) == 0:
        word2idx['<eos>'] = 0
    for word, _ in count:
        if word not in word2idx:
            word2idx[word] = len(word2idx)

    # data holds the word indices, with '<eos>' appended after each line
    data = list()
    for line in lines:
        for word in line.split():
            data.append(word2idx[word])
        data.append(word2idx['<eos>'])

    print("Read %s words from %s" % (len(data), fname))
    return data
0aed0f10d1b3642f9301258abeb33a09e297deb1
692,684
def check_value_type(value):
    """Normalize a raw value so downstream code can process it uniformly.

    ints stay ints, floats stay floats, and everything else is converted to
    a whitespace-stripped string.

    :param value: arbitrary input value
    :return: int, float, or stripped str
    """
    for numeric_type in (int, float):
        if isinstance(value, numeric_type):
            return numeric_type(value)
    return str(value).strip()
818cb3961530b8a427483f46880f7f75a71e0f31
692,685
def new_list_with_dict_ids(old: list):
    """Create new list with dicts

    Keeps only the dict elements of *old* and reduces each of them to a
    single-key dict ``{"id": ...}`` (None when the source dict has no "id").

    :param old: The initial list with dicts.
    :returns: New list with dicts that contain only id.
    :rtype: list
    """
    return [{"id": item.get("id")} for item in old if isinstance(item, dict)]
12e0d8f46230231e72c8b091801749365071e87d
692,686
def get_minions(employee):
    """
    Given an employee object, return a list of employees under his/her
    supervision (transitively), depth-first.  The first element of the list
    is the initial employee.
    """
    result = [employee]
    for subordinate in employee.employee_set.all():
        result += get_minions(subordinate)
    return result
8e56ebc5e2c9190510f213f1c29686c27102f78e
692,687
def str_to_bool(val: str) -> bool:
    """Interpret a string as a boolean the way a human would.

    Case-insensitively maps "y", "yes", "yep", "yup", "t", "true", "on",
    "enable", "enabled", "1" to True and "n", "no", "f", "false", "off",
    "disable", "disabled", "0" to False.  Anything else raises ValueError.
    """
    truthy = {"y", "yes", "yep", "yup", "t", "true", "on", "enable", "enabled", "1"}
    falsy = {"n", "no", "f", "false", "off", "disable", "disabled", "0"}
    lowered = val.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise ValueError(f"Invalid truth value {lowered}")
cbd1ecc22a96ff8f80c64ef47d7593877a9da1c1
692,688
def number_of_yang_modules_that_passed_compilation(in_dict: dict, position: int, compilation_condition: str):
    """
    Count the modules whose compilation status equals *compilation_condition*.

    Arguments:
        :param in_dict                (dict) Dictionary of key:yang-model,
            value:list of compilation results
        :param position               (int) 1-based position in the list where
            the compilation condition is expected
        :param compilation_condition  (str) Result we are looking for -
            PASSED, PASSED WITH WARNINGS, FAILED
    :return: the number of YANG models which meet the 'compilation_condition'
    """
    return sum(1 for results in in_dict.values()
               if results[position - 1] == compilation_condition)
d40d10a5601589518aa179822d851628d6b24a0a
692,689
def parse_xgcm_attributes(ds, xc='xt_ocean', xg='xu_ocean', yc='yt_ocean',
                          yg='yu_ocean', zc='st_ocean', zg='sw_ocean'):
    """Adds axis attributes needed for xgcm to recognize the grid.

    Cell-center coordinates (xc/yc/zc) get only the axis label; cell-face
    coordinates (xg/yg/zg) additionally get c_grid_axis_shift=0.5.  Any
    coordinate passed as None is skipped.
    """
    axis_layout = ((xc, xg, 'X'), (yc, yg, 'Y'), (zc, zg, 'Z'))
    for center, face, axis in axis_layout:
        if center is not None:
            ds[center] = ds[center].assign_attrs(axis=axis)
        if face is not None:
            ds[face] = ds[face].assign_attrs(axis=axis)
            ds[face] = ds[face].assign_attrs(c_grid_axis_shift=0.5)
    return ds
c508035b9fcbd8f56ef1c0d3dcd54c42d3804bae
692,690
import os

def get_env_vars(keys):
    """Return the values of the environment variables named in *keys*.

    :param keys: iterable of environment variable names
    :return: list of the corresponding values, in order
    :raises KeyError: if any variable is unset

    Improvements over the original: validation no longer relies on
    ``assert`` (stripped under ``python -O``), and the result is an eager
    list instead of a lazy ``map`` object, so missing variables fail at the
    call site rather than at first iteration.
    """
    def get_env_var(key):
        value = os.environ.get(key)
        if value is None:
            raise KeyError("Please set the environment variable '{}'.".format(key))
        return value
    return [get_env_var(key) for key in keys]
8118ca71d56bf8c8ddc80ab678ce71e94de4597c
692,691
def name_to_htmltitle(name):
    """ Return an html version of a course name

    Names of ten characters or fewer are returned untouched; longer names
    have their spaces replaced by <br> tags so they wrap.

    >>> name_to_htmltitle('123 567 9')
    '123 567 9'

    >>> name_to_htmltitle('123 567 9012 45')
    '123<br>567<br>9012<br>45'
    """
    return name if len(name) <= 10 else name.replace(' ', '<br>')
2705e245d80d436a6dba5b25b217318778d4d194
692,692
import os

def gen_save_filename(files, file_ext):
    """
    Generates new filenames for saving zipfile images based on the same
    naming convention as the original files.

    Args:
        files (list of str): original image file paths
        file_ext (str): extension to be used for saving the processed images

    Returns:
        list of str: unique filenames (stem + file_ext) for the saved zip file

    Bug fix: the original only checked for a name collision once, so three
    or more files sharing a stem (e.g. a.png, a.jpg, a.gif) still produced
    duplicates; collisions are now re-checked until the name is unique.
    """
    new_names = []
    for path in files:
        # Strip directory and original extension to get the bare stem.
        stem, _ = os.path.splitext(os.path.basename(path))
        # Keep appending '_1' until the stem is unique among earlier names.
        while stem in new_names:
            stem = stem + '_1'
        new_names.append(stem)
    # Append the requested extension to every unique stem.
    return [name + file_ext for name in new_names]
d24f7d89d63c0a3cb3cd004d0bb74747abe5fffb
692,693
def get_pair_hit_query(track, query_start, query_stop, sort_order, coords):
    """Return the Elasticsearch pair-hit query.

    Matches datasets on the given track whose sensingStart or sensingStop
    falls inside [query_start, query_stop], restricted to the given polygon,
    sorted by starttime.  Matching on system_version is deliberately
    disabled to allow matchups across S1-SLC versions.
    """
    must_clauses = [
        # disable match on version to allow matchups across S1-SLC versions
        {"term": {"metadata.trackNumber": track}},
        {"bool": {"should": [
            {"range": {"metadata.sensingStart": {
                "from": query_start.isoformat(),
                "to": query_stop.isoformat()}}},
            {"range": {"metadata.sensingStop": {
                "from": query_start.isoformat(),
                "to": query_stop.isoformat()}}},
        ]}},
    ]
    query = {
        "query": {"bool": {"must": must_clauses}},
        "sort": [{"starttime": {"order": sort_order}}],
        "partial_fields": {"partial": {"exclude": ["city", "context"]}},
    }
    # restrict to polygon
    geo_filter = {"geo_shape": {"location": {"shape": {
        "type": "Polygon",
        "coordinates": coords}}}}
    query["query"] = {"filtered": {"filter": geo_filter, "query": query["query"]}}
    return query
acd8f9372a89f86faf7c695a3b9965b4dab870d0
692,694
def cm(inch: float) -> float:
    """Convert a length in inches to centimetres."""
    CM_PER_INCH = 2.54
    return inch * CM_PER_INCH
6edb993aa1a3cdd8d04ae876f03dec4f9cd60491
692,695
from typing import Iterable
from typing import Optional
from typing import Union
from typing import Counter

def is_valid_token_sequence(tokens: Iterable[str], seq: str, sep: Optional[str] = ';') -> Union[None, bool]:
    """
    Checks whether a string consists of a sequence of unique tokens from a
    fixed set of tokens, separated by a given separator.

    Used to check the correctness of values in several OED columns such as
    ``(Acc|Cond|Loc|Pol|Reins)Peril`` and ``(Acc|Cond|Loc|Pol|Reins)PerilsCovered``,
    which must be ``;``-separated sequences of OED peril codes, e.g.
    ``AA1;WTC;WEC``.

    :param tokens: The iterable of tokens to check the string tokens against
    :param seq: The string to be checked
    :param sep: (Optional) The separator to use/expect - default is ``;``
    :return: True/False for valid input; None (bare return) when *seq* is
        not a string or *tokens* contains a non-string or empty token
    """
    # Bare return (None) mirrors the original behaviour for malformed input.
    if not isinstance(seq, str) or any(not isinstance(t, str) for t in tokens) \
            or any(len(t) == 0 for t in tokens):
        return
    parts = [p for p in seq.split(sep) if p]
    if any(p not in tokens for p in parts):
        return False
    occurrences = Counter(parts)
    return all(cnt <= 1 for cnt in occurrences.values())
5668003e8df23229de6e4864289ea0c74d093cfe
692,696
def nodes_create_unwind(labels, property_parameter=None):
    """
    Generate a :code:`CREATE` query using :code:`UNWIND` for batch creation
    of nodes::

        UNWIND $props AS properties CREATE (n:Gene) SET n = properties

    Pass the node properties as parameter to the query, e.g. with a
    :py:obj:`py2neo.Graph`::

        graph.run(query, props=[{'id': 1}, {'id': 2}, ...])

    You can optionally set the name of the parameter with the argument
    :code:`property_parameter`::

        query = nodes_create_unwind(['Foo'], query_parameter='mynodes')
        graph.run(query, mynodes=[{'id': 1}, {'id': 2}, ...])

    :param labels: Labels for the create query.
    :type labels: list[str]
    :param property_parameter: Optional name of the parameter used in the
        query. Default is 'props'.
    :type property_parameter: str
    :return: Query
    """
    param = property_parameter or 'props'
    label_string = ":".join(labels)
    return ("UNWIND $" + param + " AS properties CREATE (n:" + label_string
            + ") SET n = properties RETURN count(n) as cnt")
1723f8e84535ba6d3dcc47578a40e3da511ebcea
692,697
def learning_rate_decay(step, init_lr=5e-4, decay_steps=100000, decay_rate=0.1):
    """Continuous learning rate decay function.

    The learning rate is computed as

        lr = init_lr * decay_rate ** (step / decay_steps)

    Args:
        step: int, the global optimization step.
        init_lr: float, the initial learning rate.
        decay_steps: int, steps over which one full decay factor is applied.
        decay_rate: float, multiplicative decay per decay_steps.

    Returns:
        The learning rate for global step 'step'.
    """
    exponent = step / decay_steps
    return init_lr * decay_rate ** exponent
ddca7b9a87bc1662c73fafdf26b42fa402a376c3
692,698
def GetPopList(sample_blocks):
    """ Get a list of the distinct populations in the sample_blocks

    ----------
    sample_blocks : list of [hap_blocks]
       each hap_block is a dictionary with keys 'pop', 'chrom', 'start', 'end'

    Returns
    -------
    poplist : list of str
       list of populations represented in the blocks (arbitrary order,
       since it comes from a set)
    """
    return list({hap_block['pop']
                 for chrom_blocks in sample_blocks
                 for hap_block in chrom_blocks})
5f383177832d602c0fbe5464d9bb3fd672287065
692,699
def is_sarif_struct(struct):
    """A quick check to verify that `struct` is in fact a SARIF tree.

    A SARIF tree is a dict whose "$schema" value mentions "sarif" and which
    carries a top-level "version" key.

    Improvement: uses isinstance instead of the `type(x) == dict`
    anti-pattern, so dict subclasses (e.g. OrderedDict) are accepted too.
    """
    return (isinstance(struct, dict)
            and "$schema" in struct
            and "sarif" in struct["$schema"]
            and "version" in struct)
7168f32b03cf8e731ab22dfca2b8cb4e406d9d74
692,700
import os

def writelist(mylist, outfile, mode='w'):
    """Write list of strings to an output file, one element per line.

    *outfile* is expanded (~) and made absolute; the expanded path is
    returned.  *mode* is passed straight to open() (e.g. 'a' to append).
    """
    outfile = os.path.abspath(os.path.expanduser(outfile))
    with open(outfile, mode) as fh:
        fh.writelines(str(item) + '\n' for item in mylist)
    return outfile
c0f6dc18580ee469a0fa35c9d083aaf5f8d6a9b7
692,702
import pickle

def read_block(file):
    """Load and return a pickled block from the given path.

    Args:
        file (string): file name of a block to read

    Bug fix: the file handle was previously never closed; it is now managed
    with a context manager.  NOTE: pickle.load executes arbitrary code —
    only read blocks from trusted sources.
    """
    with open(file, 'rb') as f:
        return pickle.load(f)
eaa4d2e0e065217f4151bd8babba3dc75cdb80ce
692,703
import traceback

def get_curw_sim_hash_ids(pool, run_table, model=None, method=None, obs_end_start=None, obs_end_end=None, grid_id=None):
    """
    Retrieve a specific set of hash ids from curw_sim run tables.

    :param pool: database connection pool
    :param run_table: name of the run table to query (interpolated into the
        SQL as an identifier; assumed to come from trusted code, not user input)
    :param model: target model
    :param method: interpolation method
    :param obs_end_start: start of the considered obs_end range, inclusive
    :param obs_end_end: end of the considered obs_end range, inclusive
    :param grid_id: grid id LIKE pattern, with '$' as the escape character
    :return: list of matching ids; None when no filter argument was given;
        None (implicitly) when a database error occurs
    """
    pre_sql_statement = "SELECT `id` FROM `" + run_table + "` WHERE "

    condition_list = []
    variable_list = []

    # score counts how many filters were supplied; zero filters would produce
    # an unfiltered (and syntactically broken) WHERE clause, so bail out.
    score = 0

    if model is not None:
        condition_list.append("`model`=%s")
        variable_list.append(model)
        score += 1
    if method is not None:
        condition_list.append("`method`=%s")
        variable_list.append(method)
        score += 1
    if obs_end_start is not None:
        condition_list.append("`obs_end`>=%s")
        variable_list.append(obs_end_start)
        score += 1
    if obs_end_end is not None:
        condition_list.append("`obs_end`<=%s")
        variable_list.append(obs_end_end)
        score += 1
    if grid_id is not None:
        # LIKE pattern with '$' as escape char so literal '%'/'_' can be matched.
        condition_list.append("`grid_id` like %s ESCAPE '$'")
        variable_list.append(grid_id)
        score += 1

    if score == 0:
        return None

    conditions = " AND ".join(condition_list)
    sql_statement = pre_sql_statement + conditions + ";"
    print(sql_statement)

    ids = []

    connection = pool.connection()
    try:
        with connection.cursor() as cursor:
            # Values are bound via placeholders, so they are safely escaped.
            row_count = cursor.execute(sql_statement, tuple(variable_list))
            if row_count > 0:
                results = cursor.fetchall()
                for result in results:
                    # NOTE(review): result.get('id') assumes a dict-style
                    # cursor (e.g. pymysql DictCursor) — confirm pool config.
                    ids.append(result.get('id'))
            return ids
    except Exception:
        # Best-effort: log the traceback and fall through, returning None.
        traceback.print_exc()
    finally:
        if connection is not None:
            connection.close()
89d366acd2d2703fd141021e1d4d111553a20a28
692,704
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    # Render positional inputs first, then keyword arguments as key=value.
    pieces = [repr(arg) for arg in inputs]
    pieces += ['{}={!r}'.format(key, val) for key, val in kwargs.items()]
    args_string = ', '.join(pieces)
    # Type names cover the inputs plus any 'out' operands.
    all_args = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in all_args)
    return ('operand type(s) all returned NotImplemented from '
            '__array_ufunc__({!r}, {!r}, {}): {}'
            .format(ufunc, method, args_string, types_string))
4620772f4521325e66798c57e501ffd88ab991d1
692,705
def dlog(num, base=2):
    """Returns the discrete logarithm of num: the smallest n such that
    num < base**n.  For the standard base 2, this is the number of bits
    required to store the range 0..num.

    Bug fix: the original scanned a fixed range(32) and raised IndexError
    for num >= base**31; this version handles arbitrarily large num.
    """
    n = 0
    while num >= base ** n:
        n += 1
    return n
55812ba61d432fc36817189f3c3935ef151fcb3b
692,706
import logging

def compute_metrics(metrics, model, dataset):
    """
    Compute several metrics over all predictions on the clean test set,
    plus the patch (backdoor) success rate when the dataset carries a patch.

    :param metrics: iterable of metric callables f(y_true, y_pred); results
        are keyed by each function's __name__
    :param model: classifier exposing predict_classes() (Keras-style API —
        presumably returns class indices; confirm against caller)
    :param dataset: object exposing load_clean(), a_patch and
        objective_class (project-specific poisoning dataset wrapper)
    :return: dict merging {'patch_success_rate': ...} (if a patch exists)
        with {metric_name: value} for every metric
    """
    logger = logging.getLogger(__name__)
    d = {}
    clean = dataset.load_clean()
    patch = dataset.a_patch
    if patch is not None:
        objective_class = dataset.objective_class
        # get x test examples that are not in the objective class
        # NOTE(review): boolean-mask indexing implies numpy-like arrays here.
        x_test = clean.x_test[clean.y_test_cat != objective_class]
        # apply patch to all original test data
        x_test_patched = patch.apply(x_test)
        # predict on the patched inputs
        y_pred_patched = model.predict_classes(x_test_patched)
        # success rate = fraction of patched non-objective samples pushed
        # into the objective class
        d['patch_success_rate'] = (y_pred_patched == objective_class).mean()
        logger.info('Patch success rate: %.2f', d['patch_success_rate'])
    # predictions on clean test set
    y_pred = model.predict_classes(clean.x_test)
    y_true = clean.y_test_cat
    the_metrics = {function.__name__: function(y_true, y_pred) for function in metrics}
    for metric, value in the_metrics.items():
        logger.info('%s: %.2f', metric, value)
    return {**d, **the_metrics}
a38219391c79d2443d727aae26df71ebd1e2696f
692,707
def get_original_fn(fn):
    """Gets the very original function of a decorated one.

    Unwraps classmethod/staticmethod wrappers, follows an ``original_fn``
    shortcut when present, and otherwise recurses through ``fn`` attribute
    chains, caching the answer on the wrapper as ``original_fn`` when the
    wrapper allows attribute assignment.
    """
    # Exact-type test (not isinstance) preserved from the original contract.
    if type(fn) in (classmethod, staticmethod):
        return get_original_fn(fn.__func__)
    if hasattr(fn, "original_fn"):
        return fn.original_fn
    if not hasattr(fn, "fn"):
        return fn
    original = get_original_fn(fn.fn)
    try:
        fn.original_fn = original
    except AttributeError:
        # Wrapper doesn't allow attribute assignment; skip caching.
        pass
    return original
5de6cb2b600e8d026d41928dc5cf0d7c2ead0bfa
692,709
def get_coinbase_api_pagination_id(timestamp, last_data=(), data=()):
    """
    Return the pagination id (the last trade_id) for the Coinbase Pro API,
    or None when *data* is empty.

    Pagination details:
    https://docs.pro.coinbase.com/#pagination

    Improvement: the mutable default arguments ([]) were replaced with
    immutable empty tuples to avoid state shared across calls; behaviour
    for all callers is unchanged since the defaults are only read.
    """
    if data:
        return data[-1]["trade_id"]
    return None
9fbd5564731d26816718752c9a60ab37a0fc6426
692,710
def _get_sources_with_sink(node_str, connections):
    """Returns the source nodes that are connected to the sink node with the
    given string representation.

    Args:
      node_str: a string representation of a PipelineNode
      connections: a list of PipelineConnection instances

    Returns:
      a list of PipelineNodes that are connected to the given sink node.
    """
    sources = []
    for connection in connections:
        if connection.sink.is_same_node_str(node_str):
            sources.append(connection.source)
    return sources
e7713889bd88833d6ed0967b3f09199eac5f6df4
692,711
import os
import platform

def is_ubuntu():
    """True when running Ubuntu (POSIX system whose platform string
    contains 'ubuntu', case-insensitively); False everywhere else."""
    return os.name == 'posix' and 'ubuntu' in platform.platform().lower()
c3883978de794e58d6a390bc4911210477248ae9
692,712
from typing import List

def fast_pdf_sum(prob: List, value_start: int, rate_decay_min: int, rate_decay_max: int,
                 ret_value_start: int, ret_length: int):
    """Unused. Original convolve approximation from JS code.

    *prob* is treated as a cumulative table: the contribution of an
    absolute index window [lo, hi) is prob[hi-1] - prob[lo-1] (0 when
    lo == 0 after shifting by value_start).  Each output element sums two
    such windows, offset by rate_decay_min + 1 and rate_decay_min
    respectively, both capped at rate_decay_max.

    Improvement: the two 15-line copies of the window computation are
    deduplicated into one helper; the numeric results are unchanged.
    """
    rate_decay_min = int(rate_decay_min)
    rate_decay_max = int(rate_decay_max)
    ret_value_start = int(ret_value_start)
    ret_length = int(ret_length)
    prob_length = len(prob)

    def window_sum(lo, hi):
        # Clamp the (absolute) window to the table, then take the
        # cumulative difference; empty/degenerate windows contribute 0.
        lo -= value_start
        hi -= value_start
        lo = max(lo, 0)
        hi = min(hi, prob_length)
        if lo >= hi:
            return 0
        lower = 0 if lo == 0 else prob[lo - 1]
        return prob[hi - 1] - lower

    output = []
    for x in range(ret_length):
        base = x + ret_value_start
        output.append(window_sum(base + rate_decay_min + 1, base + rate_decay_max)
                      + window_sum(base + rate_decay_min, base + rate_decay_max))
    return output
3649c02bf246e828a009eee00085e9d03583e941
692,713
import sys
import importlib
import pkgutil

def import_fuzzers(pkg_name):
    """Dynamically import every submodule of the already-imported package
    *pkg_name* (fuzzer frontends) using importlib, and return the modules."""
    package = sys.modules[pkg_name]
    modules = []
    for _, submodule_name, _ in pkgutil.walk_packages(package.__path__):
        modules.append(importlib.import_module(pkg_name + '.' + submodule_name))
    return modules
0e83843887aa92cdc26c1589cc5ee59ba8ece9eb
692,714
def buscar_imagenes(archivo):
    """Return True when the filename looks like a jpg/jpeg image
    (case-insensitive suffix check)."""
    lowered = archivo.lower()
    return lowered.endswith('jpg') or lowered.endswith('jpeg')
2876287f92ceff7973777d83538ae530ae386dcf
692,715
def max_density(number_of_lanes, vehicle_length) :
    """ Compute the maximum traffic density: number of lanes divided by the
    vehicle length. """
    density = number_of_lanes / vehicle_length
    return density
8020d152fb1c34a2a776d9fcc102012fef02f45a
692,716
def retrieve_block(model_storage, weight_idx, block_x_idx, block_y_idx, block_size_x, block_size_y):
    """Retrieve one (block_size_x x block_size_y) tile from a padded weight
    matrix.

    Args:
        model_storage (model_storage): a model's model_storage
        weight_idx (int): weight index
        block_x_idx (int): block index in x-axis
        block_y_idx (int): block index in y-axis
        block_size_x (int): block size in x-axis
        block_size_y (int): block size in y-axis

    Return:
        numpy array: the selected block
    """
    weight = model_storage['weights_padded'][weight_idx]
    x_lo = block_x_idx * block_size_x
    y_lo = block_y_idx * block_size_y
    return weight[x_lo:x_lo + block_size_x, y_lo:y_lo + block_size_y]
28118eb869fb350397bed0afbea402a9375db834
692,717
def pos_tag_feature(sentences, words, pos_tag):
    """ Score each sentence from 0 to 1 by how many of its words carry the
    given part-of-speech tag, normalized by the highest-scoring sentence.
    Returns all zeros when no sentence contains the tag. """
    # Per-sentence count of words whose POS tag matches.
    counts = []
    for sentence in sentences:
        matches = sum(1 for w in sentence.bag_of_words
                      if words[w].part_of_speech[1] == pos_tag)
        counts.append(matches)
    top = max(counts)
    if top == 0:
        return [0] * len(counts)
    return [c / top for c in counts]
8fa591a9084ed17e46ffe3f2158785815fb8e279
692,718