content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _get_file_list(data):
    """Build the case-file list for a tree view, deduplicated by file name.

    Fixes: removed leftover debug ``print`` calls and the redundant
    "first element" branch (the dedup check already covers it).

    :param data: iterable of dicts, each with a "file" key
    :return: list of ``{"id", "label", "children"}`` nodes, one per unique
        file name; ``id`` is the index of the file's first occurrence.
    """
    files = []
    seen = set()
    for index, d in enumerate(data):
        if d["file"] not in seen:
            seen.add(d["file"])
            files.append({"id": index, "label": d["file"], "children": []})
    return files
b206e7954929a26aad9e36283a14c10ac9c88cc4
694,738
def split_fqn(title):
    """Split a fully qualified name (title) into (name, parent_title).

    Returns (title, None) when there is no '/' separator.
    """
    parent, separator, name = title.rpartition('/')
    if separator:
        return (name, parent)
    return (title, None)
5595cbc5bf312f4eedf1987925ed08ae0f840087
694,741
def make_dict_from_csv(fn):
    """Convert a listing of partial permutations with multiplicity into a dict.

    Input file should have one line for each "type" of ballot:
    - first entry is the number of ballots of this type
    - successive entries are ranked candidates (no non-votes or repeats)
    - all entries are comma-separated

    Fix: the original iterated ``open(fn, 'r')`` directly and never closed
    the file handle; a ``with`` block now guarantees closure.

    :param fn: path to the CSV file
    :return: dict mapping '_'-joined candidate labels to total ballot counts
    """
    counts = {}
    with open(fn, 'r') as fh:
        for raw in fh:
            line = raw.rstrip().split(',')
            lbl = '_'.join(line[1:])
            counts[lbl] = counts.get(lbl, 0) + int(line[0])
    return counts
2ca1dd45b7d92f036462753c5016983eea66dcd4
694,742
def _replace_special_characters(str_special_chars):
    """Percent-encode the characters ':', '/' and ',' in a string.

    ':' -> '%3A', '/' -> '%2F', ',' -> '%2C'
    (e.g. "1/1/9" -> "1%2F1%2F9")

    :param str_special_chars: string in which to substitute characters
    :return: new string with those characters percent-encoded
    """
    encoding = {":": "%3A", "/": "%2F", ",": "%2C"}
    return "".join(encoding.get(ch, ch) for ch in str_special_chars)
c488f10cd817673e853fce1651d6444e2fa2a97b
694,744
import sys


def _next_argument():
    """Pop and return the next positional argument from sys.argv.

    Entries starting with "-" (options) are skipped. The returned argument
    is removed from the sys.argv list.

    Returns:
        String with the next argument.

    Raises:
        TypeError: if no non-option argument remains.
    """
    for position in range(1, len(sys.argv)):
        token = sys.argv[position]
        if not token.startswith("-"):
            return sys.argv.pop(position)
    raise TypeError("Missing argument")
d6aa4a018a80976863327bb9a635857f91b24cff
694,745
def collect_reports(number_of_workers, report_queue):
    """Drain the report queue until one falsy "poison pill" per worker is seen.

    Returns the list of truthy reports collected along the way.
    """
    reports = []
    pills_seen = 0
    while pills_seen < number_of_workers:
        item = report_queue.get()
        if item:
            reports.append(item)
        else:
            pills_seen += 1
    return reports
d90f557568be7b3d084bd192d5b2a92288340db2
694,746
def bit_or(evaluator, ast, state):
    """Evaluates "left | right"."""
    left = evaluator.eval_ast(ast["left"], state)
    right = evaluator.eval_ast(ast["right"], state)
    return left | right
c16ad4fd4daccf2d0f05785b7cc4032ff1b78c18
694,747
def has_tags(available, required):
    """Return True when every required key/value pair is present in available."""
    return all(
        key in available and available[key] == value
        for key, value in required.items()
    )
c70ee7ab13b2f68625b8814ef5dd40eb21c9c3f1
694,748
def unique_type(data):
    """Return the distinct types of the items in `data`, in first-appearance order.

    Fix: the original repeatedly scanned and ``remove``d from a working list
    (O(n^2)); ``dict.fromkeys`` deduplicates in one ordered O(n) pass with
    identical output order.

    :param data: iterable of arbitrary objects
    :return: list of the unique ``type`` objects
    """
    return list(dict.fromkeys(type(item) for item in data))
d9a97166c05ebe331b969aff58ed3d9875f4cb86
694,749
def is_snapshot_task_running(vm):
    """Return True if a PUT/POST/DELETE snapshot task is running for a VM"""
    tasks = vm.get_tasks(match_dict={'view': 'vm_snapshot'})
    tasks.update(vm.get_tasks(match_dict={'view': 'vm_snapshot_list'}))
    for task in tasks.values():
        if task.get('method', '').upper() in ('POST', 'PUT', 'DELETE'):
            return True
    return False
26088c4583d35e5d9b069b7f5046301f5e127feb
694,750
def add_setup(setup=None, teardown=None):
    """Decorator factory: wrap a test method with optional setup/teardown calls."""
    def decorate_function(test):
        def wrapper(self):
            # Run setup (if given), then the test itself, then teardown (if given).
            for hook in (setup, test, teardown):
                if hook:
                    hook(self)
        return wrapper
    return decorate_function
a13ca16e3aa88ca4fd638ec666aaeaa95c090280
694,751
from typing import Dict
from typing import Any
from typing import List


def get_region_sizes(settings: Dict[str, Any]) -> List[int]:
    """Compute the parameter count of each layer in the network described by
    `settings` (weights plus biases per layer)."""
    dim = settings["num_tasks"] + settings["obs_dim"]
    hidden = settings["hidden_size"]
    num_layers = settings["num_layers"]
    sizes = []
    for layer in range(num_layers):
        if layer == 0:
            # Input layer: hidden units, each with dim weights + 1 bias.
            sizes.append(hidden * (dim + 1))
        elif layer == num_layers - 1:
            # Output layer: dim units, each with hidden weights + 1 bias.
            sizes.append(dim * (hidden + 1))
        else:
            # Hidden-to-hidden layer.
            sizes.append(hidden * hidden + hidden)
    return sizes
c8a1880ff67bdf3bc2658162cd6827927e9c4737
694,752
def get_classification_task(graphs):
    """Determine whether the task at hand is node or graph classification.

    A list of graphs implies a graph-classification model.

    :return: str, either 'graph' or 'node'
    """
    return "graph" if isinstance(graphs, list) else "node"
e64620b31e68631d883e8e3d635d2147fe85cf78
694,753
import unicodedata


def code_points(text, normalize=None):
    """Return the sequence of Unicode code points of `text` as integers.

    If `normalize` is not None, first apply one of the Unicode normalization
    schemes: NFC, NFD, NFKC, NFKD. See:
    https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize
    """
    normalized = text if normalize is None else unicodedata.normalize(normalize, text)
    return list(map(ord, normalized))
a01d39b2272803b75e567d190858301db9eb9251
694,754
def datetime_to_string(dt, hms=True):
    """Format a datetime as "%Y-%m-%d %H:%M:%S", or "%Y-%m-%d" when hms=False.

    Fix: the original concatenated bare ``str()`` components, so single-digit
    months/days/hours were not zero-padded (e.g. "2020-1-2 3:4:5"),
    contradicting the documented formats; strftime pads correctly.

    :param dt: datetime object to format
    :param hms: bool, if True include the time component
    :return: str, the formatted date (and time)
    """
    if hms:
        return dt.strftime("%Y-%m-%d %H:%M:%S")
    return dt.strftime("%Y-%m-%d")
c4abb250a5d6ef98e742fe92a29db5329256a5dc
694,755
def find_unique_name(name, names, inc_format='{name}{count:03}', sanity_count=9999999):
    """Find a name that does not collide with any entry in `names`.

    :param name: str, base name to make unique
    :param names: list<str>, existing names to avoid
    :param inc_format: str, format used to append the counter
    :param sanity_count: int, cap on attempts to avoid an infinite loop
    :raises Exception: when no unique name is found within sanity_count tries
    """
    candidate = name
    attempt = 0
    while candidate in names:
        attempt += 1
        candidate = inc_format.format(name=name, count=attempt)
        if sanity_count and attempt > sanity_count:
            raise Exception('Unable to find a unique name in {} tries, try a different format.'.format(sanity_count))
    return candidate
6b48426df7340f88f657097bfa8d35e32cd790f6
694,756
def get_nextupsegs(graph_r, upsegs):
    """Get all segments immediately upstream of a list of segments.

    Fixes: the docstring claimed a flat *list* was returned although the
    function has always returned a set; the missing-key default is now an
    empty tuple instead of a (misleading) empty dict.

    Parameters
    ----------
    graph_r : dict
        Dictionary of upstream routing connections.
        (keys=segments, values=iterables of adjacent upstream segments)
    upsegs : list
        List of segments

    Returns
    -------
    nextupsegs : set
        Union of the segments upstream from all of `upsegs`.
    """
    nextupsegs = set()
    for seg in upsegs:
        nextupsegs.update(graph_r.get(seg, ()))
    return nextupsegs
cc38dd78bd0af8cce2c0a96106bea58d1e9d0b17
694,757
import json


def df_json(df_desc):
    """Convert a describe() DataFrame into JSON-style records, tagging each
    column's entry with its own name under 'column_name'.

    Parameters: pandas.core.frame.DataFrame
    Returns: dict of column_name -> stats dict (including 'column_name')
    """
    raw = json.loads(df_desc.to_json())
    return {name: dict(stats, column_name=name) for name, stats in raw.items()}
5b6a721770ccb0989893fd552faa64cbea2a1dee
694,758
def build_texts_from_gems(keens):
    """Collect available text from every gem inside every keen.

    :param keens: dict of iid: keen_iid
    :return: dict of gem_id: list of non-empty strings gathered from the gem's
        text, link title and link description. Gems with no text are omitted.
    """
    texts = {}
    for keen in keens.values():
        for gem in keen.gems:
            candidates = (gem.text, gem.link_title, gem.link_description)
            sentences = [sentence for sentence in candidates if sentence]
            if sentences:
                texts[gem.gem_id] = sentences
    return texts
b9b94f4a1035f03746bd3b18755c02b19a97b27b
694,759
def filterResultsByRunsetFolder(runSets, form):
    """Keep only the runsets whose runsetFolder matches the requested form value."""
    wanted = form['runset'].value
    return [runset for runset in runSets if runset.runsetFolder == wanted]
a9416fa8f96aa2dc370c23260f65bfb53114e09a
694,760
import platform


def get_iana_protocols():
    """Parse the local IANA protocols file and return {protocol number: name}.

    :rtype: dict[int, str]
    :raises TypeError: on operating systems other than Linux or Windows
    """
    paths = {
        "Linux": "/etc/protocols",
        "Windows": "C:\\windows\\system32\\etc\\protocols",
    }
    os_dist = platform.system()
    if os_dist not in paths:
        raise TypeError("Unsupported OS '{}'".format(os_dist))
    protocols = {}
    with open(paths[os_dist]) as services_file:
        for line in services_file:
            if line.startswith("#") or line.isspace():
                continue
            _, protocol_number, protocol_name, *_ = line.split()
            protocols[int(protocol_number)] = protocol_name
    return protocols
5899d4ff43452de36a3d8764332fd34e5338e2dd
694,761
import csv


def read_list(csvFile):
    """Read a CSV file and return all of its fields as one flat list."""
    with open(csvFile) as handle:
        flattened = []
        for row in csv.reader(handle, quotechar='|'):
            flattened.extend(row)
    return flattened
9fcb5b01496b39915e0169d0a19efc91a779aaf1
694,762
import os


def dccTest():
    """Check whether the `KRAKEN_DCC` environment variable selects this plugin.

    .. note::
        The variable value that activates the Maya plugin is `Maya`.

    Returns:
        bool: True if the environment variable is set to use this plugin.
    """
    return os.environ.get('KRAKEN_DCC') == "Maya"
78ec12c48e92e32c1c3a6ef94f4fd7b678aae478
694,763
def _sqrt_nearest(n, a):
    """Closest integer to the square root of the positive integer n.

    a is an initial approximation to the square root. Any positive integer
    will do for a, but the closer a is to the square root of n the faster
    convergence will be.
    """
    if n <= 0 or a <= 0:
        raise ValueError("Both arguments to _sqrt_nearest should be positive.")
    previous = 0
    while a != previous:
        # Newton step, written out explicitly: the original's terse
        # `a--n//a>>1` is (a + ceil(n/a)) // 2 using floor division of -n.
        previous, a = a, (a - (-n // a)) >> 1
    return a
ba0836616adafefcf4f76ce59961826820eed186
694,765
def sanitize_filename_windows(name):
    """Turn the filename into a legal windows filename.

    See http://support.grouplogic.com/?p=1607

    Fix: the original unconditionally called ``name.decode('utf-8')``, which
    only exists on Python 2 strings / bytes and crashes on a Python 3 str;
    bytes input is now decoded explicitly and str input accepted as-is.
    """
    if isinstance(name, bytes):
        name = name.decode('utf-8')
    # Drop non-ASCII characters (same effect as the original ascii/'ignore' encode).
    name = name.encode('ascii', 'ignore').decode('ascii')
    name = name.replace('"', "'")
    name = name.replace(" ", "_")  # Personal preference
    # Strip characters Windows forbids in filenames (plus backslash and pipe).
    name = "".join(ch for ch in name if ch not in "^/?<>\\:*|”")
    return name[:255]
8fb0def759ba55b072ac3f70f74cbcbc1307fc7c
694,766
import typing


def __evict__(
    sorted_hits: typing.List[int],
    cache_hits: typing.Dict[int, typing.Dict[str, int]],
    size_to_recover: int,
) -> typing.Tuple[list, int]:
    """Select which cached objects to evict until enough size is recovered.

    Fix: the original popped entries from ``cache_hits[hits]`` while
    iterating that same dict, which raises ``RuntimeError: dictionary
    changed size during iteration`` whenever a bucket holds more than one
    entry; the keys are now snapshotted before popping.

    :param sorted_hits: List of current hit counts sorted from lower to higher.
    :param cache_hits: Cache hits structure (hit count -> {f_name: size}).
    :param size_to_recover: Amount of size to recover.
    :return: Tuple of (f_names to evict, total recovered size).
    """
    to_evict = []
    total_recovered_size = 0
    for hits in sorted_hits:
        # Does not order by object size since these share the same hit count.
        for f_name in list(cache_hits[hits]):
            recovered_size = cache_hits[hits].pop(f_name)
            to_evict.append(f_name)
            size_to_recover -= recovered_size
            total_recovered_size += recovered_size
            if size_to_recover <= 0:
                return to_evict, total_recovered_size
    return to_evict, total_recovered_size
56ee2471df745222192a691a1eacfce05eeecb9d
694,768
def match_info(match, requested_team):
    """Summarize one match as [home, away, outcome] for display.

    The winning team's name is upper-cased, and the outcome is "WIN"/"LOSS"
    from the perspective of requested_team, or "DRAW" when nobody won.

    :param match: dict describing the match (football-data.org style)
    :param requested_team: team name to compare against (upper-cased side)
    """
    home = match["homeTeam"]["name"]
    away = match["awayTeam"]["name"]
    winner = match["score"]["winner"]
    if winner == "HOME_TEAM":
        info = [home.upper(), away]
        info.append("WIN" if info[0] == requested_team else "LOSS")
    elif winner == "AWAY_TEAM":
        info = [home, away.upper()]
        info.append("WIN" if info[1] == requested_team else "LOSS")
    else:
        info = [home, away, "DRAW"]
    return info
62b34a3df44b067c87318f90ad5af46c72114ce4
694,769
def split_by_last_word(string_list):
    """Remove the last word from each string in a list.

    Fixes: the docstring previously claimed two lists were returned (the
    truncated strings and the last words), but only the truncated strings
    are; the dead commented-out two-list implementation is removed.

    Args:
        string_list: list(str), list of text in form of str

    Returns:
        list(str): each input string with its final word removed
        (an empty string when the input had at most one word).
    """
    return [' '.join(s.split()[:-1]) for s in string_list]
0b0efa5a11d5efba5a9372fadadb5816882468c8
694,770
def find_net(table_coordinates):
    """Find the net's x-coordinate as the average of the x values of table
    points 1 and 4 (the table's mid points)."""
    x_top = table_coordinates[1][0]
    x_bottom = table_coordinates[4][0]
    return int((x_top + x_bottom) / 2)
e8d8821977362c641be0ca7621802e774caf5adf
694,771
def _get_v1_error_from_json(_json_error, _fail_on_no_results):
    """This function extracts error details (if present) from a Community API v1 response.

    :param _json_error: The API response in JSON format
    :type _json_error: dict
    :param _fail_on_no_results: Defines if an exception should be raised if no results are returned
    :type _fail_on_no_results: bool
    :returns: A Boolean stating if an error was found and a tuple with the error code and error/exception messages
    """
    _error_code, _error_msg, _exc_msg = 0, '', ''
    # Explicit error reported by the API in its 'status' field.
    _error_found = True if _json_error['status'] == 'error' else False
    if _error_found:
        _error_code = _json_error['error']['code']
        _error_msg = _json_error['error']['message']
        _exc_msg = f"The Community API v1 call failed with the error code '{_error_code}' and the following " + \
                   f"message: {_error_msg}"
    # NOTE(review): assumes the second top-level key holds the response body
    # and that body's first key holds the result collection — confirm against
    # the Community API v1 response schema.
    _inner_response = _json_error[list(_json_error.keys())[1]]
    if len(_inner_response[list(_inner_response.keys())[0]]) == 0:
        # An empty result set counts as an error only when the caller asked
        # for that via _fail_on_no_results.
        _error_found = _fail_on_no_results
        _error_code = 404
        _error_msg = f"The Community API v1 call returned no results."
        _exc_msg = _error_msg
    return _error_found, (_error_code, _error_msg, _exc_msg)
12c785b049e199b3917b36abff388b6999879deb
694,772
import math


def hill_func(x, Pmin, Pmin_inc, K, n, repress=False):
    """Hill function used for fitting.

    Negative x is clamped to 0. With repress=True the repressive form
    K^n / (K^n + x^n) is used, otherwise the activating form
    x^n / (K^n + x^n).
    """
    x = max(x, 0.0)
    Kn = math.pow(K, n)
    xn = math.pow(x, n)
    if repress == True:
        return Pmin + Pmin_inc * (Kn / (Kn + xn))
    else:
        return Pmin + Pmin_inc * (xn / (Kn + xn))
c8728b1abb7417e9050602e4904ae0dd199cccdd
694,773
def analysis_find_numbers(database, arg_five_or_seven_numbers):
    """Find draws matching the requested numbers.

    With 5+ requested numbers the first five of each draw are compared;
    with exactly 7 the last two (euro numbers) are compared as well.

    :param database: dict of key -> list of draw tuples/lists
    :param arg_five_or_seven_numbers: sequence of 5 (or 7) numbers to match
    :return: (found_five, found_euro) dicts keyed by the matching draw's key
    """
    target = arg_five_or_seven_numbers
    found_five = {}
    found_euro = {}
    for key, numbers in database.items():
        for vals in numbers:
            if len(target) >= 5:
                main_numbers = [vals[0], vals[1], vals[2], vals[3], vals[4]]
                if main_numbers == list(target[:5]):
                    found_five[key] = main_numbers
            if len(target) == 7:
                euro_numbers = [vals[5], vals[6]]
                if euro_numbers == list(target[5:7]):
                    found_euro[key] = euro_numbers
    return found_five, found_euro
e6606cdc62efa622b30906101c86cbcceb3c4118
694,774
def is_clique(G, nodes):
    """Returns True if and only if `nodes` induces a clique in `G`.

    (The original docstring said "independent set", but the code checks that
    the induced subgraph contains all n*(n-1)/2 possible edges — a clique.)

    `G` is an undirected simple graph. `nodes` is an iterable of nodes in `G`.
    """
    H = G.subgraph(nodes)
    n = len(H)
    # A simple undirected graph on n vertices is complete iff it has
    # exactly n*(n-1)/2 edges.
    return H.number_of_edges() == n * (n - 1) // 2
13d91a326b33f8bec0957f44350bca460ccd485c
694,775
def clamp(v, lo, hi):
    """Return v clamped to range [lo, hi].

    >>> clamp(1, 2, 3)
    2
    >>> clamp(4, 0, 1)
    1
    >>> clamp(6, 5, 8)
    6
    """
    assert lo <= hi
    return max(lo, min(v, hi))
5b2213be8f7ce24bfb3addb6c3ff436a62ff1dbd
694,776
def collatz_sequence(seed):
    """Generate the Collatz sequence starting from `seed` down to 1."""
    sequence = [seed]
    current = seed
    while current != 1:
        if current % 2 == 0:
            current = int(current / 2)
        else:
            current = 3 * current + 1
        sequence.append(current)
    return sequence
37195042d6feae7d9ea26ebfea8c35a9436c8c11
694,777
import sys


def extract_module_locals(depth=0):
    """Return (module, locals) of the function `depth` frames away from the caller."""
    frame = sys._getframe(depth + 1)
    caller_module = sys.modules[frame.f_globals['__name__']]
    return (caller_module, frame.f_locals)
21892cf93211211a16d0c913879d0a59205d91c9
694,778
import csv


def loadFlags(file):
    """Read CSV data from a file path, skipping the first (header) line.

    Returns a flat 1D list when every row has a single column, otherwise a
    2D list of rows.
    """
    with open(file) as stream:
        rows = list(csv.reader(stream))
    body = rows[1:]
    if all(len(row) == 1 for row in rows):
        return [row[0] for row in body]
    return body
cfd48269ed94b47dfd2c12e6a7f66f8106253f15
694,779
def _read_windex(fn):
    """Return the word index as a dict of words to int pointers.

    The file alternates lines: a word line followed by its integer index
    line (zip(f, f) pairs consecutive lines).
    """
    with open(fn) as f:
        index = {}
        for word_line, number_line in zip(f, f):
            index[word_line.strip()] = int(number_line)
        return index
366330ce84dfe7834104a6b641cfae3b1dcfa8d3
694,781
from typing import List
from typing import Dict
import asyncio


def loop_info() -> List[Dict]:
    """Show information about running loop."""
    # NOTE(review): asyncio.get_event_loop() is deprecated outside a running
    # loop on newer Python versions — confirm the intended calling context.
    loop = asyncio.get_event_loop()
    return [
        {
            # Whether the loop is currently executing callbacks.
            "running": loop.is_running(),
            # Fully qualified class name of the active loop policy, extracted
            # from the class repr (the text between the single quotes).
            "policy": str(asyncio.get_event_loop_policy().__class__).split("'")[1],
            # The custom exception handler, or None if none was set.
            "exception_handler": loop.get_exception_handler(),
        }
    ]
ac42e33b1448964d95080902301558572090fd4d
694,782
def fopen(ref):
    """Open a file and return a list of whitespace-split lines.

    Fix: the original iterated ``open(ref).readlines()`` without ever
    closing the handle; a ``with`` block now guarantees closure.

    :param ref: path of the file to read
    :return: list of token lists, one per line
    """
    with open(ref) as handle:
        return [line.split() for line in handle]
689d74d626de2a84a1a28f219c634f3eb043ec76
694,783
import colorsys


def hsl_to_rgb(hue, saturation, lightness):
    """Takes a colour in HSL format and produces an RGB string in the form
    #RRGGBB.

    :param hue: The Hue value (between 0 and 360).
    :param saturation: The Saturation value (between 0 and 100).
    :param lightness: The Lightness value (between 0 and 100).
    :raises TypeError: if any of the three parameters is not numeric.
    :raises ValueError: if any of the three parameters are outside their \
    bounds."""
    # Validate each component: numeric type first, then its allowed range.
    for label, value, upper in (
        ("hue", hue, 360),
        ("saturation", saturation, 100),
        ("lightness", lightness, 100),
    ):
        if not isinstance(value, (int, float)):
            raise TypeError("%s must be numeric, not '%s'" % (label, value))
        if not (0 <= value <= upper):
            raise ValueError(
                "%s must be between 0 and %d, not '%s'" % (label, upper, str(value))
            )
    r, g, b = colorsys.hls_to_rgb(hue / 360, lightness / 100, saturation / 100)
    return ("#%02x%02x%02x" % (int(r * 255), int(g * 255), int(b * 255))).upper()
a7d0ab91bc01c04f2ecf5afa8255f639e5758a6c
694,785
def is_overlapping(segment_time, previous_segments):
    """Check whether a candidate time segment overlaps any existing segment.

    Arguments:
    segment_time -- a tuple (segment_start, segment_end) for the new segment
    previous_segments -- a list of (segment_start, segment_end) tuples for
        the existing segments

    Returns:
    True if the time segment overlaps any existing segment, False otherwise.
    """
    start, end = segment_time
    # Two closed intervals overlap iff each starts before the other ends.
    return any(start <= prev_end and end >= prev_start
               for prev_start, prev_end in previous_segments)
27b817a76829eb7eba63d3fd22376e4164a7bf39
694,786
import types


def ascii_only(input_text):
    """ Map Word Text to best ASCII equivalents

    See the 'findunicode.py' program for how to search for these

    :param input_text: (str) input text
    :return: (str) Ascii only text
    """
    # Explicit code-point -> ASCII replacement table for characters commonly
    # produced by word processors (smart quotes, dashes, symbols, Greek letters).
    replacements = {
        160  : u'-',
        174  : u'r',       # Registered sign
        176  : u"degree-", # Degree sign
        177  : u"+/-",
        181  : u"u",       # Micro
        189  : u"1/2",
        215  : u'*',
        224  : u"`a",
        946  : u'B',       # Beta
        956  : u'v',
        969  : u'w',
        8211 : u'-',
        8217 : u"'",
        8220 : u"``",
        8221 : u"''",
        8230 : u"...",
        8722 : u'-',
        8804 : u'<=',
        61664: u'->',
        8805 : u'>=',
        8226 : u'o',       # Bullet
    }
    # Accept either a generator of strings or a plain string.
    if isinstance(input_text, types.GeneratorType):
        text = ''.join(i for i in input_text)
    else:
        text = input_text
    # Per character: keep ASCII as-is, map known non-ASCII via the table,
    # and replace any other non-ASCII character with a space.
    # NOTE(review): the len(i) == 0 branch can never fire when iterating a
    # string (each item is a 1-char string) — presumably defensive; confirm.
    return ''.join(
        [u'' if len(i) == 0 else
         replacements[ord(i)] if ord(i) >= 128 and ord(i) in replacements else
         i if ord(i) < 128 else
         u' '
         for i in text])
b7f16ac74f3637acb992a9efb918e7d1da2b326b
694,787
def make_regex(pattern, escape=False):
    """Build regular expression corresponding to `pattern`."""
    # Small combinators for composing regex fragments.
    def re_group(r):
        return r'(' + r + r')'
    def re_or(r1, r2):
        return re_group(re_group(r1) + '|' + re_group(r2))
    def re_opt(r):
        return re_group(r) + '?'
    # Placeholder so '*' can be injected via str.format without escaping issues.
    asterisk = '*'
    res = ''
    res += '^'
    for i, ch in enumerate(pattern):
        match_start = ''
        if ch.isalpha():
            ch_lower = ch.lower()
            ch_upper = ch.upper()
            not_alpha = '[^a-zA-Z]'
            not_upper = '[^A-Z]'
            # Either: the letter (any case) at a word-ish boundary, or its
            # upper-case form as a camelCase hump.
            anycase = (re_opt(r'.{asterisk}{not_alpha}')
                       + '{match_start}'
                       + '[{ch_lower}{ch_upper}]')
            camelcase = re_opt(r'.{asterisk}{not_upper}') + '{ch_upper}'
            ch_res = re_or(anycase, camelcase)
        elif ch.isdigit():
            # A digit preceded (optionally) by non-digit filler.
            ch_res = (re_opt(r'.{asterisk}[^0-9]') + '{match_start}{ch}')
        else:
            # Any other character is matched literally (backslash-escaped).
            ch_res = r'.{asterisk}\{match_start}{ch}'
        # The templates above are filled from the local variables defined in
        # this iteration (ch, ch_lower, asterisk, match_start, ...).
        res += ch_res.format(**locals())
    if escape:
        # Double the backslashes for consumers that unescape the string once.
        res = res.replace('\\', '\\\\')
    return res
46f38f71f19f3f7e0cf718a47683615cee0b048e
694,789
def gir_merge_dicts(user, default):
    """Girschik's dict merge from F-RCNN python implementation.

    Recursively fills in keys of `user` that are missing relative to
    `default`, mutating and returning `user`. When either argument is not a
    dict, `user` is returned unchanged.
    """
    if not (isinstance(user, dict) and isinstance(default, dict)):
        return user
    for key, value in default.items():
        if key in user:
            user[key] = gir_merge_dicts(user[key], value)
        else:
            user[key] = value
    return user
49196cc305c8acb454d9d3d8d9b6ddbabd67dff8
694,790
import json


def ler_arquivo(local: str) -> list:
    """Read a JSON file and return its parsed content.

    :param local: str, path of the JSON file to read
    :return: list, list of dicts with the JSON file's data
    """
    with open(local, encoding='UTF-8') as arquivo:
        return json.load(arquivo)
02f4f9460ad359939c120f605657bca6cf905a60
694,791
import re


def get_derived_table(view_list):
    """Extract the derived-table clause from a view file, if it exists.

    Lines are collected until one containing a dimension/parameter/measure
    token is seen; the clause's first and last collected rows are trimmed.

    :param view_list: view file broken down into a list of lines
    :type view_list: list
    :return: list of token lists for the derived table clause
    """
    collected = []
    for line in view_list:
        tokens = [tok for tok in line.split(' ') if tok]
        collected.append(tokens)
        hit_end = any(
            re.search('dimension', tok) or re.search('parameter', tok)
            or re.search('measure', tok)
            for tok in tokens
        )
        if hit_end:
            break
    return collected[1:-1]
1fe5546f387037ec393f8f4cfc4e86c09e9d74c3
694,792
def calculate_delta(a, b):
    """Manhattan (delta) distance between two 3-component coords.

    :type a: tuple
    :param a: starting coord
    :type b: tuple
    :param b: goal coord
    :rtype: integer
    """
    return sum(abs(a[axis] - b[axis]) for axis in (2, 1, 0))
1a91e5b85962dedbed43017a6188d5a59d8f8415
694,793
import sys


def SecFinder(SecID):
    """Normalize a section-type identifier and map it to a canonical name.

    Noise words and separators are stripped from the identifier before it is
    matched against the known aliases. Returns 'tubular',
    'rack_split_tubular' or 'triangular'; unknown identifiers terminate via
    sys.exit after printing an error.
    """
    normalized = str(SecID).lower()
    # Strip noise words and separators (order matters: words before separators).
    for noise in ('section', 'shape', 'type', 'member', 'chord', 'rack',
                  'opposed', ' ', '-', '_'):
        normalized = normalized.replace(noise, '')
    normalized = normalized.strip()
    if normalized in ('hssround', 'pipe', 'tubular', 'chs', 'circular'):
        return 'tubular'
    if normalized in ('splittubular', 'tubularsplit'):
        return 'rack_split_tubular'
    if normalized == 'triangular':
        return 'triangular'
    print('error section {} no available'.format(normalized))
    sys.exit('error section {} no available'.format(normalized))
8a69baf3fb4664747359172fb9d1f2bbd1edcef9
694,794
import typing
import ipaddress


def format_address(address: typing.Optional[tuple]) -> str:
    """Format an (ip, port) tuple as a display string.

    Handles IPv4, IPv6 (bracketed), IPv4-mapped IPv6 (unmapped), the
    unspecified address (rendered as '*'), and non-IP hosts (passed through).
    """
    if address is None:
        return "<no address>"
    try:
        host = ipaddress.ip_address(address[0])
    except ValueError:
        # Not a parseable IP address (e.g. a hostname): plain formatting.
        return "{}:{}".format(address[0], address[1])
    port = address[1]
    if host.is_unspecified:
        return "*:{}".format(port)
    if isinstance(host, ipaddress.IPv4Address):
        return "{}:{}".format(str(host), port)
    # If IPv6 is mapped to IPv4, show the embedded IPv4 address.
    if host.ipv4_mapped:
        return "{}:{}".format(str(host.ipv4_mapped), port)
    return "[{}]:{}".format(str(host), port)
28c735fd60f69f8bb0f038b136fc9b8d1b938a91
694,795
def get_x_ranges(table):
    """Return the horizontal (left, right) extent of each column of cards.

    table = [
        ['2', 'A'],
        ['K', 'J', '10' ...],
        ...,
    ]
    Each card exposes a .box with .left and .width attributes.
    """
    ranges = []
    for column in table:
        left_edge = min(card.box.left for card in column)
        right_edge = max(card.box.left + card.box.width for card in column)
        ranges.append((left_edge, right_edge))
    return ranges
e62b0bd72ff416527eda5339234b3fe6c8bf9d75
694,796
import json


def refundCardTransactionPayload(amount, reason, merchant_refund_reference, refund_time):
    """Construct the JSON payload for a refundCardTransaction API call.

    Note: all parameters are of type String unless otherwise stated below.

    :param amount: Integer - Amount to be refunded
    :param reason: Reason for refund
    :param merchant_refund_reference: A reference specified by the merchant
        to identify the refund. This field must be unique per refund.
    :param refund_time: Date and time of the request.
        Format - YYYY-MM-DD HH:mm:ss
    :return: JSON payload for API call
    """
    return json.dumps({
        "amount": amount,
        "reason": reason,
        "merchant_refund_reference": merchant_refund_reference,
        "refund_time": refund_time,
    })
6c54a5177acf1d79bc1d06e709ca9a81ffb6c9b7
694,797
def round_to(x, base=1, prec=2):
    """
    Round to nearest base with precision.

    Fix: the original used a bare ``except``, which also swallowed
    KeyboardInterrupt/SystemExit and hid genuine bugs; only the conversion
    errors that ``float(x)`` / division can raise are now caught.

    :param x: (scalar) - value to be rounded from
    :param base: (scalar) - value to be rounded to
    :param prec: (int) - number of decimal points
    :return rounded_val: (scalar) - rounded value, or x unchanged on failure
    """
    try:
        return round(base * round(float(x) / base), prec)
    except (TypeError, ValueError, ZeroDivisionError):
        print('Unable to round to')
        return x
d8c041a99d948458ced6942f037592da225fa9e4
694,798
def select_sort(inputlist):
    """Selection sort, in place.

    :param inputlist: a list of numbers
    :return: the same list, sorted ascending
    """
    size = len(inputlist)
    for front in range(size):
        smallest = front
        # Find the minimum of the unsorted tail.
        for candidate in range(front + 1, size):
            if inputlist[candidate] < inputlist[smallest]:
                smallest = candidate
        inputlist[front], inputlist[smallest] = inputlist[smallest], inputlist[front]
    return inputlist
c6163f0d7c0b5048c067d6f8d0adfd08bfb02728
694,799
def definir_orden_builtin(a: str, b: str, c: str) -> str:
    """Return three words joined left-to-right in alphabetical order.

    The comparison is case-insensitive (str.lower is used as the sort key).

    :param a: first word
    :param b: second word
    :param c: third word
    :return: the three words in alphabetical order, comma-separated
    :rtype: str
    """
    palabras = [a, b, c]
    palabras.sort(key=str.lower)
    return ", ".join(palabras)
7cb1a5916a2917b942121de52020c7323c695ba8
694,800
def check_position(ball_, points_):
    """Return the point position whose coordinates match the ball's.

    When several points match, the last match wins (preserving the original
    loop's behavior).

    Fix: the original raised NameError when no point matched (its result
    variable was only ever assigned inside the match branch); None is now
    returned explicitly in that case.

    :param ball_: the ball's coordinates
    :param points_: iterable of objects with .cords and .point_pos
    :return: the matching point's point_pos, or None if nothing matches
    """
    found = None
    for point in points_:
        if point.cords == ball_:
            found = point.point_pos
    return found
bb2a003caffc4c7caf20d61b4e9e0ba7eef06b2f
694,801
def positive_sent(args, premise, verbose=True): """Check whether root verb is negated""" # If 'not' is in the lemma list if 'not' in premise.lemmas: # Get all "not" NOT_tokens = [t for t in premise.tokens if t.lemma == 'not'] for not_t in NOT_tokens: # Get root verb root_verb = next(t for t in premise.tokens if t.deprel == 'ROOT') # Is "not" dependent on root verb? if not_t.head == root_verb.id: return False return True # If no "not", assume it is positive sentence else: return True
0a822c223a395f70a3961711dead446d629a385f
694,802
import functools


def cached(method):
    """
    Decorate `method`, to cache its result.

    Args:
        method: The method whose result should be cached.

    Returns:
        The decorated method.

    Fixes: the original built its cache key from ``sorted(kwargs)`` — an
    iterable of key *strings* — and unpacked each string as ``(k, v)``,
    which raises ValueError for any kwarg name not exactly two characters
    long and never incorporated the kwarg values into the key. The key now
    uses ``sorted(kwargs.items())``. The third-party ``six.wraps`` is
    replaced by the equivalent stdlib ``functools.wraps``.
    """
    results = {}

    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        # Deterministic, hashable key covering both names and values of kwargs.
        cache_key = (args, tuple(sorted(kwargs.items())))
        if cache_key not in results:
            results[cache_key] = method(*args, **kwargs)
        return results[cache_key]

    return wrapper
d5dd2ac223d835287e22e1067300118905d56041
694,803
def get_node_val(node, val_type):
    """Return the text (stripped of surrounding double quotes) of the first
    child node whose expr_name equals `val_type`.

    :raises ValueError: when no child of the specified type exists.
    """
    for child in node.children:
        if child.expr_name != val_type:
            continue
        return child.text.strip('"')
    raise ValueError("No value of specified type.")
d501dd7ba20620e844a5d4d2d33112e64a9dfef0
694,804
from typing import List


def find_mine(field: List) -> List[int]:
    """Finds mine location.

    Scans row by row and returns [row, column] of the first non-zero cell.

    Examples:
        >>> assert find_mine([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) == [1, 1]
    """
    for row_index, row in enumerate(field):
        for col_index, cell in enumerate(row):
            if cell != 0:
                return [row_index, col_index]
    # Match the original's failure mode (indexing an empty result list).
    raise IndexError("list index out of range")
8fdf9b299b7869dbbfabf4fe654a6dd5fa9b5f9a
694,805
def _make_pretty_arguments(arguments):
    """Format an "Arguments:" section into markdown.

    Returns the formatted string when `arguments` starts with the expected
    argument prefix, otherwise None. The section header line is dropped and
    each remaining line is dedented by six characters before being wrapped
    under a bold "**Arguments:**" heading.
    """
    if not arguments.startswith("\n Arguments:"):
        return None
    body_lines = arguments.strip().split("\n")[1:]
    dedented = "\n".join(line[6:] for line in body_lines)
    return "**Arguments:**\n\n%s\n\n" % dedented
b6e7571c3d0e432649edf8295d3aa640140c551f
694,806
import torch


def nested_stack(params, roll: bool = False):
    """Form a tensor from a nested list of tensors.

    This function is a generalization of torch.stack. For proper usage,
    it's important that params is a nested list with shape consistent with
    an array. The innermost elements of that nested list should be PyTorch
    tensors, all of which have identical size.

    For an example, suppose that a, b, c, and d are all tensors of size
    (5,). Then, nested_stack([[a, b], [c, d]]) returns a tensor of size
    (2, 2, 5).

    If roll is set to True, then the dimensions of the tensors (like a, b,
    c and d in the example above) will be permuted to the start of the
    output. This is useful if those dimensions were supposed to be batch
    dimensions. In the example, the output with roll=True would have size
    (5, 2, 2). If instead a, b, c, and d all had size (6, 9, 8), then the
    output size would be (6, 9, 8, 2, 2) if roll=True and (2, 2, 6, 9, 8)
    if roll=False.
    """
    def recursive_stack(params_):
        # Base case: a flat list of tensors stacks directly.
        if isinstance(params_[0], torch.Tensor):
            return torch.stack(params_)
        num_rows = len(params_)
        # NOTE(review): this recurses into nested_stack (not recursive_stack),
        # so inner levels also apply the squeeze(0) below — confirm intended.
        return torch.stack(
            [nested_stack(params_[i]) for i in range(num_rows)]
        )
    stacked = recursive_stack(params).squeeze(0)
    if roll:
        # Walk to one innermost tensor to learn how many trailing dims are its.
        inner = params[0]
        while not isinstance(inner, torch.Tensor):
            inner = inner[0]
        inner_dim = inner.dim()
        # Rotate the inner-tensor dims to the front (treat them as batch dims).
        perm = list(range(stacked.dim()-inner_dim, stacked.dim())) + list(range(stacked.dim()-inner_dim))
        return stacked.permute(perm)
    else:
        return stacked
9b8b56eb15f55cbc5bf0b726e7aaf7fd3d476ada
694,807
def count_features_type(features):
    """
    Counts three different types of features (float, integer, binary).

    :param features: pandas.DataFrame
        A dataset in a pandas data frame
    :returns: a tuple (binary, integer, float) with the number of columns
        of each kind; integer excludes the columns counted as binary
    """
    # Group column names by dtype; keys become dtype names such as
    # 'int64' / 'float64', values are Series of column labels.
    counter={k.name: v for k, v in features.columns.to_series().groupby(features.dtypes)}
    binary=0
    if ('int64' in counter):
        # A binary column is an int64 column whose values all lie in [0, 1].
        # NOTE(review): (features<=1)/(features>=0) compare every column of
        # the frame, so this presumably assumes a fully numeric DataFrame —
        # confirm with callers.
        binary=len(set(features.loc[:, (features<=1).all(axis=0)].columns.values) & set(features.loc[:, (features>=0).all(axis=0)].columns.values) & set(counter['int64']))
    # The conditional expressions guard the counter lookups, so missing
    # dtypes simply contribute 0.
    return (binary,len(counter['int64'])-binary if 'int64' in counter else 0,len(counter['float64']) if 'float64' in counter else 0)
7c759ac9289e7f2cdb542d67ad30bf36e06749c8
694,808
def pkgconfig_script(ext_build_dirs):
    """Create a script fragment to configure pkg-config.

    :param ext_build_dirs: build directories of external dependencies; each
        entry exposes a ``basename`` attribute.
    :return: list of script lines that extend PKG_CONFIG_PATH with every
        dependency directory, echo the resulting path, and rewrite the
        EXT_BUILD_DEPS placeholders to absolute paths.
    """
    lines = []
    if ext_build_dirs:
        lines.extend(
            "##increment_pkg_config_path## $$EXT_BUILD_DEPS$$/" + dep.basename
            for dep in ext_build_dirs
        )
    lines.append("echo \"PKG_CONFIG_PATH=$${PKG_CONFIG_PATH:-}$$\"")
    lines.append("##define_absolute_paths## $$EXT_BUILD_DEPS$$ $$EXT_BUILD_DEPS$$")
    return lines
7b056b2509b09ea2cd163dd0fc2420ad59030060
694,809
import re


def default_cleaner_fn(fld):
    """
    Return a cleaned copy of *fld* when it is a string — double quotes,
    single quotes, and backslashes are removed.  Non-string values are
    returned unchanged.
    """
    if not isinstance(fld, str):
        return fld
    return re.sub("[\"\'\\\\]", "", fld)
7f9bacfe981f1b3591cde5cc7ae04b654689b0c4
694,810
def get_numversion_from_version(v: str) -> tuple[int, int, int]:
    """Kept for compatibility reason.

    See https://github.com/PyCQA/pylint/issues/4399
    https://github.com/PyCQA/pylint/issues/4420,
    """
    components = v.replace("pylint-", "").split(".")[0:3]
    nums = []
    for component in components:
        try:
            nums.append(int(component))
            continue
        except ValueError:
            pass
        # Keep the leading digits of suffixed parts such as "5-dev" / "2b1".
        digits = ""
        for ch in component:
            if not ch.isdigit():
                break
            digits += ch
        try:
            nums.append(int(digits))
        except ValueError:
            # No leading digits at all ("alpha"): treat as 0.
            nums.append(0)
    # Pad to exactly three (major, minor, patch) components.
    nums.extend([0] * (3 - len(nums)))
    return tuple(nums)
9fb58832b43efb4a8fd7b493e06f2f277c9b466f
694,811
def get_pagination_parameters(request, paginator, paginated):
    """
    Prepare and return the template parameters needed for pagination.

    Thanks to https://gist.github.com/sbaechler/5636351

    Args:
        ``request`` (django.http.HttpRequest): The request object.

        ``paginator`` (django.core.paginator.Paginator): An instance of the
        Paginator with the paginated data.

        ``paginated`` (django.core.paginator.Page): The paginated data.

    Returns:
        ``dict``. A dictionary with all values needed by the template to
        create the pagination.
    """
    # Layout constants: how many page links to show in the leading/trailing
    # blocks, how many detached links at the far end, and how many adjacent
    # links around the current page in the middle case.
    LEADING_PAGE_RANGE_DISPLAYED = TRAILING_PAGE_RANGE_DISPLAYED = 10
    LEADING_PAGE_RANGE = TRAILING_PAGE_RANGE = 8
    NUM_PAGES_OUTSIDE_RANGE = 2
    ADJACENT_PAGES = 4
    pages = paginator.num_pages  # total number of pages
    page = paginated.number      # current page number
    in_leading_range = in_trailing_range = False
    pages_outside_leading_range = pages_outside_trailing_range = range(0)
    if pages <= LEADING_PAGE_RANGE_DISPLAYED + NUM_PAGES_OUTSIDE_RANGE + 1:
        # Few enough pages: show them all, no detached ranges needed.
        in_leading_range = in_trailing_range = True
        page_range = [n for n in range(1, pages + 1)]
    elif page <= LEADING_PAGE_RANGE:
        # Near the start: leading block plus the last few pages detached.
        in_leading_range = True
        page_range = [n for n in range(1, LEADING_PAGE_RANGE_DISPLAYED + 1)]
        pages_outside_leading_range = [
            n + pages for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
    elif page > pages - TRAILING_PAGE_RANGE:
        # Near the end: trailing block plus the first few pages detached.
        in_trailing_range = True
        page_range = [n for n in range(
            pages - TRAILING_PAGE_RANGE_DISPLAYED + 1, pages + 1)
            if n > 0 and n <= pages]
        pages_outside_trailing_range = [
            n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]
    else:
        # In the middle: ADJACENT_PAGES on either side of the current page,
        # plus detached pages at both extremes.
        page_range = [n for n in range(
            page - ADJACENT_PAGES, page + ADJACENT_PAGES + 1)
            if n > 0 and n <= pages]
        pages_outside_leading_range = [
            n + pages for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
        pages_outside_trailing_range = [
            n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]

    # Now try to retain GET params, except for 'page'
    params = request.GET.copy()
    if 'page' in params:
        del(params['page'])
    get_params = params.urlencode()

    # Empty string (not None) when there is no previous/next page, so the
    # template can use the value directly.
    prev = paginated.previous_page_number() if paginated.has_previous() else ""

    return {
        'pages': pages,
        'page': page,
        'previous': prev,
        'next': paginated.next_page_number() if paginated.has_next() else "",
        'has_previous': paginated.has_previous(),
        'has_next': paginated.has_next(),
        'page_range': page_range,
        'in_leading_range': in_leading_range,
        'in_trailing_range': in_trailing_range,
        'pages_outside_leading_range': pages_outside_leading_range,
        'pages_outside_trailing_range': pages_outside_trailing_range,
        'get_params': get_params,
        'count': paginator.count,
    }
0dd54bffdf31a3cf78bfdc510ac20a901e3adc5e
694,812
from typing import Tuple


def extend_shape(original_shape: Tuple, new_size: int, axis: int = 0) -> Tuple:
    """Return a copy of *original_shape* with dimension *axis* set to *new_size*."""
    resized = list(original_shape)
    resized[axis] = new_size
    return tuple(resized)
c48e2559900e88ec6c808735ac017c65bc82741a
694,813
import string


def validate_ISBN10(isbn):
    """
    Validate an ISBN-10 code.

    Hyphens and spaces are ignored; the check digit may be a digit or
    'X'/'x' (the previous implementation rejected lowercase 'x', raised an
    uncaught ValueError when 'X' appeared in a non-final position, and
    returned None instead of the documented False for malformed input).

    :param isbn: candidate ISBN-10 string
    :return: the normalized ISBN (separators removed, check digit
        upper-cased) when valid, ``False`` otherwise.
    """
    isbn = isbn.replace("-", "").replace(" ", "").upper()
    # Shape check: nine digits followed by a digit or 'X' check character.
    if (len(isbn) != 10
            or any(c not in string.digits for c in isbn[:9])
            or isbn[-1] not in string.digits + "X"):
        return False
    # ISBN-10 checksum: weighted digit sum modulo 11; remainder 10 maps to 'X'.
    total = sum(int(isbn[i]) * (10 - i) for i in range(9))
    z = (11 - (total % 11)) % 11
    check = "X" if z == 10 else "%d" % z
    return isbn if isbn[-1] == check else False
0bc0a4fe0bde01a240895129ee99410fb9d383e0
694,815
import os


def fix_jinja(txt):
    """Drop blank lines from *txt* and ensure it ends with a line separator."""
    kept = [line for line in txt.splitlines() if line.strip()]
    return os.linesep.join(kept) + os.linesep
338c634fee7c1a48cd574d953f1cbbcd2d25a2d1
694,816
def _resource_for_help(pkg_info, help_file): """ Get the resource name that references the help file in the given help package. The help file should be relative to the document root of the package. """ return "Packages/%s/%s" % (pkg_info.doc_root, help_file)
39214fe39db71935a85763e1731c0da97fe9e74b
694,817
def tokenize_message(message):
    """Lower-case *message*, detach punctuation and possessive 's into
    separate tokens, and return the resulting list of normalized words."""
    text = message.lower()
    for mark in (".", ",", "?", "!", ":"):
        text = text.replace(mark, " " + mark)
    return text.replace("'s", " 's").split()
4e5bc88985912f48ea276e9ad77880c32a85542f
694,818
def str2bool(v):
    """
    Convert a string to a boolean.

    :return boolean: True when *v*, case-insensitively, is one of the
        true-type strings 'true', 't', '1', 'yes', 'y'.
    """
    truthy = {'true', 't', '1', 'yes', 'y'}
    return v.lower() in truthy
32dc16194fa6096e53e1a0b21f0287c31a7cd824
694,819
def get_subplotspec_list(axes_list, grid_spec=None):
    """Return a list of subplotspec from the given list of axes.

    For an instance of axes that does not support subplotspec, None is
    inserted in the list.

    If grid_spec is given, None is inserted for those not from the given
    grid_spec.
    """
    subplotspec_list = []
    for ax in axes_list:
        # When an axes locator is set it, rather than the axes itself,
        # determines the subplotspec.
        axes_or_locator = ax.get_axes_locator()
        if axes_or_locator is None:
            axes_or_locator = ax
        if hasattr(axes_or_locator, "get_subplotspec"):
            # Walk up to the outermost subplotspec so nested gridspecs are
            # compared against the top-level one.
            subplotspec = axes_or_locator.get_subplotspec()
            subplotspec = subplotspec.get_topmost_subplotspec()
            gs = subplotspec.get_gridspec()
            if grid_spec is not None:
                # Only keep subplotspecs belonging to the requested gridspec.
                if gs != grid_spec:
                    subplotspec = None
            elif gs.locally_modified_subplot_params():
                # Gridspecs with locally overridden subplot params are
                # excluded as well.
                subplotspec = None
        else:
            subplotspec = None
        subplotspec_list.append(subplotspec)
    return subplotspec_list
b51d540a7c3c8fa2b1f5a9d628abe69302678de8
694,820
def adjustbufsize( bufsize: int, bits: int) -> int:
    """Adjust buffer size to account for bit depth

    Args:
        bufsize: initial estimate of buffer size
        bits   : bit depth of bitmap (1, 4, 8, 24) bits

    Returns:
        An adjusted int value of the buffer size; any depth other than
        1, 4 or 24 (i.e. 8) leaves the estimate unchanged.
    """
    if bits == 24:
        return bufsize * 3   # three bytes per pixel
    if bits == 4:
        return bufsize >> 1  # two pixels per byte
    if bits == 1:
        return bufsize >> 3  # eight pixels per byte
    return bufsize
9130c0fa174d9daf6dffabba67c77e9d85b29c13
694,821
def and_(a, b):
    """Bitwise/logical AND of the two operands, i.e. ``a & b``."""
    result = a & b
    return result
a07f69143fd9eaa3b27bb07ee72c04efa31c5b7f
694,823
import re


def generate_top_kmer_md_table(t_kmer_dic, g_kmer_dic, top=5, val_type="c"):
    """
    Given k-mer count dictionaries for genomic and transcript context set,
    generate a markdown table with the top k-mers (sorted by descending
    dictionary value).

    BUGFIX: the top k-mers are now selected BEFORE the values are
    reformatted to strings.  The previous code formatted first and then
    sorted the formatted strings lexicographically, which misorders values
    of different magnitude (e.g. "9.00" ranked above "10.00").

    NOTE: for val_type "p" / "r" the dictionary values are converted to
    display strings in place, i.e. the input dictionaries are mutated
    (as in the original implementation).

    :param t_kmer_dic: k-mer -> value dictionary, transcript context
    :param g_kmer_dic: k-mer -> value dictionary, genomic context
    :param top: number of top k-mers to list per context
    :param val_type: type of stored dictionary value.
        c : count (count of k-mer)
        r : ratio (k-mer count / total k-mer count)
        p : percentage ( (k-mer count / total k-mer count) * 100)
    :return: markdown table string
    """
    assert t_kmer_dic, "given dictionary t_kmer_dic empty"
    assert g_kmer_dic, "given dictionary g_kmer_dic empty"
    assert re.search("^[c|p|r]$", val_type), "invalid val_type given"
    # Get size of k (all keys share the same length).
    k = 0
    for kmer in t_kmer_dic:
        k = len(kmer)
        break
    # Expected kmer number.
    exp_kmer_nr = pow(4, k)
    # Count distinct observed k-mers (non-zero value) per context.
    t_kmer_nr = 0
    g_kmer_nr = 0
    for kmer in t_kmer_dic:
        if t_kmer_dic[kmer]:
            t_kmer_nr += 1
    for kmer in g_kmer_dic:
        if g_kmer_dic[kmer]:
            g_kmer_nr += 1
    t_kmer_perc = "%.2f " % ((t_kmer_nr / exp_kmer_nr) * 100) + " %"
    g_kmer_perc = "%.2f " % ((g_kmer_nr / exp_kmer_nr) * 100) + " %"
    # Adjust decimal places based on k-mer size.
    dc_p = 2
    dc_r = 4
    if k > 3:
        dc_p += k - 3
        dc_r += k - 3
    dc_p_str = "%." + str(dc_p) + "f"
    dc_r_str = "%." + str(dc_r) + "f"
    add_ch = ""
    fmt = None
    if val_type == "p":
        add_ch = " %"
        fmt = dc_p_str
    elif val_type == "r":
        fmt = dc_r_str
    # Select the top k-mers while the values are still numeric.
    t_topk_list = [kmer for kmer, v in
                   sorted(t_kmer_dic.items(), key=lambda item: item[1],
                          reverse=True)[:top]]
    g_topk_list = [kmer for kmer, v in
                   sorted(g_kmer_dic.items(), key=lambda item: item[1],
                          reverse=True)[:top]]
    if fmt is not None:
        # Convert values to display strings (mutates the given dicts).
        for kmer in t_kmer_dic:
            t_kmer_dic[kmer] = fmt % t_kmer_dic[kmer]
        for kmer in g_kmer_dic:
            g_kmer_dic[kmer] = fmt % g_kmer_dic[kmer]
    # Generate markdown table.
    mdtable = "| Rank | &nbsp; &nbsp; Transcript context &nbsp; &nbsp; | &nbsp; &nbsp; Genomic context &nbsp; &nbsp;|\n"
    mdtable += "| :-: | :-: | :-: |\n"
    for i in range(top):
        t_kmer = t_topk_list[i]
        g_kmer = g_topk_list[i]
        pos = i + 1
        mdtable += "| %i | %s (%s%s) | %s (%s%s) |\n" % (pos, t_kmer, str(t_kmer_dic[t_kmer]), add_ch, g_kmer, str(g_kmer_dic[g_kmer]), add_ch)
    mdtable += "| ... | &nbsp; | &nbsp; |\n"
    mdtable += "| # distinct k-mers | %i (%s) | %i (%s) |\n" % (t_kmer_nr, t_kmer_perc, g_kmer_nr, g_kmer_perc)
    # Return markdown table.
    return mdtable
f0be470a6b2e10a5f786cd7d01c6b09b7f1e395e
694,824
import argparse


def parse_args():
    """Parse command-line arguments for 3DT BDD format dataset generation.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(
        description='3DT BDD format dataset generation',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('set', choices=['gta', 'kitti'],
                        help='Generate GTA or KITTI dataset')
    parser.add_argument('--mode', choices=['train', 'test'], default='test',
                        help='Test mode dont filter anything, train mode will')
    parser.add_argument('--kitti_task', default='track',
                        choices=['detect', 'track'],
                        help='KITTI task [detect, track]')
    parser.add_argument('--max_depth', type=int, default=150,
                        help='filter if depth exceed this threshold')
    parser.add_argument('--min_pixel', type=int, default=256,
                        help='filter if #pixels lower than this threshold')
    parser.add_argument('--verbose_interval', type=int, default=10,
                        help='show info every N frames')
    return parser.parse_args()
80b92fe9f64fb40d9accd041d050e143e4174560
694,825
def schedule(year=None, month=None, day=None, week=None, day_of_week=None,
             hour=None, minute=None, second=None, start_date=None,
             end_date=None, timezone=None):
    """Decorator factory: run the decorated function on a cron-like schedule.

    The schedule is stored on the decorated function under
    ``f.metadata['plugin_actions']['schedule']`` and is interpreted by
    APScheduler; see
    :class:`CronTrigger<apscheduler:apscheduler.triggers.cron.CronTrigger>`
    for how each field is interpreted.

    :param int|str year: 4-digit year
    :param int|str month: month (1-12)
    :param int|str day: day of the month (1-31)
    :param int|str week: ISO week (1-53)
    :param int|str day_of_week: number or name of weekday (0-6 or
        mon,tue,wed,thu,fri,sat,sun)
    :param int|str hour: hour (0-23)
    :param int|str minute: minute (0-59)
    :param int|str second: second (0-59)
    :param datetime|str start_date: earliest possible date/time to trigger on
        (inclusive)
    :param datetime|str end_date: latest possible date/time to trigger on
        (inclusive)
    :param datetime.tzinfo|str timezone: time zone to use for the date/time
        calculations (defaults to scheduler timezone)
    """
    # locals() captures exactly the trigger parameters here — nothing else
    # is in scope yet — so the snapshot can be handed to APScheduler as-is.
    trigger_args = locals()

    def _decorator(f):
        meta = getattr(f, "metadata", {})
        meta.setdefault('plugin_actions', {})['schedule'] = trigger_args
        f.metadata = meta
        return f

    return _decorator
051ed1fbdf113834bcff626b3679f1dc6ea5580c
694,826
def to_template_dict(obj):
    """Extract the CFN template dict of an object for test comparisons"""
    to_dict = getattr(obj, 'to_dict', None)
    if callable(to_dict):
        return to_dict()
    if isinstance(obj, dict):
        # Recurse into values; keys are left as-is.
        return {key: to_template_dict(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        # Preserve the sequence type while converting each item.
        return type(obj)(to_template_dict(item) for item in obj)
    return obj
4f5440fb4e5d73d5a6acb2f758bf6e6bb8182d52
694,827
def reset_grads(model, require_grad):
    """Set ``requires_grad`` on every parameter of *model* and return it."""
    for _name, param in model.parameters_dict().items():
        param.requires_grad = require_grad
    return model
47a3e799e8ab95d9500add8911dc7ceda42c93a9
694,828
import os


def comment(string):
    """Return *string* with each (stripped) line prefixed as a '# ' comment."""
    stripped = (line.strip() for line in string.splitlines())
    return '# ' + ('%s# ' % os.linesep).join(stripped)
2d7040129ff1f1b985a2761c6ecce28ce1560aa9
694,829
def get_default_ext(delim):
    """Retrieves the default extension for a delimiter"""
    # Fall back to plain text for any unrecognized delimiter.
    return {',': "csv", '\t': "tsv"}.get(delim, "txt")
37e27bebcb6c1fbfe3d794d3d2ac34d9909ed40c
694,830
def to_camel(s):
    """Convert an underscored title into camel case.
    'PARENT_ORGANISATION_ID' => 'parentOrganisationId'"""
    first, *rest = s.split("_")
    return first.lower() + "".join(word.title() for word in rest)
34400be6a346d886b2fca562b737b7811b871af1
694,831
def get_first_image_in_list(body_response, name_filter=None):
    """
    Gets the first image in the list.

    :param body_response: Parsed response (Python dict)
    :param name_filter: when set, only images whose name contains this
        substring are considered
    :return: id of the first (matching) image; None if not found or the
        list is empty
    """
    images = body_response['images']
    if name_filter is None:
        return images[0]['id'] if images else None
    for image in images:
        if name_filter in image['name']:
            return image['id']
    return None
a20db4ac9cd165d6853f8a96d1739d7d25a718f8
694,832
def vcr_config():
    """Common configuration for all vcr tests."""
    config = {
        "filter_headers": ["authorization"],
        "ignore_localhost": True,
        "record_mode": "new_episodes",
    }
    return config
2576975562b0cab12aac6abba69519f3fae7360b
694,833
import numpy


def plane_pt_to_3d_point(pos, trans_matrix):
    """
    Transform plane point position eg. P(x, y, 0, 1) to 3d point eg. P'(X, Y, Z, 1)

    @param pos: <array_like> position (row vector)
    @param trans_matrix: <numpy.array> transformation matrix
    @return: <numpy.array> 3d position as a 2-D row vector
    """
    # Promote the position to a 2-D row vector, then right-multiply by the
    # transposed transformation matrix.
    row_vec = numpy.array(pos, ndmin=2)
    return row_vec @ trans_matrix.T
1cc4bee4c0de4dbc811572113b17e2b95bfbce7e
694,834
import csv
import json
from typing import Counter


def convert_newsqa(file_path):
    """
    Converts NewsQA dataset to jtr format.

    Args:
        file_path: path to the NewsQA CSV file (data/NewsQA/)

    Returns:
        dictionary in jtr format
    """
    # meta info: use the bare file name as the corpus source label
    if '/' in file_path:
        filename = file_path[file_path.rfind('/') + 1:]  # Maybe support a system-specific delimiter
    else:
        filename = file_path
    # data
    question_sets = []
    with open(file_path) as data_file:
        reader = csv.reader(data_file)
        # Skip the CSV header row.
        reader.__next__()
        for row in reader:
            [story_id, question, answer_char_ranges, is_answer_absent,
             is_question_bad, validated_answers, story_text] = row
            if validated_answers:
                # Prefer the validated answers when present; keys look like
                # "start:end" character ranges into story_text.
                answers = json.loads(validated_answers)
                spans = [k for k, v in answers.items() if ":" in k]
            else:
                # Otherwise tally the raw annotator ranges ('|' separates
                # annotators, ',' separates each annotator's ranges) and
                # keep only spans at least two annotators agreed on.
                answers = Counter()
                for rs in answer_char_ranges.split("|"):
                    for r in set(rs.split(",")):
                        if ":" in r:
                            answers[r] += 1
                spans = [k for k, v in answers.items() if ":" in k and v >= 2]
            if spans:
                # One jtr instance per question with all accepted spans.
                qa_set = {
                    "support": [story_text],
                    "questions": [{
                        'question': {
                            'text': question,
                            'id': story_id + "_" + question.replace(" ", "_")
                        },
                        'answers': [{"span": [int(span.split(":")[0]),
                                              int(span.split(":")[1])],
                                     "text": story_text[int(span.split(":")[0]):int(span.split(":")[1])]
                                     } for span in spans]
                    }]
                }
                question_sets.append(qa_set)

    corpus_dict = {
        'meta': {
            'source': filename
        },
        'instances': question_sets
    }

    return corpus_dict
0e48883e179f2d440ac8c72c8a5ff9344f595f1f
694,835
def _zones_to_regions(zones): """ Return list of regions from the input list of zones :param zones: List of zones. This is the output from `get_zones_in_project()`. :return: List of regions available to the project """ regions = set() for z in zones: # Chop off the last 2 chars to turn the zone to a region r = z['name'][:-2] regions.add(r) return list(regions)
83c59cc6c2a9fc6e36a64044dc8ccc73ec039801
694,836
def cond(conditions, value):
    """Apply the first matching [predicate, transformer] pair to *value*.

    Walks *conditions* until a predicate returns a truthy result for
    *value*, then returns the corresponding transformer applied to *value*.
    Returns None when no predicate matches (mirroring R.cond's undefined).
    """
    for check, transform in conditions:
        if check(value):
            return transform(value)
    return None
942f7c474294863515ad7607d5474bbc91ba8936
694,837
import numpy


def insert_function_sinc(x):
    """
    Normalized sinc interpolation kernel: sin(pi*x) / (pi*x).

    BUGFIX: the previous implementation left the output at 0 for x == 0,
    but the sinc function's value at 0 is its limit, 1 — required for an
    interpolation kernel to reproduce samples at grid points.
    numpy.sinc handles the x == 0 case correctly.

    :param x: 1D vector
    :return: 1D float vector of sinc values
    """
    # Cast to float so integer inputs don't truncate the result.
    return numpy.sinc(numpy.asarray(x, dtype=float))
1ddf6d9fcc07dcfcec26c272b6d44df662ed34a6
694,838
import re


def is_valid_fqdn(host):
    """Reasonable attempt at validating a hostname.

    Compiled from various paragraphs outlined here
    https://tools.ietf.org/html/rfc3696#section-2
    https://tools.ietf.org/html/rfc1123

    Notably,
    * Host software MUST handle host names of up to 63 characters and
      SHOULD handle host names of up to 255 characters.
    * The "LDH rule" (letters, digits, hyphen); '*' is additionally
      accepted for wildcard entries.
    * A hyphen may not appear at the beginning or end of a label.

    :param host: hostname string to validate
    :return: True for a valid multi-label hostname, False otherwise
    """
    if len(host) > 255:
        return False
    labels = host.rstrip(".").split(".")
    label_re = re.compile(r'(?!-)[A-Z0-9-*]{1,63}(?<!-)$', re.IGNORECASE)
    if not all(label_re.match(label) for label in labels):
        return False
    # A bare single label (e.g. "localhost") is not fully qualified.
    return len(labels) > 1
42b7e4b7f249590ac74482c680d61ae82d3903fd
694,839
def to_year_count(d):
    """Map a date-like object to its year (date > n years)."""
    year = d.year
    return year
2665ad98d7252a4e097bdf959d39f8f92bac5fa3
694,840
def task_check():
    """Pre-deployment check"""
    # No actions of its own: just depends on the test and lint tasks.
    return dict(actions=[], task_dep=['test', 'lint'])
51bb907b2d25d312e76164dce4c3c4aacecc66b2
694,841
def get_band_height(element):
    """
    Return the band height of the specified jrxml element.

    :param element: current jrxml element being processed.
    :return: the band height as a float
    """
    band_attrs = element['child'][0]['band']['attr']
    return float(band_attrs['height'])
5968319f36f4a541c922c7422c734077173ca113
694,842
import re


def reformat_ISBNs(text: str, match_func) -> str:
    """Reformat ISBNs.

    :param text: text containing ISBNs
    :param match_func: function to reformat matched ISBNs
    :type match_func: callable
    :return: reformatted text
    """
    # Match the digits/hyphens (plus optional X check digit) that follow an
    # "ISBN " marker; the code itself is captured as group 'code'.
    pattern = re.compile(r'(?<=ISBN )(?P<code>[\d\-]+[\dXx])')
    return pattern.sub(match_func, text)
a440d65e4bd747c28fb186e8b76389e8bb59526c
694,843
import os


def has_horovodrun():
    """Returns True if running with `horovodrun` using Gloo or OpenMPI."""
    env = os.environ
    return any(key in env for key in ("OMPI_COMM_WORLD_RANK", "HOROVOD_RANK"))
dd810f5a8aaf497147281290afa458f4eb8b37fe
694,844
import requests


def check_connection(uname, pword):
    """Check whether a connection with CNES PEPS can be established.

    :param uname: PEPS account user name
    :param pword: PEPS account password
    :return: HTTP status code of the probe request
    """
    # FIX: with stream=True the response body is never consumed, so the
    # previous code leaked the underlying connection.  Using the response
    # as a context manager releases it back to the pool.
    with requests.get(
        'https://peps.cnes.fr/rocket/#/search?view=list&maxRecords=50',
        auth=(uname, pword),
        stream=True,
    ) as response:
        return response.status_code
2025103c0dfd5355d2d4a1ab3e340ae8e1ac6a47
694,846
import sys


def read_file(name, normalize=True):
    """
    Read a UTF-8 text file and return its contents.

    :param name: path of the file to read
    :param normalize: when True, normalize CRLF line endings to LF
    :raises IOError: re-raised after writing a diagnostic to stderr
    """
    try:
        with open(name, 'r', encoding='utf-8') as handle:
            contents = handle.read()
    except IOError as e:
        (errno, strerror) = e.args
        sys.stderr.write('Failed to read file ' + name + ': ' + strerror)
        raise
    if normalize:
        # normalize line endings
        contents = contents.replace("\r\n", "\n")
    return contents
5ef4072381d150997f0807c5bef69abba67f5726
694,847
def get_memory_usage(pandas_df):
    """Returns the number of bytes used by a pandas dataframe."""
    # deep=True accounts for object-dtype contents, not just the pointers.
    per_column = pandas_df.memory_usage(deep=True)
    return per_column.sum()
7ed31e0f20269224ea0517a71045992106df9030
694,848