content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _full_analysis_mp_alias(br_obj, analysis_set, output_directory, unique_name, verbose, quick_plots): """ Alias for instance method that allows the method to be called in a multiprocessing pool. Needed as multiprocessing does not otherwise work on object instance methods. """ return (br_obj, unique_name, br_obj.full_analysis(analysis_set, output_directory, verbose = verbose, compile_pdf = verbose, quick_plots = quick_plots))
6997d1c641154e0761b3f7304f09afecaa0a09ab
690,480
import argparse


def parse_args() -> str:
    """Parse the command line and return the first ``.wat`` file path."""
    parser = argparse.ArgumentParser(prog="assert_wat")
    parser.add_argument("wat_file", nargs="+", help="The .wat file to assert.")
    parsed = parser.parse_args()
    # Only the first positional file is used; extra ones are accepted but ignored.
    return str(parsed.wat_file[0])
a43877ed8459aa8d03f5e9ee3d86430a5a6bf29d
690,481
def GatherResultsFromMultipleFiles(results_by_file):
    """Gather multiple results to organize them by check name and file name.

    Args:
        results_by_file: A dict of check results indexed by file name.

    Returns:
        A dict of check results in the form of:
        {`check_name`: {`file_name`: {
            'warning': {
                'range': [lower_bound, upper_bound]
                'count': number of occurrences that fall into the range,
                'total': total number of data points,
            },
            'error': ...
        }}}
    """
    merged = {}
    # BUG FIX: dict.iteritems() is Python 2 only; use items() on Python 3.
    for filename, results in results_by_file.items():
        if results:
            for check_name, values in results.items():
                if check_name not in merged:
                    merged[check_name] = {filename: values}
                else:
                    merged[check_name][filename] = values
    return merged
eba58732330198b387c31b2d29d2e43089cb933d
690,482
def add_active_pixel(sweep_line, index, distance, visibility): """Add a pixel to the sweep line in O(n) using a linked_list of linked_cells.""" #print('adding ' + str(distance) + ' to python list') #print_sweep_line(sweep_line) # Make sure we're not creating any duplicate message = 'Duplicate entry: the value ' + str(distance) + ' already exist' assert distance not in sweep_line, message new_pixel = \ {'next':None, 'index':index, 'distance':distance, 'visibility':visibility} if 'closest' in sweep_line: # Get information about first pixel in the list previous = None pixel = sweep_line[sweep_line['closest']['distance']] # won't change # Move on to next pixel if we're not done while (pixel is not None) and \ (pixel['distance'] < distance): previous = pixel pixel = pixel['next'] # 1- Make the current pixel points to the next one new_pixel['next'] = pixel # 2- Insert the current pixel in the sweep line: sweep_line[distance] = new_pixel # 3- Make the preceding pixel point to the current one if previous is None: sweep_line['closest'] = new_pixel else: sweep_line[previous['distance']]['next'] = sweep_line[distance] else: sweep_line[distance] = new_pixel sweep_line['closest'] = new_pixel return sweep_line
ac55e918096599d9159477dd713960bcf09c9e02
690,483
def create_vocab(
    lines,
    vocab_file,
    min_frequency=3,
    special_symbols=["[PAD]", "[SEP]", "[CLS]", "[MASK]", "[UNK]"],
):
    """Create vocabulary from lines.

    Args:
        lines: iterable of strings; tokens are whitespace separated.
        vocab_file: output path; one token per line, specials first, then
            the kept words in sorted order.
        min_frequency: keep only words occurring at least this many times.
        special_symbols: tokens always written first, in the given order.

    Returns:
        int: the total vocabulary size (specials + kept words).
    """
    # Count word occurrences.
    counts = {}
    for line in lines:
        stripped = line.strip()
        if stripped:
            for word in stripped.split():
                counts[word] = counts.get(word, 0) + 1
    # Remove rare words; sort for a deterministic file layout.
    kept = sorted(w for w, c in counts.items() if c >= min_frequency)
    # list() so a tuple of specials also works.
    vocab = list(special_symbols) + kept
    # BUG FIX: write with an explicit encoding so the vocab file is identical
    # across platforms instead of depending on the locale default.
    with open(vocab_file, 'w', encoding='utf-8') as f:
        for w in vocab:
            f.write(w + '\n')
    print(f"Created vocabulary file: {vocab_file}, with size {len(vocab)}")
    return len(vocab)
908599a6d6cb4a4df34dabaf5470bbd0911c3c91
690,484
import hashlib def _username_hash(username): """Returns bytes, a cryptographically safe one-way hash of the username. This way, if someone breaks the Fernet encryption, they still don't know the username. Args: username: unicode """ return hashlib.sha256(username.encode('utf-8')).digest()
45869c410ad53bfbffb38282f6cf43f56c892d77
690,485
def get_string(node, tag, default=None):
    """Finds and returns a whitespace-normalised string from the specified
    child tag in node; returns *default* when the tag is absent."""
    text = node.findtext(tag, default=default)
    if text == default:
        return text
    # Collapse all internal whitespace runs to single spaces.
    return " ".join(text.split())
d4e2824873fd4ac3c93ff98b8d6f65b4d0b07b9f
690,486
def show_input(data): # TODO: move this into each per-feature setting module
    """
    Show *data* to the user and ask them to confirm it is correct.

    :param data: any value; printed verbatim for review
    :return: True if the user answers 'y'/'Y', otherwise False (type: boolean)
    """
    print(data)
    confirm = input("입력을 재대로 하셨나요? Y/N: ")
    print("===========================================")
    if confirm.lower() == 'y':
        return True
    else:
        return False
8d45258dfcd1f33eeb36b4f51434779ae8f72b7e
690,487
import hashlib


def get_sha_hash(input_string):
    """Return the MD5 digest of ``input_string``.

    NOTE(review): despite the name and the previous docstring, this computes
    an MD5 (not SHA) digest; kept as MD5 for backward compatibility with any
    stored digests.

    Args:
        input_string (str or bytes): value to hash; str is UTF-8 encoded
            (hashlib requires bytes on Python 3).

    Returns:
        bytes: the 16-byte MD5 digest.
    """
    if isinstance(input_string, str):
        input_string = input_string.encode('utf-8')
    return hashlib.md5(input_string).digest()
58bebd717d53d3dec090031dec932cf7aa1d93b7
690,488
def reindex_coefficients(expr, coefficients):
    """
    Re-index coefficients (i.e. if a1 is there and not a0, replace a1 by a0,
    and recursively).

    :param expr: sympy-like expression exposing ``free_symbols`` and ``subs``.
    :param coefficients: ordered collection of canonical coefficient symbols.
    :return: expr with its used coefficients renamed to the first canonical ones.
    """
    # Coefficients actually appearing in the expression, sorted by name.
    coeffs = sorted(
        [x for x in expr.free_symbols if x in coefficients], key=lambda x: x.name
    )
    for idx, coeff in enumerate(coefficients):
        if idx >= len(coeffs):
            break
        if coeff != coeffs[idx]:
            # Rename the idx-th used coefficient to the idx-th canonical one.
            expr = expr.subs(coeffs[idx], coeff)
    return expr
6f0d10c601ac088710a449522788cb9969c68353
690,489
def generate_graphic_character_vocabulary(conn, min_coverage):
    """Generate a vocabulary of characters from graphic representations of lemmas
    with the specified minimal corpus coverage.

    This is the smallest vocabulary of the most frequent characters so that
    these characters together cover at least a portion of ``min_coverage`` of
    the corpus.

    :param conn: Database connection for statistics.
    :param float min_coverage: The minimal coverage.
    :return: A dictionary from characters from graphic representations of
        lemmas to their frequency rank.
    """
    if min_coverage < 0 or min_coverage > 1:
        raise ValueError('The minimum coverage must be between 0 (inclusive) and 1 (inclusive)')
    if min_coverage == 0:
        # Nothing needs covering; the empty vocabulary suffices.
        return dict()
    # Innermost query: total cumulative count of the corpus.
    # Middle query: the per-character count at which the requested coverage
    # is first reached.  Outer query: every character at or above that count.
    return {graphic_c: rank
            for graphic_c, rank in conn.cursor().execute(
                '''SELECT graphic, rank FROM statistics
                   WHERE language = "jpn"
                     AND form = "lemma:graphic:character"
                     AND count >= (
                        SELECT MAX(count) FROM statistics
                        WHERE language = "jpn"
                          AND form = "lemma:graphic:character"
                          AND cumulative_count >= (
                             SELECT MAX(cumulative_count) FROM statistics
                             WHERE language = "jpn"
                               AND form = "lemma:graphic:character") * ?)''',
                (min_coverage,))}
e9b67082fb3ff6144fcaeec352140e5e3c5fef66
690,491
def blendTriangular(d, u=0.1, s=0.4, c=0.9):
    """
    Triangular blending function, taken from eq. 3.5.

    The triangle rises from u (start) to s (peak) and falls back to c (end);
    c must be greater than s, s greater than u.  Degenerate (zero-width)
    edges yield 0.
    """
    d, u, s, c = (float(v) for v in (d, u, s, c))
    # Guard against zero-width rising/falling edges (division by zero).
    if s == u or c == s:
        return 0
    if u < d <= s:
        return (d - u) / (s - u)
    if s < d < c:
        return (c - d) / (c - s)
    # Outside the triangle entirely.
    return 0.0
0e7e093c1ba2eaab46810cf09e53a2255ccfd4ba
690,492
def ordenar_alinhamento(elemento_frasico, alinhamento):
    """
    Order the aligned pairs according to the original sentence.

    :param elemento_frasico: list of tuples with (word/gesture, lemma,
        grammatical class) for each sentence element
    :param alinhamento: dict of aligned words/gestures to their grammatical
        classes
    :return: list with the aligned values ordered as they appear in the
        original sentence.
    """
    ordered = []
    for t in elemento_frasico:
        # PERF: dict keys are unique, so a direct lookup replaces the previous
        # O(len(alinhamento)) inner scan with identical results.
        if t[1] in alinhamento:
            ordered.append(alinhamento[t[1]])
    return ordered
6994fdc7576d2e1820f6edea25046c03f1589eaf
690,493
def modulus(a, b):
    """
    Return ``abs(a) % b``; if b == 0 (or the operation fails) return 1.

    :param a: dividend (absolute value is taken first)
    :param b: divisor
    :return: the modulus, or 1 on division by zero / invalid operands
    """
    if b == 0:
        return 1
    try:
        return abs(a) % b
    except Exception:
        # BUG FIX: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt; Exception keeps the fallback-to-1 behaviour.
        return 1
6af8e948fc4428911de4133a0e9b424825465b87
690,494
def dbquery(db, dbcode):
    """Retrieve ancillary information about a database by forwarding
    *dbcode* to the connection's ``query`` method."""
    result = db.query(dbcode)
    return result
2946d908fda6c9cc497b8b6b7e6545b15463cb8f
690,495
def path_empty(grid, start, finish):
    """returns true if path between start and finish (exclusive of endpoints)
    is empty

    grid maps (x, y) tuples to occupants; None marks an empty cell.  Row 0 is
    the horizontal corridor; y > 0 cells form vertical columns below it.
    NOTE(review): corridor/column interpretation inferred from the index
    pattern -- confirm against callers.
    """
    x, y = start
    xx, yy = finish
    if y == 0:
        # Start is on row 0: walk horizontally, then down into finish column.
        # left case
        if xx < x:
            for i in range(xx, x):
                if grid[i, 0] is not None:
                    return False
        # right case
        elif x < xx:
            for i in range(x+1, xx+1):
                if grid[i, 0] is not None:
                    return False
        # check spaces in hole (cells above the finish depth)
        for j in range(1, yy):
            if grid[xx, j] is not None:
                return False
    else:
        # Start is inside a column: check cells above it up to row 0 first.
        # check above
        for j in range(y):
            if grid[x, j] is not None:
                return False
        # left case (both endpoints excluded here)
        if xx < x:
            for i in range(xx+1, x):
                if grid[i,0] is not None:
                    return False
        # right case
        elif x < xx:
            for i in range(x+1, xx):
                if grid[i,0] is not None:
                    return False
    return True
d5f5ad72d1930292fc94ac242a2a77fa4475e7c7
690,496
def _svgcolor(color): """ Convert a PyChart color object to an SVG rgb() value. See color.py. """ return 'rgb(%d,%d,%d)' % tuple(map(lambda x:int(255*x), [color.r,color.g,color.b]))
a867173b68c92a7985603899bb9dfd266f81b0c3
690,497
import os
import shlex
import subprocess


def clean_log_file(file, output=None):
    """Cleanse lines, which do not start with a TSTICK identifier
    (``<digits>: ``).

    If no output is given, the result is written to <filename>_clean<ext>.

    :param file: path of the log file to clean
    :param output: optional explicit output path
    :return: None on success, otherwise the sed return code
    """
    file = os.path.abspath(file)
    filename, ext = os.path.splitext(file)
    if not output:
        # BUG FIX: the default was the literal "(unknown)_clean<ext>" instead
        # of being derived from the input file name as documented.
        output = f"{filename}_clean{ext}"
    # Quote both paths so spaces/metacharacters cannot break (or inject into)
    # the shell command.
    sed_cmd = f"sed -rn '/^[[:digit:]]+?: /p' {shlex.quote(file)} > {shlex.quote(output)}"
    ret = subprocess.run(sed_cmd, shell=True)
    if ret.returncode == 0:
        print(f'Created {output}')
        return
    else:
        return ret.returncode
07037e95bb8ca9f2b949a9c42dd94d32dd74227d
690,498
import pickle


def unpickle(filepath):
    """
    Input:
        -filepath: the file path where the pickled object you want to read in
         is stored
    Output:
        -obj: the object you want to read in
    """
    # Context manager guarantees the handle is closed even if pickle.load
    # raises (the old open/load/close leaked the handle on error).
    with open(filepath, 'rb') as pickle_file:
        return pickle.load(pickle_file)
cf06be6a626a7c1e893717686ff6d2398965d049
690,499
def create_dvr(x):
    """ Creates the DVR table of the domain

    Expects ``x`` to be a DataFrame whose column 0 is 'element' and column 2
    is 'frequency_in_category' (selected positionally) -- NOTE(review):
    confirm the column layout against callers.

    Returns a DataFrame with columns element, frequency_in_category,
    global_weight (frequency over the grand total) and rnk (1-based rank).
    """
    # Sum frequencies per element, most frequent first.
    dvr = x.iloc[:,[0,2]].groupby('element').sum().sort_values(by='frequency_in_category', ascending=False)
    tot = sum(dvr['frequency_in_category'])
    # Only frequency_in_category exists at this point, so dividing the whole
    # frame normalises exactly that column.
    dvr['global_weight'] = dvr/tot
    dvr.reset_index(inplace=True)
    dvr['rnk'] = range(1,len(dvr)+1)
    return dvr
b6b4bb529d764112260a0ba7ad90f4ccc9c0b546
690,500
import getpass


def ask_password(prompt="Password : ", forbiddens=[]):
    """Prompt the user for a password without echoing.

    Loops until the entered password is not in *forbiddens* (iterative, so a
    persistent user can no longer blow the recursion limit as the previous
    recursive retry could).

    Keyword Arguments:
        prompt {str} -- the question message (default: {"Password : "})
        forbiddens {list} -- the list of bad passwords (default: {[]})

    Returns:
        str -- the appropriate input password
    """
    while True:
        password = getpass.getpass(prompt)
        if password not in forbiddens:
            return password
4b51af58a1eada7883ea2dace3bd0f263ee9772e
690,501
def get_id(first_name, last_name):
    """
    :param first_name: The first_name to search for.
    :param last_name: The last_name to search for.
    :return: The id number for the given first/last name, otherwise None.
    """
    with open("database.txt", "r") as db:
        for raw in db:
            record = raw.rstrip()
            if not record:
                continue  # skip blank lines
            first, last, _id = record.split(", ")
            if (first, last) == (first_name, last_name):
                return _id
    return None
0ec8f4b24b0453474c1449909f7fb079b5b784dc
690,502
def check_file(filename):
    """Returns whether or not a file is considered a valid image
    (extension jpg, jpeg or png, case-insensitive)."""
    ext = filename.split(".")[-1].lower()
    # Tuple membership replaces the chained `or` comparisons.
    return ext in ("jpg", "png", "jpeg")
b44ef445babbabd9b3ec4dde2b25bacacd2c6b4a
690,503
import sys
def frozen(set):
    """Raise an error when trying to set an undeclared name, or when calling
    from a method other than Frozen.__init__ or the __init__ method of a class
    derived from Frozen

    *set* is presumably the original ``__setattr__`` implementation being
    wrapped (e.g. ``object.__setattr__``) -- confirm at the usage site.
    """
    def set_attr(self,name,value):
        if hasattr(self,name):
            #If attribute already exists, simply set it
            set(self,name,value)
            return
        elif sys._getframe(1).f_code.co_name == '__init__':
            #Allow __setattr__ calls in __init__ calls of proper object types
            # Only permit the assignment when the calling __init__'s `self`
            # local is an instance of this object's class.
            for k,v in sys._getframe(1).f_locals.items():
                if k=="self" and isinstance(v, self.__class__):
                    set(self,name,value)
                    return
        # Falls through here when the caller is not a qualifying __init__.
        raise AttributeError("You cannot add an attribute '%s' to %s" % (name, self))
    return set_attr
a9c1266fca229f8ebea246fef799499873ffccd2
690,504
def get_q_id(hit):
    """ Returns the query ID for a hit.

    Parameters
    ----------
    hit : dict with at least the key 'alignment' (itself a dict) containing a
        'Q xxx' key whose value holds the key 'sequence'.

    Returns
    -------
    str : The query ID starting with 'Q '.

    Notes
    -----
    Each 'hit' has one 'alignment' with several lines; besides 'Q Consensus'
    there is exactly one other line starting with 'Q ', which is the input
    query's ID.  This function returns that ID.
    """
    # Pick the first alignment key that starts with 'Q' and is not the
    # consensus line.
    for key in hit['alignment'].keys():
        if key.startswith('Q') and key != 'Q Consensus':
            return key
35f301ce5a4cad34e39a79c28475b76c979da46c
690,506
import torch


def IoG_batch(box_a, box_b):
    """Intersection-over-Ground-truth for paired boxes.

    :param box_a: (N, 4) tensor of boxes as (xmin, ymin, xmax, ymax)
    :param box_b: (N, 4) tensor of ground-truth boxes, same layout
    :return: (N,) float64 tensor of intersection_area / box_b_area per pair
    """
    # Pairwise intersection: max of the mins, min of the maxes.  Elementwise
    # max/min replaces the old cat+max(dim) construction and drops the
    # leftover debug print.
    inter_xmin = torch.max(box_a[:, 0], box_b[:, 0])
    inter_ymin = torch.max(box_a[:, 1], box_b[:, 1])
    inter_xmax = torch.min(box_a[:, 2], box_b[:, 2])
    inter_ymax = torch.min(box_a[:, 3], box_b[:, 3])
    # Clamp so non-overlapping boxes contribute zero area.
    Iw = torch.clamp(inter_xmax - inter_xmin, min=0)
    Ih = torch.clamp(inter_ymax - inter_ymin, min=0)
    I = Iw * Ih
    G = (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])
    # float64 to avoid precision loss in the ratio.
    return torch.div(I.to(torch.float64), G.to(torch.float64))
c6a4d1e915645da18d108521cd95a525ebbce81e
690,507
import os


def get_dir(f):
    """
    Return the sub-directories of path *f*.

    1. If *f* does not exist, return the error string '文件路径不存在'.
    2. Otherwise inspect every entry of *f*:
       - return the list of full paths of the entries that are directories;
       - return 'Not dir' when none of the entries is a directory.
    """
    if not os.path.exists(f):
        return '文件路径不存在'
    subdirs = []
    for entry in os.listdir(f):
        full = f + os.sep + entry
        if os.path.isdir(full):
            subdirs.append(full)
    if not subdirs:
        return 'Not dir'
    return subdirs
38885b9f8f2696005752292443eb9a37691c13d5
690,508
def divide_tuples(tup1, tup2):
    """
    Function to divide tuples for percentage reduction plot

    Args:
        tup1: Higher level count statistics
        tup2: Lower level count statistics

    Return:
        Percent of counts removed by step. Returns (0, 0, 0) if no data
        available (division by zero).
    """
    try:
        res = tuple(round((ele1 / ele2), 6) for ele1, ele2 in zip(tup1, tup2))
    except ZeroDivisionError:
        # BUG FIX: division by zero raises ZeroDivisionError; the previous
        # `except RuntimeWarning` never matched, so the fallback was dead.
        res = (0, 0, 0)
    return res
7c6f184249d83b98501f8a6bfc53c649e131b666
690,509
def split_version(version_string):
    """Parse a version string like 2.7 into a tuple."""
    parts = version_string.split(".")
    return tuple(int(part) for part in parts)
48cea68ebcd84b2d8cf4d1a2cf15387664067a72
690,510
def is_icmp_reply(pkt, ipformat):
    """Return True if pkt is echo reply, else return False.

    If exception occurs return False.

    :param pkt: Packet.
    :param ipformat: Dictionary of names to distinguish IPv4 and IPv6.
    :type pkt: dict
    :type ipformat: dict
    :rtype: bool
    """
    try:
        icmp_layer = pkt[ipformat['IPType']][ipformat['ICMP_rep']]
        return icmp_layer.type == ipformat['Type']
    except:  # pylint: disable=bare-except
        # Any lookup/attribute failure means "not an echo reply".
        return False
21196c53c0e227602f8aaba984d56aeae3af2781
690,512
def main(args=None):
    """Console script for poolclass"""
    # Placeholder entry point: always reports success.
    return 0
9f4b98950988f31bb4c2462b9eb839a1936011c2
690,513
import pydoc


def lookup_by_objref(objref):
    """
    Imports an object by an ObjRef object (has ``name`` and ``module``
    attributes).  If the absolute import of ``objref.name`` fails and
    ``objref.module`` is set, retries with ``module.name``.

    Raises ImportError when neither lookup succeeds.
    """
    obj = pydoc.locate(objref.name)
    if obj is not None:
        return obj
    if objref.module is None:
        raise ImportError('Unable to import "%s"' % (objref.name))
    path = '.'.join([objref.module, objref.name])
    obj = pydoc.locate(path)
    if obj is None:
        raise ImportError('Unable to import "%s" nor "%s"' % (objref.name, path))
    return obj
c2b9245c5c2b7355f71a8cc3d6ce59f80d9434c1
690,514
def currency(value):
    """
    Converts the number to an €-amount with a comma decimal separator.
    Falsy values (None, 0, ...) render as "-".
    """
    if not value:
        return "-"
    amount = "%.2f" % round(value, 2)
    return amount.replace(".", ",") + "€"
e3e43925aafb58c86be0f7f9f2ffefd6d62ac585
690,515
def validate_move(move, turn, board):
    """
    Determine if the next move is valid for the current player

    :param move: cell label; ``move[:-1]`` selects the row key of the board
    :param turn: 1 plays 'X', anything else plays 'O'
    :param board: nested dict ``board[row][cell]``
    :return: (is_valid, board) tuple; the board is updated when valid
    """
    piece = 'X' if turn == 1 else 'O'
    try:
        occupied = board[move[:-1]][move] in ('X', 'O')
    except KeyError:
        # Unknown row or cell: invalid move, board unchanged.
        return False, board
    if occupied:
        return False, board
    board[move[:-1]][move] = piece
    return True, board
b3117e72a8377aaceb5ee8c887a272bcb15ea553
690,518
def convert_name(name, to_version=False):
    """This function centralizes converting between the name of the OVA, and
    the version of software it contains.

    OneFS OVAs follow the naming convention of <VERSION>.ova

    :param name: The thing to convert
    :type name: String

    :param to_version: Set to True to convert the name of an OVA to the version
    :type to_version: Boolean
    """
    if to_version:
        # BUG FIX: str.rstrip('.ova') strips any run of the characters
        # '.', 'o', 'v', 'a' (e.g. '8.2.2a.ova' -> '8.2.2'); remove the exact
        # suffix instead.
        if name.endswith('.ova'):
            return name[:-len('.ova')]
        return name
    else:
        return '{}.ova'.format(name)
2800c22e2af5a6ad3d537a9713c473e6d44101c6
690,519
def trim(paging_url):
    """trims the paging url to the uri by dropping the first 26 characters
    (the fixed scheme/host prefix)"""
    prefix_length = 26
    return paging_url[prefix_length:]
820e1bf4bfc45806f433fb56e3fe821a89cc73d3
690,520
def meas_pruning_ratio(num_blobs_orig, num_blobs_after_pruning, num_blobs_next):
    """Measure blob pruning ratio.

    Args:
        num_blobs_orig: Number of original blobs, before pruning.
        num_blobs_after_pruning: Number of blobs after pruning.
        num_blobs_next: Number of a blobs in an adjacent segment, presumably
            of similar size as that of the original blobs.

    Returns:
        Pruning ratios as a tuple of the original number of blobs, blobs
        after pruning to original, and blobs after pruning to the next
        region; None when either denominator is not positive.
    """
    if num_blobs_next <= 0 or num_blobs_orig <= 0:
        return None
    # calculate pruned:original and pruned:adjacent blob ratios
    print("num_blobs_orig: {}, blobs after pruning: {}, num_blobs_next: {}"
          .format(num_blobs_orig, num_blobs_after_pruning, num_blobs_next))
    return (num_blobs_orig,
            num_blobs_after_pruning / num_blobs_orig,
            num_blobs_after_pruning / num_blobs_next)
fabe113effdd97cffa31ccd9cda105b464a3163f
690,521
def percint(a, b):
    """Calculate a percent.

    a {number} Dividend.
    b {number} Divisor.

    return {int} quotient as a truncated percent value (e.g. 25 for .25)
    """
    ratio = a / float(b)
    return int(ratio * 100)
a50dc82018476f1ef702000ae939a8b98c9e9bc1
690,522
import ast
import os


def read_mdl_data(idir_traj):
    """Read the mdl trajectory data stored under *idir_traj*.

    Each file is expected to contain one Python literal (e.g. a list of
    floats) per line.

    :param idir_traj: directory containing trajectory files
    :return: list of trajectories, one list of parsed points per file
    :raises AssertionError: when the directory contains no files
    """
    # get the traj files
    traj_files = os.listdir(idir_traj)
    assert len(traj_files) > 0, 'There is no trajectory.'
    # read trajs
    trajs = []
    for traj_file in traj_files:
        with open(idir_traj + '/' + traj_file) as f_mdl_traj:
            # SECURITY FIX: the points were parsed with eval(), which executes
            # arbitrary code from the data files; literal_eval only accepts
            # Python literals (lists/tuples/numbers/strings).
            _traj = [ast.literal_eval(_point) for _point in f_mdl_traj.readlines()]
            trajs.append(_traj)
    return trajs
2939220d44292b0103c179c44b8c8ce1b0fb1a70
690,523
def sort_012(input_list):
    """
    Sort a list containing the integers 0, 1, and 2, in a single traversal
    (Dutch national flag partition).

    :param input_list: list (sorted in place)
    :return: list
    """
    current_index = 0
    zero_index = 0
    two_index = len(input_list) - 1
    while current_index <= two_index:
        # BUG FIX: compare values with `==`, not the identity operator `is`;
        # int identity is a CPython small-int caching detail and emits a
        # SyntaxWarning on modern Python.
        if input_list[current_index] == 2:
            input_list[current_index], input_list[two_index] = input_list[two_index], input_list[current_index]
            two_index -= 1
            continue
        if input_list[current_index] == 0:
            input_list[current_index], input_list[zero_index] = input_list[zero_index], input_list[current_index]
            zero_index += 1
        current_index += 1
    return input_list
a64efa5e591120bbd2f12c1eb9d0915259757f59
690,524
def count_str(S):
    """Takes a pd Series with at least the indices 'alt' and 'repeatunit',
    both strings. Return the number of occurrences of repeatunit in alt
    (0 when alt is None)."""
    alt = S['alt']
    if alt is None:
        return 0
    return alt.count(S['repeatunit'])
0717727ff59a3b29e22502875c73323558554eec
690,525
import re


def html_to_string(parser_content):
    """Extracts the textual content from an html object."""
    # Drop non-content elements before extracting the text.
    for element in parser_content(["script", "style", "aside"]):
        element.extract()
    # Collapse newline/tab runs into single spaces in one pass.
    return " ".join(re.split(r'[\n\t]+', parser_content.get_text()))
e489bb03a945c2d6bc9b5f4ece14037551cd04ec
690,526
def abs2(src, target):
    """
    compute the square absolute value of two number.
    :param src: first value
    :param target: second value
    :return: square absolute value
    """
    difference = abs(src - target)
    return difference * difference
275e015bca3cae2f737b284f40861b3136936109
690,527
from typing import List


def score_yacht(dice: List[int]) -> int:
    """Score the Yacht category: 50 points when every die shows the same face.

    :param dice: the rolled dice values
    :return: 50 for a yacht, otherwise 0.  An empty roll scores 0
        (previously it scored 50 because ``all()`` of an empty sequence
        is True).
    """
    return 50 if dice and all(d == dice[0] for d in dice) else 0
11fd7e087bc9b1abecd3c43c9a5422c8e24f882e
690,528
def union(bbox1, bbox2):
    """Create the union of the two bboxes.

    Parameters
    ----------
    bbox1
        Coordinates of first bounding box
    bbox2
        Coordinates of second bounding box

    Returns
    -------
    [y0, y1, x0, x1]
        Coordinates of union of input bounding boxes
    """
    return [
        min(bbox1[0], bbox2[0]),  # y0: smallest top
        max(bbox1[1], bbox2[1]),  # y1: largest bottom
        min(bbox1[2], bbox2[2]),  # x0: smallest left
        max(bbox1[3], bbox2[3]),  # x1: largest right
    ]
0cb11ca0925bfbb191070b701e032abeca32eea5
690,529
import os
import re


def list_files(basedir, recursive=False, pattern_to_exclude=None, verbose=False):
    """
    Return a list of files given some parameter
    @param :
        - basedir : base directory where we looking for some files
        - recursive : does we look into sub directories
        - pattern_to_exclude : a regex for excluding some files (matching
          files are dropped from the result)
        - verbose : print a message for each excluded file
    """
    found = []
    # List files of a specific source
    if recursive:
        for dirpath, dirname, files in os.walk(basedir):
            for file in files:
                found.append("{0}/{1}".format(dirpath, file))
    else:
        for file in os.listdir(basedir):
            full_path = "{0}/{1}".format(basedir, file)
            if os.path.isfile(full_path):
                found.append(full_path)
    if not pattern_to_exclude:
        return found
    # BUG FIX: the exclusion filter previously KEPT the files matching the
    # pattern and dropped everything else; it now excludes matches.
    result = []
    for a_file in found:
        if re.search(pattern_to_exclude, a_file, re.IGNORECASE) is None:
            result.append(a_file)
        elif verbose:
            print("File {0} - Match exlusion request {1}".format(a_file, pattern_to_exclude))
    return result
dca9649d38e46646729ee8368f3df2cca87553f6
690,530
def round_base(x, base=.05):
    """Round *x* to the nearest multiple of *base*."""
    steps = round(float(x) / base)
    return base * steps
5ef08809764cae5d7a086b35dddc2a1765e50686
690,532
def run_bq_query(client, query, timeout):
    """
    Returns the results of a BigQuery query

    Args:
        client: BigQuery-Python bigquery client
        query: String query
        timeout: Query timeout time in seconds

    Returns:
        List of dicts, one per record; dict keys are table field names and
        values are entries

    Raises:
        RuntimeError: if the job has not completed within the timeout
    """
    job_id, _results = client.query(query, timeout=timeout)
    complete, row_count = client.check_job(job_id)
    if not complete:
        raise RuntimeError('Query not complete')
    rows = client.get_query_rows(job_id)
    print('Got %s records' %row_count)
    return rows
1336b884b32d15e7bcb5b97ef8b2b6922d775e77
690,533
def create_headers(bearer_token):
    """Create headers to make API call

    Args:
        bearer_token: Bearer token

    Returns:
        Header for API call
    """
    auth_value = f"Bearer {bearer_token}"
    return {"Authorization": auth_value}
5870332cf71800d0bcfbd739cc16db85508111bc
690,534
def get_ship_name(internal_name):
    """
    Get the display name of a ship from its internal API name
    :param internal_name: the internal name of the ship
    :return: the display name of the ship, or None if not found
    """
    internal_names = {
        "adder": "Adder",
        "alliance-challenger": "Alliance Challenger",
        "alliance-chieftain": "Alliance Chieftain",
        "alliance-crusader": "Alliance Crusader",
        "anaconda": "Anaconda",
        "asp-explorer": "Asp Explorer",
        "asp-scout": "Asp Scout",
        "beluga-liner": "Beluga Liner",
        "cobra-mk-iii": "Cobra MkIII",
        "cobra-mk-iv": "Cobra MkIV",
        "diamondback-explorer": "Diamondback Explorer",
        "diamondback-scout": "Diamondback Scout",
        "dolphin": "Dolphin",
        "eagle": "Eagle",
        "federal-assault-ship": "Federal Assault Ship",
        "federal-corvette": "Federal Corvette",
        "federal-dropship": "Federal Dropship",
        "federal-gunship": "Federal Gunship",
        "fer-de-lance": "Fer-de-Lance",
        "hauler": "Hauler",
        "imperial-clipper": "Imperial Clipper",
        "imperial-courier": "Imperial Courier",
        "imperial-cutter": "Imperial Cutter",
        "imperial-eagle": "Imperial Eagle",
        "keelback": "Keelback",
        "krait-mk-ii": "Krait MkII",
        "krait-phantom": "Krait Phantom",
        "mamba": "Mamba",
        "orca": "Orca",
        "python": "Python",
        "sidewinder": "Sidewinder",
        "type-6": "Type-6 Transporter",
        "type-7": "Type-7 Transporter",
        "type-9": "Type-9 Heavy",
        "type-10": "Type-10 Defender",
        "viper-mk-iii": "Viper MkIII",
        "viper-mk-iv": "Viper MkIV",
        "vulture": "Vulture",
    }
    # dict.get avoids the membership-check-then-lookup double access and
    # returns None for unknown names, as documented.
    return internal_names.get(internal_name)
ee23f2b7e97df0b74b2006a8a0a4782201137de7
690,535
def user_input(passcode: str) -> str:
    """Get the passcode from the user."""
    return input(f"Type the numerical value of the passcode `{passcode}`: ")
05449c0106382bd566f2fd1af26c5c0c198b5b13
690,536
def _get_kwarg(kwarg, metric_name="Metric", **kwargs): """Pop a kwarg from kwargs and raise warning if kwarg not present.""" kwarg_ = kwargs.pop(kwarg, None) if kwarg_ is None: msg = "".join( [ f"{metric_name} requires `{kwarg}`.", f"Pass `{kwarg}` as a keyword argument when calling the metric.", ] ) raise ValueError(msg) return kwarg_
4dfdd18419cab9c64a9dc2010a54c2f41edcde23
690,538
def candidate_symbol(comp):
    """
    Return a character representing completion type.

    :type comp: jedi.api.Completion
    :arg  comp: A completion object returned by `jedi.Script.complete`.
    """
    try:
        first_letter = comp.type[0]
    except (AttributeError, TypeError):
        # No usable `type` attribute -> unknown kind.
        return '?'
    return first_letter.lower()
437a1d3c1490e833b707cd4df465a19c702dfe8e
690,539
import glob


def getRansomwareFiles(path):
    """ Return all the ransomware files (sorted) from a given path.

    Matches files named ``*_labeled.*`` under *path*.  Returns None (after
    printing a message) when the samples cannot be read.
    """
    try:
        return sorted(glob.glob(str(path) + '/*_labeled.*'))
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt.
        print("Ransomware samples could not be read")
        return
ec97bbcbee0cf0900370f41dc4d21d3ecc6b2233
690,540
def add_n(attrs, inputs, proto_obj):
    """Elementwise sum of arrays: map to the 'add_n' operator, passing
    attrs and inputs through unchanged (proto_obj is unused)."""
    return ('add_n', attrs, inputs)
bfe822bd74875374ffeedccaa4cee65bea3b683d
690,542
def to_dynamic_cwd_tuple(x):
    """Convert to a canonical cwd_width tuple: (float value, unit), where the
    unit is '%' for percentage strings and 'c' (columns) otherwise."""
    if isinstance(x, str):
        if x[-1] == "%":
            return (float(x[:-1]), "%")
        return (float(x), "c")
    # Already a (value, unit) pair: just normalise the value to float.
    return (float(x[0]), x[1])
ab0b74097a2513b7ee44aaa23cb9fa6cfb864ed0
690,543
def find_gcd(NumpyArray):
    """Return the greatest common divisor of all entries of the array."""
    def gcd(x, y):
        # Euclidean algorithm (loop form kept for exact legacy behaviour
        # with negative inputs).
        while y > 0:
            x, y = y, x % y
        return x

    result = NumpyArray[0]
    for value in NumpyArray[1:]:
        result = gcd(result, value)
    return result
9438ea949c03f6d120b69931f57738b5b78fd4a5
690,544
import torch
def compute_projected_translation(translation, K):
    """Apply the camera intrinsics to a per-pixel translation field.

    Multiplies the upper-left 3x3 of ``K`` into the channel dimension of
    ``translation`` (batched matrix-vector product per pixel).

    :param translation: (B, 3, H, W) tensor -- NOTE(review): shape inferred
        from the einsum subscripts 'bjhw'; confirm against callers.
    :param K: (B, >=3, >=3) camera intrinsics; only ``K[:, :3, :3]`` is used.
    :return: (B, 3, H, W) tensor of projected translations.
    """
    return torch.einsum('bij,bjhw->bihw', K[:,:3,:3], translation)
5aec8542134e3c93238ce4e4d0285329d9991002
690,545
import typing


def attach(object: typing.Any, name: str) -> typing.Callable:
    """Return a decorator doing ``setattr(object, name)`` with its argument.

    >>> spam = type('Spam', (object,), {})()
    >>> @attach(spam, 'eggs')
    ... def func():
    ...     pass
    >>> spam.eggs  # doctest: +ELLIPSIS
    <function func at 0x...>
    """
    def _decorator(wrapped):
        # Bind the decorated callable onto the target, then hand it back
        # unchanged so the decoration is transparent.
        setattr(object, name, wrapped)
        return wrapped
    return _decorator
32f2d5beaf3114e8724f380fb691b128b920d3eb
690,547
def expected_metadata():
    """Metadata fixture for reuse."""
    metadata = {"abc": 123}
    return metadata
9716620f33fc53405e8d5734199be4d1a58811ae
690,548
import email


def process(data):
    """Extract required data (date/to/from/body) from the mail; ``data[1]``
    is the raw RFC 2822 message text."""
    parsed = email.message_from_string(data[1])
    return {
        'date': parsed['Date'],
        'to': parsed['To'],
        'from': parsed['From'],
        'message': parsed.get_payload(),
    }
8cac6adbc212614d3c93cdb784a644e8f2a6d964
690,549
def test_in(value, seq):
    """Check if value is in seq.

    Copied from Jinja 2.10
    https://github.com/pallets/jinja/pull/665

    .. versionadded:: 2.10
    """
    # Plain containment test exposed as a Jinja filter/test.
    contained = value in seq
    return contained
7e42d027af4aecfc6cc6f9a93b6ee07eba3459a8
690,550
def dt_controller(current_control_output_value, previous_control_output_value, derivative_gain_value):
    """Derivative term of a discrete controller: the change in the control
    output since the previous step, scaled by the derivative gain.

    (Replaces the placeholder "Docstring here" left in the original.)

    :param current_control_output_value: output value at this time step
    :param previous_control_output_value: output value at the previous step
    :param derivative_gain_value: derivative gain (Kd)
    :return: Kd * (current - previous)
    """
    delta = current_control_output_value - previous_control_output_value
    return delta * derivative_gain_value
06775c81ad3dfe19b60191500f39e6234a82d6a1
690,551
import os


def safe_folder_fixture(input_folder):
    """Provides a folder each file contains a definition of a .SAFE structure
    on how it will be reconstructed from files from AWS S3 buckets."""
    subfolder = "aws_safe"
    return os.path.join(input_folder, subfolder)
5c99cf466230ca0c09f72e355193b3201a005ddf
690,552
def make_copy_files_rule(repository_ctx, name, srcs, outs): """Returns a rule to copy a set of files.""" # Copy files. cmds = ['cp -f "{}" "$(location {})"'.format(src, out) for (src, out) in zip(srcs, outs)] outs = [' "{}",'.format(out) for out in outs] return """genrule( name = "{}", outs = [ {} ], cmd = \"""{} \""", )""".format(name, "\n".join(outs), " && \\\n".join(cmds))
fcc562b6ce7e8fe865d412b49fcd57f82f661945
690,553
def cal_z_c(fd, z_liq, h0):
    """
    Calculation of characteristic depth from Karamitros et al. (2013)

    :param fd: foundation-like object with ``width`` and ``b`` attributes
    :param z_liq: liquefiable layer depth
    :param h0: base depth offset
    :return: characteristic depth
    """
    # Narrow foundations are limited by their own dimension `b`.
    extra = z_liq if fd.width > z_liq else fd.b
    return h0 + extra
5661774bf5328b680987dbb9dc93b4896e5768ff
690,554
def json_serializer(obj):
    """
    A JSON serializer that serializes dates and times via ``isoformat``.

    Objects without an ``isoformat`` method yield None (rendered as JSON
    null rather than raising TypeError).
    """
    isoformat = getattr(obj, 'isoformat', None)
    if isoformat is not None:
        return isoformat()
d70e4488091b00c753d820556da485d31e49eb84
690,556
import re


def strip(text):
    """
    python's str.strip() method implemented using regex

    Args:
        text (str): text to strip of white space

    Returns:
        textStripped (str): text stripped of white space
    """
    # Remove leading whitespace, then trailing whitespace.
    without_leading = re.sub(r'(^\s*)', '', text)
    return re.sub(r'(\s*$)', '', without_leading)
e68864333a39beab2c0af5e74ea1c983ac9035ca
690,557
def to_lower(list_of_lists_of_tokens):
    """
    Lower-case every token, preserving sentence boundaries.

    Parameters
    ----------
    list_of_lists_of_tokens : dataframe column or a variable containing a
        list of word-token lists, one sublist per sentence, e.g.
        [['I', 'think', '.'], ['Therefore', ',', 'I', 'am', '.']]

    Returns
    -------
    The same nested structure with every token lower-cased.  On TypeError
    the exception object itself is returned (legacy behaviour); any other
    failure yields [].
    """
    try:
        return [[tok.lower() for tok in sentence] for sentence in list_of_lists_of_tokens]
    except TypeError as e:  # pragma: no cover
        return e
    except:  # pragma: no cover
        return []
98c6d96e257f5234650255c1e9535c01ff656163
690,559
from typing import Union
import re


def get_zone_id_from_item_name(item_name: str) -> Union[str, None]:
    """ Extract and return the zone id (``level_location``) from the item
    name, or None when the name does not match ``level_location_rest``. """
    match = re.search('([^_]+)_([^_]+)_(.+)', item_name)
    if match is None:
        return None
    # Zone id = first two underscore-separated components rejoined.
    return '_'.join([match.group(1), match.group(2)])
3bd0869ddb4903343589e31436d8ad11020f5bf5
690,560
import os


def required_folder(*parts):
    """joins the args and creates the folder if not exists

    Returns the joined path; raises AssertionError when the path exists but
    is not a directory.
    """
    path = os.path.join(*parts)
    # exist_ok avoids the check-then-create race of the previous
    # `if not exists: makedirs` pattern under concurrent callers.
    os.makedirs(path, exist_ok=True)
    assert os.path.isdir(path), "%s is not a folder as required" % path
    return path
ace2d0d17c078bfb00524acdd42ad66b47bc2bfe
690,561
import os


def cwd(pid):
    """cwd(pid) -> str

    Args:
        pid (int): PID of the process.

    Returns:
        The path of the process's current working directory. I.e. what
        ``/proc/<pid>/cwd`` points to.
    """
    proc_link = '/proc/%d/cwd' % pid
    return os.readlink(proc_link)
db4b27872e1d41f079745667f2eb9849296d8c34
690,562
import os


def read_config_env():
    """Read arguments from the BALLISTICS_CONF environment variable.

    Check if there is an environment variable called BALLISTICS_CONF.
    If so, read the value and split it on whitespace.

    Returns:
        list of str: the arguments to insert as if on the command line;
        empty when the variable is unset, empty, or whitespace-only.
    """
    env = os.getenv('BALLISTICS_CONF')
    if not env:
        return []
    # split() without an argument collapses runs of whitespace and never
    # yields empty strings; the previous split(' ') produced '' entries
    # for "a  b" and [''] for a whitespace-only value.
    return env.split()
80d0390ec9f105ba90c887ea9cb41d913de9cfc7
690,563
import collections


def replace_key_in_order(odict, key_prev, key_after):
    """Replace `key_prev` of `OrderedDict` `odict` with `key_after`,
    while leaving its value and the rest of the dictionary intact and in
    the same order.
    """
    return collections.OrderedDict(
        (key_after if key == key_prev else key, value)
        for key, value in odict.items()
    )
118b6e443fb36aac3154af48dcb55e908e2f31b3
690,564
def _which(repository_ctx, cmd, default = None):
    """A wrapper around repository_ctx.which() to provide a fallback value.

    Args:
        repository_ctx: Bazel repository context used to locate executables.
        cmd: name of the executable to look up on PATH.
        default: value returned when the executable is not found.

    Returns:
        The string path of the located executable, or `default` when absent.
    """
    # NOTE(review): this looks like Bazel Starlark, not plain Python —
    # Starlark has no `is`, so `== None` must be kept as written.
    result = repository_ctx.which(cmd)
    return default if result == None else str(result)
bd971599fbb77bf7eb504946ef2f901e877ed9b1
690,565
from typing import List


def axial_load(W: float, L: float, l1: float) -> List[float]:
    """Fixed-end axial forces for a member of length L loaded by W at l1.

    Case 5 from Matrix Analysis of Framed Structures [Aslam Kassimali].

    Returns:
        [Fa, Fb]: the reactions at ends a and b.
    """
    # Each end carries the share proportional to the opposite segment.
    remainder = L - l1
    reaction_a = W * remainder / L
    reaction_b = W * l1 / L
    return [reaction_a, reaction_b]
b5985f5eeeec2c59096ab2bb22b1148ef9d5d841
690,566
import torch def masked_TOP_loss(preds, targets): """ Top One Probability(TOP) loss,from <<Learning to Rank: From Pairwise Approach to Listwise Approach>> """ preds = torch.squeeze(preds[targets.mask]) targets = targets.data[targets.mask] preds_p = torch.softmax(preds, 0) targets_p = torch.softmax(targets, 0) loss = torch.mean(-torch.sum(targets_p*torch.log(preds_p))) return loss
e21590d09490b585ffa13ab73a9b1ff7af3f392c
690,567
def isJsonSafe(data):
    """
    Check if an object is json serializable

    :param data: (python object)
    :return: (bool)
    """
    if data is None:
        return True
    if isinstance(data, (bool, int, float, str)):
        return True
    if isinstance(data, (tuple, list)):
        # Every element must itself be serializable.
        return all(isJsonSafe(item) for item in data)
    if isinstance(data, dict):
        # JSON objects require string keys and serializable values.
        return all(
            isinstance(key, str) and isJsonSafe(value)
            for key, value in data.items()
        )
    return False
4daa6dd9c6a10dc03ff4801bebf41eda8d2861ee
690,568
import os


def build_files_list(root_dir):
    """Build a list containing paths to all files under *root_dir*.

    Paths are joined from the walk's directory component, so they are
    absolute only if *root_dir* itself is absolute.
    """
    found = []
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        found.extend(os.path.join(dirpath, name) for name in filenames)
    return found
d7ae23428043b4727a23a4b1ee32e1fad77cd6b3
690,569
from typing import Callable


def composed(*decorators: Callable) -> Callable:
    """Build a single decorator that applies *decorators* as if stacked."""
    def apply_all(func: Callable) -> Callable:
        # Apply right-to-left so the first decorator ends up outermost,
        # mirroring the usual stacked-@decorator order.
        for dec in decorators[::-1]:
            func = dec(func)
        return func
    return apply_all
ec6ef95e2cd3616d67ea76ca71519e4ee7703c01
690,570
def split_user_goals(all_user_goals):
    """
    Helper method to split the user goals in two sets of goals, with and
    without request slots
    """
    without_requests = []
    with_requests = []
    for goal in all_user_goals:
        # An empty request_slots mapping means the goal requests nothing.
        if goal["request_slots"]:
            with_requests.append(goal)
        else:
            without_requests.append(goal)
    return without_requests, with_requests
1d1d536ec78f89aaa49135512648aa30ea2142f3
690,571
def get_phi0(self, b_, bp_):
    """
    Get the reduced density matrix element corresponding to
    many-body states b and bp.

    Parameters
    ----------
    self : Builder or Approach
        The system given as Builder or Approach object.
    b_,bp_ : int
        Labels of the many-body states.

    Returns
    --------
    phi0bbp : complex
        A matrix element of the reduced density matrix (complex number).
    """
    # Translate the external labels into the internal state ordering.
    b = self.si.states_order[b_]
    bp = self.si.states_order[bp_]
    # Total occupation (charge) of each many-body state.
    bcharge = sum(self.si.get_state(b))
    bpcharge = sum(self.si.get_state(bp))
    phi0bbp = 0.0
    if self.funcp.kerntype == 'Pauli':
        # The Pauli master equation keeps only diagonal elements.
        if b == bp:
            ind = self.si.get_ind_dm0(b, b, bcharge, maptype=1)
            phi0bbp = self.phi0[ind]
    elif bcharge == bpcharge:
        # Coherences exist only between states of equal charge.
        ind = self.si.get_ind_dm0(b, bp, bcharge, maptype=1)
        # maptype=3 presumably flags whether (b, bp) is stored as the
        # conjugate of the kept pair — TODO confirm against StateIndexing.
        conj = self.si.get_ind_dm0(b, bp, bcharge, maptype=3)
        if ind != -1:
            if type(self.si).__name__ == 'StateIndexingDMc':
                # Complex storage: the element is kept directly.
                phi0bbp = self.phi0[ind]
            else:
                # Real storage: imaginary parts live at an offset of
                # ndm0 - npauli; the sign flips for conjugate pairs and
                # diagonal entries (ind < npauli) carry no imaginary part.
                # NOTE(review): this assumes a
                # [diagonal | real off-diag | imag off-diag] layout of
                # phi0 — confirm with the indexing class.
                ndm0, npauli = self.si.ndm0, self.si.npauli
                phi0bbp = (self.phi0[ind] + 1j*self.phi0[ndm0-npauli+ind]
                           * (+1 if conj else -1)
                           * (0 if ind < npauli else 1))
    return phi0bbp
3e22545e7836cf8bf5c6b8ebb46518c1ec5cc114
690,572
def valid_simili_tabpanel(arch):
    """A tab panel with tab-pane class must have role="tabpanel"."""
    # Match nodes whose class attribute — static or templated — contains
    # the token "tab-pane" but that lack role="tabpanel".
    class_attrs = ('@class', '@t-att-class', '@t-attf-class')
    has_tab_pane = ' or '.join(
        'contains(concat(" ", %s, " "), " tab-pane ")' % attr
        for attr in class_attrs
    )
    query = '//*[%s][not(@role="tabpanel")]' % has_tab_pane
    return "Warning" if arch.xpath(query) else True
65634d4e50ee7370c130357b71a8596d5991ea3e
690,573
import torch


def train_test_split(dataset, params):
    """Grabs random Omniglot samples and generates test samples from same class.

    The random seed is taken from params.sampler_seed, the test_shift is
    which sample to grab as a test. If it ends up being a different class,
    the sampler is walked back until the class is same, and the sample is
    different.

    Args:
        dataset: (Dataset) Sampler from Omniglot dataset.
        params: (json dict) Params.json file.

    Returns:
        train_dataloader, test_dataloader: (tuple) Containing matched train
        test pairs.
    """
    train_dataset = []
    test_dataset = []
    # Random seed from params file (makes the index draw reproducible).
    torch.manual_seed(params.sampler_seed)
    # Create batch_size random indices from dataset.
    # Subtract params.test_shift so that we don't pick a random sample
    # so close to the end of the set that it looks for a test pair in
    # the blackness of 'index out of range'.
    idxs = torch.randint(len(dataset) - params.test_shift,
                         (1, params.batch_size))
    # Make sure one of them is our control.
    # NOTE(review): 19 is a magic control index — confirm it is valid for
    # every dataset this is used with.
    idxs[0, 0] = 19
    for i, idx in enumerate(idxs[0]):
        # Each dataset item is assumed to be a (sample, label) pair.
        shift_idx = params.test_shift
        train_sample, train_lbl = dataset[idx]
        test_sample, test_lbl = dataset[idx + shift_idx]
        # Make sure labels are the same, and it is not the same sample.
        # NOTE(review): the first loop iteration re-fetches with the same
        # shift_idx before decrementing, and nothing stops shift_idx from
        # reaching 0 (same sample) or going negative — if no matching pair
        # exists in range this loops forever. Confirm intended.
        while (train_lbl != test_lbl) or (torch.equal(train_sample,
                                                      test_sample)):
            test_sample, test_lbl = dataset[idx + shift_idx]
            shift_idx -= 1
        train_dataset.append(train_sample)
        test_dataset.append(test_sample)
        #=====MONITORING=====#
        # Uncomment to see train_samples or change selection to test_sample.
        # utils.animate_weights(train_sample, auto=True)
        #=====END MONITORING=====#
    # Stack into (batch, 1, ...) tensors; unsqueeze_ adds a channel dim.
    train_dataloader = torch.stack(train_dataset)
    train_dataloader.unsqueeze_(1)
    test_dataloader = torch.stack(test_dataset)
    test_dataloader.unsqueeze_(1)
    return train_dataloader, test_dataloader
a044bbc2467c1d4ee3ad3424ff59ea3ceb3d735d
690,575
from typing import Any, Callable


def parse_optional_value(value: str, deserialize: Callable[[str], Any]) -> Any:
    """Deserialize *value*, treating the empty string as ``None``."""
    return None if value == "" else deserialize(value)
f8efe829152cdbc776d72f26ddb30aa7ac9d0ba2
690,576
def expected_column_names():
    """Expected column names for metadata."""
    # No name contains whitespace, so a split of one literal is exact.
    names = (
        "isic_id image_name dataset description accepted created tags "
        "pixels_x pixels_y age sex localization benign_malignant "
        "dx dx_type melanocytic"
    )
    return names.split()
a099c528d1b4dba02ad2b8ee8560e5c0693ff4d9
690,577
import os
import re


def getActiveRevision(sitPath, project):
    """
    Return the currently installed patchlevel (SVN revision) of a project,
    by resolving the 2-digit symlinks.

    in root SIT:   2.2 --> 2.2.500
    in proxy SIT:  2.2 --> 2.2.501

    By providing a 'sitPath' URL you can query the SIT root directory or
    any SIT proxy directory, e.g.:

       globalRevision = getActiveRevision( sitRootPath,  'Libraries/Foo/1.0' )
       proxyRevision  = getActiveRevision( sitProxyPath, 'Libraries/Foo/1.0' )

       if proxyRevision != globalRevision:
           # inform user

    Returns 'None' if the patchlevel number can't reliably be detected.
    """
    resolved = os.path.realpath(os.path.join(sitPath, project))
    match = re.search(r"^(\d+)\.(\d+)\.(\d+)", os.path.basename(resolved))
    if match:
        return int(match.group(3))
    return None
0be47d4ee69545ffa34b191ca58bac24bba594af
690,579
def is_hidden(name):
    """Check if object is active or no (hidden names carry a ``t_`` prefix)."""
    # startswith already returns False for strings shorter than the
    # prefix, so no explicit length guard is needed.
    return name.startswith('t_')
99a8e1f13a9402a7f9fae8553dc0548f7ea2f8e4
690,580
def parseGroups(group_file, xornot):
    """
    Parse a comma-separated group file.

    :param group_file: path to a file whose lines look like
        ``name, fastaid1, fastfa2``
    :param xornot: when True, also add a ``_1``-suffixed copy of every
        entry to its group.
    :return: (entry -> group-index mapping, group-index -> entries mapping)
    """
    data = {}
    with open(group_file) as handle:
        for index, raw_line in enumerate(handle):
            # Drop newlines and all spaces from every comma-separated part.
            entries = [part.replace("\n", "").replace(" ", "")
                       for part in raw_line.split(",")]
            if xornot:
                data[index] = entries + [entry + "_1" for entry in entries]
            else:
                data[index] = entries
    membership = {}
    for group_index, group_entries in data.items():
        for entry in group_entries:
            membership[entry] = group_index
    return membership, data
79c01f7ce876811dc67b33a284ad8de53ba75cb9
690,581
import pathlib


def project_dir():
    """Root directory of the project (two levels above this file)."""
    # parent.parent is equivalent to parents[1].
    this_file = pathlib.Path(__file__)
    return this_file.parent.parent.absolute()
0ecc714b340e43fe0e07d5e0deb6655d58a7a07f
690,582
def instruction(f):
    """
    Decorator for instructions, to make selectively exporting possible.

    Tags the wrapped callable with ``_instruction = True`` and returns it
    unchanged.
    """
    setattr(f, "_instruction", True)
    return f
8d0d7836dc4e4f51212bfd36a0c39829fbe80586
690,583
import math


def dist(a, b):
    """Euclidean distance between 2-D points *a* and *b*."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return math.sqrt(dx * dx + dy * dy)
62393373ff9cbf2a42c412e88118b7bae5798bcb
690,584
def time_display(timestamp):
    """Formats a duration in seconds as e.g. ``"1d 2h 3m 4s"``.

    Params:
        timestamp(int): duration in seconds; negative values clamp to 0.
    Returns:
        string
    """
    # NOTE: the minute/second suffixes were garbled quote characters in
    # the original source (`+ """)` opened a stray triple-quoted string);
    # restored as "m"/"s" to match the "d"/"h" pattern.
    timestring = []
    if timestamp < 0:
        timestamp = 0
    days = timestamp // 86400
    hours = timestamp % 86400 // 3600
    mins = timestamp % 86400 % 3600 // 60
    secs = timestamp % 86400 % 3600 % 60
    if days > 0:
        timestring.append(str(days) + "d")
    if hours > 0:
        timestring.append(str(hours) + "h")
    if mins > 0:
        timestring.append(str(mins) + "m")
    # Seconds are always shown so the result is never empty.
    if secs >= 0:
        timestring.append(str(secs) + "s")
    return " ".join(timestring)
78d5f0de9aacb81ee287e30069b16df21e389061
690,585
def sumValues(map: dict) -> int:
    """ Sums dict's values """
    return sum(map.values())
b625f0f9abc0ffba28d8259b240b8cbc1c8306b0
690,586
import re


def convert_footnotes(xml_soup):
    """Return a beautiful xml soup...

    Rewrites Markdown-style footnotes in a BeautifulSoup tree: each
    in-text footnote reference (<a id="fnref...">) is replaced by the
    footnote's own content wrapped in the -fn--/--fn- markers expected
    by ReFoot_mod.js, then the footnotes <div> and its title are removed.

    NOTE(review): the regex "fn*." means 'f', zero-or-more 'n', any char —
    it also matches ids like "fnrefN", so the <li> query may pick up more
    nodes than intended; presumably r"fn\\d+" was meant — confirm.
    """
    if xml_soup.find_all('li', id=re.compile("fn*.")):
        # Iterate through footnotes
        footnotes = xml_soup.find_all('a', id=re.compile("fnref*."))
        for index_footnote, each_footnote in enumerate(footnotes):
            # Pair the i-th reference with the i-th footnote body;
            # assumes both lists appear in the same document order.
            footnote_content = xml_soup.find_all('li', id=re.compile("fn*."))[index_footnote];
            # clean footnote
            footnote_content = footnote_content.contents[0]
            # clear fn back link (the return arrow to the reference)
            footnote_content.find('a', href=re.compile("#fnref*.")).extract() # remove link
            # replace footnote content
            each_footnote.insert_before("-fn--") # to fit the requirements of ReFoot_mod.js
            each_footnote.insert_after("--fn-") # to fit the requirements of ReFoot_mod.js
            each_footnote.replace_with(footnote_content) #remove surrounding <p>?
        # clean footnotes from xml
        footnotes = xml_soup.find('div', { "class" : "footnotes" })
        footnotes.extract()
        # remove footnotes title
        footnotes_title = xml_soup.find('h2', id="notes")
        footnotes_title.extract()
    return xml_soup
1b632854dff872b9a254d7ae277cac6528bc3974
690,588
def gt10(val):
    """
    Predicate testing if a value is greater than 10
    """
    return val > 10
70fcfcdb444873fc586f4bf38e5167a5f8099eda
690,589
def gender(mention):
    """Compute gender of a mention.

    Args:
        mention (Mention): A mention.

    Returns:
        The tuple ('gender', GENDER), where GENDER is one of 'MALE',
        'FEMALE', 'NEUTRAL', 'PLURAL' and 'UNKNOWN'.
    """
    value = mention.attributes["gender"]
    return ("gender", value)
9b7fab2ca688662c5e3c7a5a24e05d6aa739ed15
690,591
def GetBytes(byte, size):
    """Get a string of bytes of a given size

    Args:
        byte: Numeric byte value to use
        size: Size of bytes/string to return

    Returns:
        A bytes type with 'byte' repeated 'size' times
    """
    return bytes([byte] * size)
887721f9777af3124d134be47b0a9959ed4b40af
690,592
import re


def string_to_tags_list(string):
    """
    Given a string representing tags in TiddlyWiki format parse them into
    a list of tag strings.

    Plain tags are whitespace-separated; multi-word tags are wrapped in
    ``[[double brackets]]``.
    """
    pattern = re.compile(r'([^ \]\[]+)|(?:\[\[([^\]]+)\]\])')
    tags = []
    for match in pattern.finditer(string):
        bracketed = match.group(2)
        if bracketed:
            tags.append(bracketed)
        elif match.group(1):
            tags.append(match.group(1))
    return tags
20b3df498304902000e37f822023ae2276eb18be
690,593
def unpack_byte_string(byte_string):
    """unpacks a byte string into its lowercase two-digit hex form"""
    return "".join(format(byte, "02x") for byte in byte_string)
7c372a4c0b2dc37b60afe832ab29b435a14e6da8
690,594