content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os


def check_filename(originalName):
    """Return a filename that does not collide with an existing file.

    If *originalName* does not exist it is returned unchanged; otherwise a
    counter is appended before the extension ("report.txt" -> "report_1.txt",
    "report_2.txt", ...) until an unused name is found.

    :param originalName: candidate file name or path
    :return: a name for which os.path.exists() is False
    """
    # os.path.splitext handles multi-dot names ("archive.tar.gz" -> suffix on
    # "archive.tar") and dotfiles (".bashrc" -> ".bashrc_1") correctly,
    # unlike the naive str.split(".") approach.
    root, ext = os.path.splitext(originalName)
    fileName = originalName
    count = 1
    while os.path.exists(fileName):
        fileName = "{}_{}{}".format(root, count, ext)
        count += 1
    return fileName
9611d9225def48e7fb6b2b578c60f4fe5bc1dc73
695,302
def and_(predicate1, predicate2):
    """DBC helper: build the conjunction of two predicates.

    The returned callable forwards all arguments to both predicates and
    short-circuits like the ``and`` operator.
    """
    def conjunction(*args, **kwargs):
        first = predicate1(*args, **kwargs)
        return first and predicate2(*args, **kwargs)
    return conjunction
a2586e3d9c9828f6af9fdf1300db601c1018b290
695,303
import unittest


def expected_failure_if(expect):
    """Unit-test decorator factory for conditionally expected failures.

    @param expect: Flag to check if failure is expected
    @type expect: bool
    @return: ``unittest.expectedFailure`` when *expect* is true, otherwise
        an identity decorator that returns the test unchanged.
    """
    if not expect:
        return lambda orig: orig
    return unittest.expectedFailure
4cf083167e44328811a8df1bb9b8fc55dd741661
695,304
def getDist(ind1, ind2, distMat):
    """Look up the distance from *ind1* to *ind2*.

    *distMat* is any mapping/array indexable as ``distMat[i, j]``, giving the
    distance from i to j.
    """
    key = (ind1, ind2)
    return distMat[key]
c9303fd806cd765295f437ee533952692f89702c
695,305
def sleep_onset_latency(predictions):
    """Calculate sleep onset latency (SOL) from per-minute sleep/wake predictions.

    SOL is the total number of minutes spent awake before the first sleep
    epoch (awake encoded as 1, sleep as 0).

    Parameters
    ----------
    predictions : array-like
        Binary sleep/wake predictions with ``argmin``/``sum`` methods
        (e.g. a numpy array). Awake encoded as 1 and sleep as 0.

    Returns
    -------
    sol : int
        Minutes awake before the first sleep epoch.

    NOTE(review): if *predictions* contains no 0 at all (subject never
    sleeps), ``argmin`` returns index 0 and the function reports 0 —
    confirm that is the intended convention for all-wake input.
    """
    # index of the first 0 (first sleep minute); argmin picks the earliest minimum
    first_sleep_epoch = predictions.argmin()
    # sum of 1s before that index == minutes awake
    sol = predictions[0:first_sleep_epoch].sum()
    return int(sol)
6595aba1d22d9555c8998c6bb940045951b2503c
695,306
def initialize_LIP_dict(LIP_feature_collection):
    """Build the initial LIP bookkeeping dictionary.

    Parameters
    ----------
    LIP_feature_collection : feature collection
        Iterable of LIP features exposing ``get_feature_id().get_string()``.

    Returns
    -------
    dict
        Maps each LIP feature id (string) to 1, the fraction remaining.
    """
    # Every LIP starts fully intact (fraction remaining == 1).
    return {
        feature.get_feature_id().get_string(): 1
        for feature in LIP_feature_collection
    }
e105b84781105599bc92a6e1eece9b4f8ef2e4e9
695,307
def version_tuple_to_str(version):
    """Join a version tuple into a dotted string, e.g. (1, 2, 3) -> '1.2.3'."""
    components = (str(part) for part in version)
    return '.'.join(components)
2567dd8481fe9dc6b1fc71cb7669aed204aaec9a
695,308
def _pat_mergeable(p1, p2): """ Compare two *AbstractionPattern* instances for equality regarding an interpretation merging operation. Evidence and hypothesis comparison is assumed to be positive, so only the automata and the initial and final states are compared. """ if p1 is None or p2 is None: return p1 is p2 return p1.automata is p2.automata and p1.istate == p2.istate and p1.fstate == p2.fstate
f0c418fd63784e5e6ea8cf02ac17ec315eba809d
695,309
import torch


def sample_pdf_2(bins, weights, num_samples, det=False):
    """Inverse-transform sampling of bin positions proportional to *weights*.

    sample_pdf function from another concurrent pytorch implementation by
    yenchenlin (https://github.com/yenchenlin/nerf-pytorch).

    Parameters: *bins* and *weights* are batched tensors (weights has one
    fewer trailing element than the CDF built below); *num_samples* samples
    are drawn per batch row, uniformly spaced if *det* else random.
    NOTE(review): exact expected shapes are not visible here — presumably
    bins is (batch, n_bins) and weights is (batch, n_bins-1); confirm at
    call sites.
    """
    # Small epsilon prevents division by zero when all weights are 0.
    weights = weights + 1e-5
    # Normalize to a PDF, then build a CDF with a leading 0 column.
    pdf = weights / torch.sum(weights, dim=-1, keepdim=True)
    cdf = torch.cumsum(pdf, dim=-1)
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], dim=-1)  # (batchsize, len(bins))
    # Take uniform samples in [0, 1): evenly spaced when det, random otherwise.
    if det:
        u = torch.linspace(0.0, 1.0, steps=num_samples, dtype=weights.dtype, device=weights.device)
        u = u.expand(list(cdf.shape[:-1]) + [num_samples])
    else:
        u = torch.rand(
            list(cdf.shape[:-1]) + [num_samples],
            dtype=weights.dtype,
            device=weights.device,
        )
    # Invert CDF: searchsorted needs contiguous tensors.
    u = u.contiguous()
    cdf = cdf.contiguous()
    inds = torch.searchsorted(cdf, u, right=True)
    # Clamp bracketing indices to the valid range [0, len(cdf)-1].
    below = torch.max(torch.zeros_like(inds - 1), inds - 1)
    above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds)
    inds_g = torch.stack((below, above), dim=-1)  # (batchsize, num_samples, 2)
    # Gather the CDF values and bin positions bracketing each sample.
    matched_shape = (inds_g.shape[0], inds_g.shape[1], cdf.shape[-1])
    cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
    bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
    # Linear interpolation within the bracketing interval; guard tiny denominators.
    denom = cdf_g[..., 1] - cdf_g[..., 0]
    denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom)
    t = (u - cdf_g[..., 0]) / denom
    samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
    return samples
e20ba8d000876278d28b732fd6bcd190cd649796
695,310
def Code_f(val):
    """Wrap *val* in a LaTeX ``lstlisting`` environment.

    :param val: The value of this Code
    :return: the LaTeX snippet as a string
    """
    opening = '\\begin{lstlisting}\n'
    closing = '\n\\end{lstlisting}\n'
    return opening + val + closing
594a913361853ccd64523adb79ca0c933c07d807
695,311
import numpy as np


def rms(x):
    """Return the root mean square of array *x* (replacement for the
    removed ``matplotlib.mlab.rms_flat``)."""
    mean_square = np.mean(x * x)
    return np.sqrt(mean_square)
e09ae59241ac41b58d6c1273a344b622ac05d092
695,312
def isAttrMirrored(attr, mirrorAxis):
    """Return True if transform channel *attr* flips sign under *mirrorAxis*.

    *mirrorAxis* is a 3-element list of +/-1 scale factors; each supported
    axis pattern negates a specific set of translate/rotate channels.
    """
    if mirrorAxis == [-1, 1, 1]:
        return attr in ('translateX', 'rotateY', 'rotateZ')
    if mirrorAxis == [1, -1, 1]:
        return attr in ('translateY', 'rotateX', 'rotateZ')
    if mirrorAxis == [1, 1, -1]:
        return attr in ('translateZ', 'rotateX', 'rotateY')
    if mirrorAxis == [-1, -1, -1]:
        return attr in ('translateX', 'translateY', 'translateZ')
    return False
560f20fd5e2110ce31fb9dddbf72fc1a8f19ca65
695,313
def pytest_report_header(config):
    """Return a string shown in the pytest report header.

    :param config: the pytest config object (unused).
    """
    # Fixed grammar: "this are" -> "these are".
    return "Hey these are the tests"
e64bf912f78e8524d99126569d0423c821158498
695,314
def firmValuationPE(annual_revenue: float, p_e_ratio: float) -> float:
    """Valuation by price-to-earnings (P/E) ratio.

    Earnings are sales minus costs. Some people prefer P/E over P/S as it
    captures profitability. However, companies plowing extra money into
    growth will have poor P/E. Eg Amazon.

    NOTE(review): despite the P/E name, the first parameter is
    *annual_revenue* (sales), so the product is effectively a
    price-to-sales style valuation — confirm whether earnings were
    intended as the input here.
    """
    # Simple multiple-based valuation: value = metric * ratio.
    return annual_revenue * p_e_ratio
55ae91c6e37468de8963b0d8c5754bb21aab81e0
695,315
def reportNumber(n, options, field=None):
    """Format integer *n* with the ``%g`` conversion.

    When *field* is given the number is right-padded to that width via
    ``%*g``. *options* is accepted for interface compatibility and is not
    used here.
    """
    if field is None:
        return "%g" % n
    return "%*g" % (field, n)
cdaa1e78a1031a39c37b2e72f771b0667819b172
695,316
def is_pic(img_name):
    """Return whether the path looks like an image file.

    Args:
        img_name (str): image path.

    Returns:
        bool: True when the suffix is one of JPEG/JPG/BMP/PNG
        (upper or lower case).
    """
    allowed = {'JPEG', 'jpeg', 'JPG', 'jpg', 'BMP', 'bmp', 'PNG', 'png'}
    suffix = img_name.split('.')[-1]
    return suffix in allowed
7c809234a486d9d9effe97ecdf0bb3cc561af9fc
695,317
def valid_moves(board):
    """Return a list of all valid moves in the position.

    A space is playable when it is not yet occupied by "X" or "O".
    """
    occupied = ("X", "O")
    return [space for space in board if space not in occupied]
1447314b16ab611ab796fcbb1e5582b98d6ae88e
695,318
def estimate_kpar(kpnts):
    """Estimate KPAR, the number of parallel processes among k-points,
    as the maximum value in *kpnts*."""
    largest = max(kpnts)
    return largest
659dcde496f08de761986b903e6497e76f22ecfc
695,319
def reverse_round(key):
    """Reverses the first operations of the round (before mixcolumn).

    *key* is a 16-byte value treated as 32 nibbles; the inverse ShiftRows
    permutation is applied and the nibbles are re-packed into 16 bytes.
    """
    # Unpack 16 bytes into 32 nibbles. For each byte, i % 2 == 0 yields the
    # low nibble first (shift 0), i % 2 == 1 the high nibble.
    state = [(key[i // 2] >> (4 * (i % 2))) & 0xf for i in range(32)]
    # reverse shiftrows: row i is rotated by (4 + i) positions.
    old_state = state.copy()
    for i in range(4):
        for j in range(8):
            # state[8 * i + j] = old_state[8 * i + (j + 4 + i) % 8]
            state[8 * i + ((j + 4 + i) % 8)] = old_state[8 * i + j]
    # Re-pack nibble pairs into bytes. NOTE(review): here state[2*i] becomes
    # the HIGH nibble, whereas unpacking put the LOW nibble at even indices —
    # presumably intentional for this cipher's layout; confirm against spec.
    final_state = [state[2 * i] << 4 | state[2 * i + 1] for i in range(16)]
    return bytes(final_state)
3b9e3246ff3cbb67016b7e4eb446e0dc4a38eadc
695,320
import logging


def filter_storage_size_num(size_str):
    """Convert a human-readable size string to a string convertible to float.

    Accepts values like ``"1.5G"`` or ``"100MB"`` (trailing ``B`` ignored);
    multipliers are decimal (1000-based), starting at 1e6 for ``M``.
    Raises ValueError (after logging) for unparsable input.
    """
    # pattern: '^[1-9][\d\.]*[MGT]B?$', multiplier=1000 (not KiB)
    if size_str.endswith('B'):
        size_str = size_str[:-1]
    try:
        factor = 1000000
        for suffix in ('M', 'G', 'T'):
            if size_str.endswith(suffix):
                return '{:.2f}'.format(factor * float(size_str[:-1]))
            factor = factor * 1000
        # No recognised suffix: plain number of bytes.
        return '{:.2f}'.format(float(size_str))
    except ValueError as ex:
        logging.error(size_str + " is not a valid size string")
        raise
145680ec35ce14a17721250156fcb6c368558c28
695,321
def welcome():
    """List all available api routes on the home page.

    Returns the HTML body for the index route. (Fixed "Diplay" -> "Display"
    typos in the user-facing text.)
    """
    return (
        f"Welcome to the Hawaii Climate 'Surfs Up' Home Page<br/>"
        f"Available Routes:<br/>"
        f"<br/>"
        f"List precipitation data with dates:<br/>"
        f"/api/v1.0/precipitation<br/>"
        f"<br/>"
        f"List precipitation stations names and location information:<br/>"
        f"/api/v1.0/stations<br/>"
        f"<br/>"
        f"List of temperature observations a year from the last data point:<br/>"
        f"/api/v1.0/tobs<br/>"
        f"<br/>"
        f"<br/>"
        f"DIRECTIONS (for last two Routes)<br/>"
        f"At browser address line, overtype <start> or <start>/<end> with date.<br/>"
        f"Use the following date format: 'yyyy-mm-dd' or 'yyyy-mm-dd'/'yyyy-mm-dd'.<br/>"
        f"<br/>"
        f"Display Minimum, Maximum, and Average temperatures from a given start date:<br/>"
        f"/api/v1.0/min_max_avg/&lt;start&gt;<br/>"
        f"<br/>"
        f"Display Minimum, Maximum, and Average temperatures from a given start and end date:<br/>"
        f"/api/v1.0/min_max_avg/&lt;start&gt;/&lt;end&gt;<br/>"
        f"<br/>"
        f"<br/>"
    )
43c9ec89fcaaaa4d14205da694271dc4a50f7b31
695,323
import os


def save_and_view_map(f_map, output_path):
    """Writes Folium Map to an output file and prints the path to the terminal.

    Saves the specified Folium Map as an .html file; the printed
    ``file://`` path can be opened in any browser.

    Args:
        f_map: A Folium Map object to be written.
        output_path: A string path ending in ``.html`` where the map
            should be saved.

    Returns:
        1 when done writing and printing the Folium map .html file.

    Raises:
        ValueError: if *output_path* does not end in ``.html``.
    """
    # Guard clause replaces the old `if ...: pass / else: raise` shape;
    # endswith also handles paths shorter than 5 characters safely.
    if not output_path.endswith('.html'):
        raise ValueError('output file must be an html')
    f_map.save(f"{output_path}")
    current_directory = os.getcwd()
    print("Map saved, please copy this file path into any browser: " +
          "file://" + current_directory + '/' + f"{output_path}")
    return 1
663f1f55d284c6049138465eff9848a47de2af7a
695,324
def _format_vendor_id(vendor_id: str) -> str: """Strips vendor name from vendor_id field. Example: >>> _format_vendor_id("0x1234 (Nice Vendor Inc.)") # "0x1234" """ return vendor_id.split(maxsplit=1)[0]
e181330ca164ea4fdbf6ea2e57e20b707351dcfc
695,325
def search_result(doc_info, index):
    """Render one search-engine result as an HTML fragment.

    Args:
        doc_info (list): document information; doc_info[0] is the file
            path and doc_info[1] the URL (only these two are used here)
        index (int): zero-based ranking; displayed as index + 1

    Returns:
        str: an HTML snippet for a single result
    """
    # NOTE(review): internal whitespace of this HTML template is cosmetic;
    # the original line breaks could not be recovered exactly from the
    # collapsed source.
    return f""" <div style="font-size: 125%; display: inline-flex; flex-direction: row"> <div> {index + 1}. &emsp; </div> <div> <div> File Path: {doc_info[0]} </div> <div> URL: <a href="{doc_info[1]}" target="_blank">{doc_info[1]}</a> </div> </div> </div> <br> <br> """
56378bc310343ed104a3e507e5295952b2cbc62f
695,326
import argparse


def _parse_args():
    """Parse the command-line arguments for running an experiment
    trial/series of trials.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    """ Add arguments below. Example format: parser.add_argument('-cp', '--continue_training_policy', action='store_true', help='A help message' ) parser.add_argument('--q1_checkpoint_filename', type=str, default='./q1_checkpoint.pth', help="Name of file to save and load" ) """
    parser = argparse.ArgumentParser(
        description="Arguments for Experience Replay project"
    )
    # Model related arguments here
    parser.add_argument("--env_name", type=str, default="CartPole-v1")
    # NOTE(review): dest is "alph_tune" (missing 'a') — callers read
    # args.alph_tune, so renaming would break them; flag stores False.
    parser.add_argument(
        "--no_alpha_tune", dest="alph_tune", default=True, action="store_false"
    )
    parser.add_argument("--eval_freq", type=int, default=10000)
    # Training related arguments here
    parser.add_argument("--rand_seed", type=int, default=1)
    parser.add_argument("--log_dir", type=str, default=None)
    parser.add_argument("-t", "--time_limit", type=int, default=10_000)
    parser.add_argument("-s", "--steps", type=int, default=1_000_000)
    parser.add_argument("-n", "--num_episodes", type=int, default=1000)
    parser.add_argument("-lr", "--learning_rate", type=float, default=1e-3)
    parser.add_argument("-c", "--continue_training", action="store_true")
    parser.add_argument("--batch_size", type=int, default=256)
    parser.add_argument("--buff_size", type=int, default=100000)
    # Reads sys.argv; exits with an error message on unknown arguments.
    args = parser.parse_args()
    return args
e83b1688883cad61769866ec3cf7ec161b1bcc0f
695,327
def user_mention(user_id: str) -> str:
    """Return a Discord message mention for *user_id*.

    Convenience for when the numeric id is known but no discord.User /
    discord.Member object is available.

    Raises:
        ValueError: if *user_id* is not numeric.
    """
    if not user_id.isnumeric():
        raise ValueError("Discord ID must be numeric")
    return f"<@{user_id}>"
bcd093e3d49db48dd32765b477f4f7438230b4fc
695,328
def read_players_positions(positions_file): """ Read players positions """ # Show info to CLI print("[Players Positions] Reading...") # Initialize dictionaries dict_timestamp = {} dict_game_frame = {} # Used to normalize data inside the same game_frame (each of 23 entry will have same timestamp) counter = 0 last_timestamp = 0 # Read file with open(positions_file, "r") as input_file: # For each line lines = input_file.readlines() for line in lines: # Parse and extract timestamp, game_frame, player, x, y = line.rstrip().split(' ') # Normalization phase inside 23 elements (same timestamp). Used to avoid missing BB in a frame. if (counter == 0): last_timestamp = timestamp else: timestamp = last_timestamp if (counter == 22): counter = -1 counter = counter + 1 # Create a list (if empty) if dict_timestamp.get(timestamp) is None: dict_timestamp[timestamp] = [] # Fill the list dict_timestamp[timestamp].append({ 'game_frame': game_frame, 'player': player, 'x': x, 'y': y }) # Create a list (if empty) if dict_game_frame.get(game_frame) is None: dict_game_frame[game_frame] = [] # Fill the list dict_game_frame[game_frame].append({ 'timestamp': timestamp }) # Show info to CLI print("[Players Positions] Loaded.", end='\r\n\r\n') return dict_game_frame, dict_timestamp
6a729c683849935d43d77631b17952a4eea6d375
695,329
def is_array(type_ir):
    """Return True when *type_ir* (a protobuf-like IR node) is an array type,
    i.e. its ``array_type`` field is set."""
    has_array_field = type_ir.HasField("array_type")
    return has_array_field
ac51de921484113d56923cea74324470706883b7
695,330
def parse_input(event):
    """Extract the fields the step function needs from *event*.

    Expects ``event["input"]`` to hold the request payload; returns a dict
    with the batch id, optional destination SNS ARN, and execution id.
    """
    request = event["input"]
    batch_id = request["transformation_step_output"]["batch_id"]
    sns_arn = request.get("destinationSnsArn")
    return {
        "batch_id": batch_id,
        "output_sns_arn": sns_arn,
        "execution_id": event["execution_id"],
    }
ed85b61e7c9e68dbbee910d7d6c1eaf342255aa0
695,331
import itertools


def flatten(l):
    """Flatten one level of nesting: a list of iterables becomes one list."""
    return [element for sublist in l for element in sublist]
3db376d039ca5b51ac10ea4ce673bd72b04b4b2b
695,332
def compute_wap(inner, outer):
    """Computes the wall area percentage (WAP) from the inner and outer
    airway measurements: 100 * (outer - inner) / outer."""
    wall_area = outer - inner
    return wall_area / outer * 100
379e0dcd1729e34e39295988c94640378d128103
695,333
from typing import Union
from pathlib import Path
import os


def build_path(path: Union[Path, str], path_is_absolute: bool = False) -> Path:
    """Build a Path instance from the arguments.

    :param path: path (string or Path)
    :param path_is_absolute: if True, use *path* as-is; otherwise it is
        resolved relative to the current working directory
    :return: Path instance
    """
    if path_is_absolute:
        # Normalise strings to Path; an existing Path passes through untouched.
        return Path(path) if isinstance(path, str) else path
    return Path(os.getcwd()) / path
92aaf148411a915663d3c1b8b3183d4edae7d243
695,334
def arrival_1():
    """Constant inter-arrival time: packets arrive every 1.5 seconds."""
    interval_seconds = 1.5
    return interval_seconds
2c7a6cee521ae18ce06f07b8b694c322fadd55ea
695,335
def pkcs5_unpad(data):
    """Strip PKCS#5 padding from *data* and return the unpadded bytes.

    The last byte encodes the pad length; that many trailing bytes are
    removed. No validation of the pad bytes is performed.
    """
    raw = bytes(data)
    pad_length = raw[-1]
    return raw[0:-pad_length]
7058f51e456c8dbe8b4c9c4cd9e26bc7f27efaf6
695,336
from typing import Counter


def mostCommon(armies):
    """Return (as a string) a most-common army that is not defeated, or ""
    if the top two are all defeated / *armies* is empty.

    NOTE(review): ``common.pop()`` takes the LAST of the up-to-two
    surviving candidates, i.e. the *second* most common when both top
    armies survive — confirm whether ``common[0]`` was intended.
    """
    # Keep only the (up to 2) most frequent armies that are still alive.
    common = [a for a, c in Counter(armies).most_common(2) if not a.isDefeated()]
    if len(common):
        return str(common.pop())
    else:
        return ""
596e1b772a7fbb1e048e3c17c0e0a08a0d0f2b2d
695,337
import os


def clean_path(path):
    """Get a file path for the FLAC file from a FLACCue path.

    Notes
    -----
    Files accessed through FLACCue will still read normally; this only
    trims the ``.flaccuesplit.<times>`` segment out of the path, keeping
    a real file extension if one follows the times.
    """
    marker = '.flaccuesplit.'
    if marker not in path:
        return path
    pieces = path.split(marker)
    _, extension = os.path.splitext(pieces[1])
    try:
        # A real extension never parses as an int (nor as ints separated
        # by ':'); if it does, there is no extension to keep.
        int(extension.split(':')[0])
        extension = ''
    except ValueError:
        pass
    return pieces[0] + extension
1b21cf166c4ff072050538a4bb391a0a48b5c456
695,338
import re def remove_comments(string): """ Removes /**/ and // comments from a string (used with the control script). From http://stackoverflow.com/questions/2319019/using-regex-to-remove-comments-from-source-files """ string = re.sub(re.compile("/\*.*?\*/",re.DOTALL ) ,"" ,string) # remove all occurance streamed comments (/*COMMENT */) from string string = re.sub(re.compile("//.*?\n" ) ,"" ,string) # remove all occurance singleline comments (//COMMENT\n ) from string return string
3f04a554de57517951d65396e998e29dd18b22b8
695,339
def dash_case(name):
    """Convert a camel case string to dash case.

    Example:
        >>> dash_case('SomeName')
        'some-name'
    """
    result = ""
    for ch in name:
        # Insert a dash before an uppercase letter unless we are at the
        # start or a dash is already there.
        if ch.isupper() and result and not result.endswith("-"):
            result += "-"
        result += ch.lower()
    return result
5fbe7aa6f3e0b063a572e57b4b3000bb7835355f
695,340
import string


def tamper(payload, **kwargs):
    """Percent-encode every non-alphanumeric character of *payload* as an
    overlong UTF-8 style ``%C0%XX`` sequence, leaving already percent-encoded
    triplets untouched.

    Reference: https://www.acunetix.com/vulnerabilities/unicode-transformation-issues/

    >>> tamper('SELECT FIELD FROM TABLE WHERE 2>1')
    'SELECT%C0%AAFIELD%C0%AAFROM%C0%AATABLE%C0%AAWHERE%C0%AA2%C0%BE1'
    """
    retVal = payload
    if payload:
        retVal = ""
        i = 0
        while i < len(payload):
            # Pass through an existing %XX escape unchanged (slicing avoids
            # IndexError near the end of the string).
            if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
                retVal += payload[i:i + 3]
                i += 3
            else:
                if payload[i] not in (string.ascii_letters + string.digits):
                    # Overlong encoding: 0x8A | ord(ch) forms the low byte.
                    retVal += "%%C0%%%.2X" % (0x8A | ord(payload[i]))
                else:
                    retVal += payload[i]
                i += 1
    return retVal
e2215bc0e23a8deab52d211a72ca3c3efef968c8
695,341
from datetime import datetime


def get_formatted_updated_date(str_date):
    """Parse an ISO-like timestamp such as ``2015-08-21T13:11:39.335Z``
    into a naive :class:`datetime.datetime`."""
    fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
    return datetime.strptime(str_date, fmt)
b0d20010a1748d470d23452c959f18aa124d9ddb
695,342
import numpy


def triangle_bump(t_array, t_start, t_stop, peak_value):
    """Creates a symmetrical triangular bump starting at t_start and ending
    t_stop with peak height of peak_value.

    Arguments:
      t_array = array of time points (assumed uniformly spaced — dt is
                taken from the first two samples)
      t_start = time at which bump starts
      t_stop = time at which bump stops
      peak_value = the peak height of the bump.

    Returns:
      value_array = array of function values.

    NOTE(review): the current value is appended BEFORE it is updated, so
    the waveform lags the ramp by one sample — confirm this one-step
    delay is intended.
    """
    # Uniform step assumed; the ramp slope reaches peak_value at the midpoint.
    dt = t_array[1] - t_array[0]
    t_peak = 0.5*(t_start + t_stop)
    rate = peak_value/(t_peak - t_start)
    value_list = []
    value = 0.0
    for t in t_array:
        value_list.append(value)
        if t > t_start and t <= t_peak:
            # Rising edge.
            value += rate*dt
        elif t > t_peak and t <= t_stop:
            # Falling edge.
            value -= rate*dt
        else:
            # Outside the bump the signal is reset to zero.
            value = 0.0
    value_array = numpy.array(value_list)
    return value_array
2720f7315757c4ce032907fde02b0ae3bc8ac6f8
695,343
def is_on_filesystem(entry, filesystem):
    """Check whether *entry* sits on a given filesystem type.

    :param entry: a dfvfs PathSpec object
    :param filesystem: dfvfs type indicator string
    :return: True if *filesystem* appears anywhere in the parent chain of
        *entry* (including *entry* itself)
    """
    node = entry
    while True:
        if node.type_indicator == filesystem:
            return True
        # Stop once the chain runs out of parents.
        if not node.parent:
            return False
        node = node.parent
d402edf7629c05be4308e04965b9b54e1c9a3272
695,344
import struct


def ReadCollapsedSequences(trace, cache_filename):
    """Read the collapsed sequences from file if they exist.

    Binary layout (all little-endian per struct defaults, 8-byte ints):
    per-node forward mapping, reduced-node count, backwards mapping
    (variable-length lists), labels, fixed-width label names, then edges.

    @params trace: trace object providing .nodes and .dataset
    @params cache_filename: the file that contains the cached data.
    @return: (node_mapping, reduced_nodes_to_nodes, node_labels,
              node_label_names, edges)
    """
    # get the dataset for this trace (controls the fixed label-name width)
    dataset = trace.dataset
    # first read the node mapping
    nnodes = len(trace.nodes)
    node_mapping = {}
    reduced_nodes_to_nodes = {}
    node_labels = []
    node_label_names = []
    edges = []
    with open(cache_filename, 'rb') as fd:
        # read the forward node mapping: one int64 per original node
        for iv in range(nnodes):
            mapped_value, = struct.unpack('q', fd.read(8))
            node_mapping[iv] = mapped_value
        # read the backwards mapping: for each reduced node, a count then
        # that many original-node ids
        nreduced_nodes, = struct.unpack('q', fd.read(8))
        for iv in range(nreduced_nodes):
            reduced_nodes_to_nodes[iv] = []
            no_nodes_reduced, = struct.unpack('q', fd.read(8))
            for ip in range(no_nodes_reduced):
                node, = struct.unpack('q', fd.read(8))
                reduced_nodes_to_nodes[iv].append(node)
        # one int64 label per reduced node
        for iv in range(nreduced_nodes):
            label, = struct.unpack('q', fd.read(8))
            node_labels.append(label)
        # maximum bytes for a label name depends on the dataset
        if dataset == 'openstack':
            max_bytes = 196
        elif dataset == 'xtrace':
            max_bytes = 64
        else:
            assert (False)
        # read the node label names: fixed-width, NUL-padded strings
        for iv in range(nreduced_nodes):
            node_label_name_bytes, = struct.unpack('%ds' % max_bytes, fd.read(max_bytes))
            node_label_name = node_label_name_bytes.decode().strip('\0')
            node_label_names.append(node_label_name)
        # read the edges: count, then (source, destination) int64 pairs
        nedges, = struct.unpack('q', fd.read(8))
        for _ in range(nedges):
            source_index, destination_index, = struct.unpack('qq', fd.read(16))
            edges.append((source_index, destination_index))
    return node_mapping, reduced_nodes_to_nodes, node_labels, node_label_names, edges
9ce5e5087d2daa40d90c8c3549b8cab7277ad1ee
695,345
def ComputeSemiMinorAxis( efit, smarange=None ):
    """For Bender-style ellipse-fits only! Recompute the semi-minor axis b
    in place, from ellipticity and semi-major axis: b = a * (1 - eps).

    Optionally restrict the update to semi-major axis values within
    ``smarange`` (inclusive on both ends). IRAF-style fits (which carry an
    "intens" key) are rejected with a message and ``None`` is returned.
    """
    if "intens" in efit.keys():
        print("*** Ellipse fit appears to be IRAF-style! ***")
        return None
    sma = efit['a']
    ell = efit['eps']
    n = len(sma)
    if smarange is None:
        indices = range(n)
    else:
        lo, hi = smarange[0], smarange[1]
        indices = [k for k in range(n) if lo <= sma[k] <= hi]
    for k in indices:
        efit['b'][k] = sma[k] * (1.0 - ell[k])
1465175bd83499e1369c90cd7d2570e117abe941
695,346
def fake_join(a, *p):
    """POSIX-style path join (lifted from os.path for testing purposes).

    An absolute component resets the path; otherwise components are joined
    with a single '/'.
    """
    path = a
    for segment in p:
        if segment.startswith('/'):
            # Absolute segment discards everything accumulated so far.
            path = segment
        elif not path or path.endswith('/'):
            path += segment
        else:
            path = path + '/' + segment
    return path
824b9cef3585f6bf9985931f9fcf3a0315acd614
695,348
def remove_return(seq):
    """Remove newline, carriage-return and tab characters.

    args:
        seq: String
    output:
        String with all "\\n", "\\r" and "\\t" removed.
    """
    for ch in ("\n", "\r", "\t"):
        seq = seq.replace(ch, "")
    return seq
6179c18d0c1719538abd9dc7f753c627db2e02fa
695,349
import re


def extract_value(content, key, is_int_value=True, delimiter='=', throw_not_found=False, default_value=-1):
    """Extract a key's value from *content*; the value may be an int or string.

    Args:
        content (str): the full given text content
        key (str): the wanted key to be searched in the given content
        is_int_value (bool): if True, match and parse a positive-integer
            value (``\\d+``); otherwise match a non-whitespace token
        delimiter (str): the separator between the key and its value
        throw_not_found (bool): raise LookupError when the key is missing
        default_value (any): returned when the key is missing and
            *throw_not_found* is false

    Raises:
        LookupError: throw_not_found is true and key could not be found
        ValueError: parsing the extracted value failed

    Returns:
        (int|str): the extracted value
    """
    # Fixes: the delimiter parameter is now actually used in the pattern
    # (it was hard-coded to '='), and key/delimiter are escaped so regex
    # metacharacters in them match literally.
    value_pattern = r'\d+' if is_int_value else r'\S+'
    pattern = re.escape(key) + re.escape(delimiter) + value_pattern
    match = re.search(pattern, content)
    if not match:
        if throw_not_found:
            raise LookupError(f'"{key}{delimiter}" is not present in the given content')
        return default_value
    value = match.group(0).split(delimiter)[1]
    try:
        return int(value) if is_int_value else str(value)
    except ValueError:
        # Typo fixed: "accourd" -> "occurred".
        raise ValueError('an error occurred while extraction.')
29386c9995d042f7c36118ca80cb4f2f335accfc
695,350
def line_has_sep(line):
    """Return True when the line has a ``-`` before any ``=``.

    A line with no ``-`` is never a separator; a line with ``-`` but no
    ``=`` always is.
    """
    dash_pos = line.find('-')   # not header
    eq_pos = line.find('=')     # header
    if dash_pos == -1:
        return False
    if eq_pos == -1:
        return True
    return dash_pos < eq_pos
61b9a8fa77dda3197abf1765cf50801f90d82251
695,351
import collections


def get_history_naming_package_numbers(data, commit, date):
    """Count packages per naming-policy-violation status for one commit.

    Returns one row per reported status ('Misnamed Subpackage',
    'Ambiguous Requires', 'Blocked'), each carrying the commit, date,
    status name and package count. ('OK' packages are counted but not
    reported.)
    """
    def _classify(package):
        # First matching violation wins, mirroring the reporting priority.
        if package['is_misnamed']:
            return 'Misnamed Subpackage'
        if package['blocked_requires']:
            return 'Blocked'
        if package['unversioned_requires']:
            return 'Ambiguous Requires'
        return 'OK'

    progress = collections.Counter(
        _classify(package) for package in data['packages'].values()
    )
    return [
        {
            'commit': commit,
            'date': date,
            'status': status_name,
            'num_packages': progress[status_name],
        }
        for status_name in ('Misnamed Subpackage', 'Ambiguous Requires', 'Blocked')
    ]
a50e576341100993b28e8ce7b395616ac6082196
695,353
import torch


def getOptimizer(model, lr, weight_decay):
    """Build the Adadelta optimizer used for training *model*."""
    parameters = model.parameters()
    return torch.optim.Adadelta(parameters, lr=lr, weight_decay=weight_decay)
20b878320692b44aa512d47f0b4d736acb1f86ea
695,354
def find_present_and_absent_index(src_str, keyphrase_str_list, use_name_variations=False):
    """Split keyphrase indices into those present in / absent from the source.

    :param src_str: stemmed word list of source text
    :param keyphrase_str_list: stemmed list of word lists; when
        *use_name_variations* is true each entry is a list of variations
        and only the first variation is checked
    :return: (present_indices, absent_indices) — index lists into
        *keyphrase_str_list*; empty keyphrases are counted as absent
    """
    # kept for parity with the commented-out numpy bookkeeping below
    num_keyphrases = len(keyphrase_str_list)
    #is_present = np.zeros(num_keyphrases, dtype=bool)
    present_indices = []
    absent_indices = []
    for i, v in enumerate(keyphrase_str_list):
        if use_name_variations:
            keyphrase_word_list = v[0]
        else:
            keyphrase_word_list = v
        joined_keyphrase_str = ' '.join(keyphrase_word_list)
        if joined_keyphrase_str.strip() == "":
            # if the keyphrase is an empty string
            #is_present[i] = False
            absent_indices.append(i)
        else:
            # check if it appears in source text: naive sliding-window
            # exact word-by-word match at every start position
            match = False
            for src_start_idx in range(len(src_str) - len(keyphrase_word_list) + 1):
                match = True
                for keyphrase_i, keyphrase_w in enumerate(keyphrase_word_list):
                    src_w = src_str[src_start_idx + keyphrase_i]
                    if src_w != keyphrase_w:
                        match = False
                        break
                if match:
                    break
            if match:
                #is_present[i] = True
                present_indices.append(i)
            else:
                #is_present[i] = False
                absent_indices.append(i)
    return present_indices, absent_indices
4d59d27da68def7af02ea9e5d658330a6e32525b
695,356
import warnings


def push(root: dict, item: str) -> dict:
    """Inplace 'pushing' of *item* into *root* at the first free integer
    key in the range 0..1000.

    Args:
        root(dict): Root dictionary to push `item` in.
        item(any): Item to be put into the dictionary.

    Returns:
        dict: `root`

    Raises:
        Exception: when more than 1000 indices are already occupied.
    """
    warnings.warn(
        "The method `push` will be deprecated in future releases.",
        DeprecationWarning
    )
    index = 0
    while index in root:
        index += 1
        if index > 1000:
            # Typo fixed: "To many" -> "Too many".
            raise Exception("Too many items in the dictionary for appending.")
    root[index] = item
    return root
b6a24d2330020c4c7b71fb17a852f3e3e61a8a70
695,357
def deselect(elements=None):
    """Deselect the given elements.

    If no elements are passed then all elements are deselected.

    :param elements: List of elements to deselect. If none are given then
        all elements are deselected.
    :type elements: list(Element, Element, ...)
    :return: None
    """
    # Stub implementation: the backend performs no work here.
    return None
36f730f5cf95d976fa0b49d7be62a54789781d05
695,358
import json


def json_decode(data):
    """Decodes the given JSON as primitives.

    Accepts either a str or UTF-8 encoded bytes. The `six` dependency was
    removed: on Python 3 `six.binary_type` is simply `bytes`.
    """
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    return json.loads(data)
8a1593790fefbffa016168a23a30300ba1751dcd
695,359
import os


def get_sub_list(file_list, dirname: str):
    """Strip the parent directory prefix from each path in *file_list*,
    in place: ``file_list = file_list - dirname``.

    :param file_list: list of path strings (mutated and also returned)
    :param dirname: parent directory to remove (trailing separator allowed)
    :return: the same list with matching entries relativized

    Fix: the previous substring test (``dirname in f``) stripped from the
    FRONT of a path whenever *dirname* occurred anywhere inside it; now
    only true prefixes followed by a separator are rewritten.
    """
    # Drop a single trailing separator, as before.
    dirname = dirname[:-len(os.sep)] if dirname.endswith(os.sep) else dirname
    prefix = dirname + os.sep
    for i, f in enumerate(file_list):
        if f.startswith(prefix):
            file_list[i] = f[len(prefix):]
    return file_list
e27195807cd264852aed286f16bd5894655d9790
695,360
def _w_long(x): """Convert a 32-bit integer to little-endian.""" return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
4cd2c9b3e57f8c4dbd100ef00af6cc51480dc683
695,361
def Split_Info(info):
    """Split the needed fields out of a VCF INFO column value.

    Parameters
    ----------
    info : str
        Semicolon-separated ``KEY=VALUE`` INFO string.

    Returns
    -------
    dict
        Only the entries whose text contains one of the wanted tags
        (QD, MQ, MQRankSum, FS, ReadPosRankSum, SOR).
    """
    wanted_tags = ('QD=', 'MQ=', 'MQRankSum=', 'FS=', 'ReadPosRankSum=', 'SOR=')
    parts = {}
    for chunk in info.split(';'):
        if any(tag in chunk for tag in wanted_tags):
            key, value = chunk.split('=')
            parts[key] = value
    return parts
e58d2dad51d34a7644a7d5bf307449194aec9ca3
695,362
from typing import List
import random


def injectFaults(mat: List[List[int]], error_map: List[List[float]], b: int) -> int:
    """Inject faults into an MLC matrix, in place.

    Args:
        mat (list): Matrix of cell values to inject faults into (mutated)
        error_map (list): per-value pair [P(drift down), P(drift up)]
        b (int): Bits per cell. NOTE(review): currently unused — values
            are not clamped to the b-bit range, so drift can leave the
            valid cell-value range; confirm whether clamping was intended.

    Returns:
        int: Number of injected errors in the matrix
    """
    err_count = 0
    for i in range(len(mat)):
        for j in range(len(mat[0])):
            val = mat[i][j]
            new_val = val
            # Drift probabilities are looked up by the cell's current value.
            err_prob_l = error_map[val][0]
            err_prob_h = error_map[val][1]
            rand = random.random()
            if rand < err_prob_l:
                new_val -= 1
            # NOTE(review): rand == err_prob_l falls through both branches
            # (measure-zero for floats, but the boundary is exclusive twice).
            elif err_prob_l < rand < err_prob_l + err_prob_h:
                new_val += 1
            if val != new_val:
                err_count += 1
            mat[i][j] = new_val
    return err_count
b7c158c6bd5fe856115af13e652b513a2aadae5f
695,363
import time


def get_download_file_name(node_type, node_name, slot):
    """Build the .npy download file name for a node/slot.

    The embedded timestamp is the current time in centiseconds
    (time.time() * 100, rounded).
    """
    timestamp_carry = 100
    stamp = round(time.time() * timestamp_carry)
    return f"{node_type}.{node_name}.0.0.{stamp}.output.{slot}.NONE.npy"
73859916094de59958626cdb9e5cea00403758ab
695,364
def add_dividers(row, divider, padding):
    """Join a row of cells with a padded divider and return the string.

    Each divider is surrounded by *padding* spaces on both sides.
    """
    pad = padding * ' '
    separator = pad + divider + pad
    return separator.join(row)
7cbe235ddf8c320cadfcc4b4a3f17a28c2aaac1c
695,365
import re


def name2label(name, schema=None):
    """Convert a column name to a human readable label
    (borrowed from old TG fastdata code).

    Steps: drop a leading ``schema.`` prefix, split the remainder into
    words (snake_case and CamelCase boundaries), capitalize each word and
    concatenate. Note: I *think* it would be thread-safe to memoize this
    thing.
    """
    if schema:
        prefix = schema + '.'
        if name.startswith(prefix):
            name = '.'.join(name.split('.')[1:])
    words = re.findall(r'([A-Z][a-z0-9]+|[a-z0-9]+|[A-Z0-9]+)', name)
    return ''.join(word.capitalize() for word in words)
7d939514acaabec18790380f13bb042344986508
695,366
def is_transpose_identity(perm):
    """Tells if the permutation *perm* does nothing (identity).

    :param perm: permutation (any sequence of indices)
    :return: boolean
    """
    return all(position == value for position, value in enumerate(perm))
09bc1fd0577297b1f9450c7f2f215197ae8ce3ee
695,367
import math


def rta3(rts):
    """RTA3 response-time analysis for a fixed-priority task set.

    "Computational Cost Reduction for Real-Time Schedulability Tests
    Algorithms" — http://ieeexplore.ieee.org/document/7404899/

    *rts* is a priority-ordered list of task dicts with keys "C" (WCET),
    "T" (period) and "D" (deadline). Returns ``[schedulable, wcrt]`` where
    wcrt holds per-task worst-case response times (0 from the first
    unschedulable task onward).
    """
    wcrt = [0] * len(rts)
    # a[j]: accumulated interference workload of task j; i[j]: next release time.
    a = [0] * len(rts)
    i = [0] * len(rts)
    schedulable = True
    flag = True  # NOTE(review): never used afterwards — dead state?
    for idx, task in enumerate(rts):
        a[idx] = task["C"]
        i[idx] = task["T"]
    # Highest-priority task's response time is its own WCET.
    t = rts[0]["C"]
    wcrt[0] = rts[0]["C"]
    for idx, task in enumerate(rts[1:], 1):
        # Seed the fixed-point iteration from the previous task's time.
        t_mas = t + task["C"]
        while schedulable:
            t = t_mas
            # Add interference from higher-priority tasks, highest index first.
            for jdx, jtask in zip(range(len(rts[:idx]) - 1, -1, -1), reversed(rts[:idx])):
                if t_mas > i[jdx]:
                    tmp = math.ceil(t_mas / jtask["T"])
                    a_tmp = tmp * jtask["C"]
                    t_mas += (a_tmp - a[jdx])
                    if t_mas > task["D"]:
                        # Deadline exceeded: the task set is unschedulable.
                        schedulable = False
                        break
                    a[jdx] = a_tmp
                    i[jdx] = tmp * jtask["T"]
            if t == t_mas:
                # Fixed point reached: t is the worst-case response time.
                break
        wcrt[idx] = t
        if not schedulable:
            wcrt[idx] = 0
            break
    return [schedulable, wcrt]
3b23678731ca4b9eeaf9076a346af724073f62de
695,368
def Get(x, start, end=None, step=None):
    """
    iterable >> Get(start, end, step)

    Extract elements from an indexable input.  Equivalent to slicing
    ``x[start:end:step]``; with no *end* a single element ``x[start]``
    is returned, and ``start=None`` returns *x* unchanged.

    >>> Get((1, 2, 3), 1)
    2
    >>> Get((1, 2, 3), 0, 2)
    (1, 2)
    >>> Get((1, 2, 3), 0, 3, 2)
    (1, 3)
    >>> Get((1, 2, 3), None)
    (1, 2, 3)

    :param indexable x: Any indexable input
    :param int start: Start index; if None, x is returned unchanged
    :param int end: End index (not inclusive)
    :param int step: Step index (same as slicing)
    :return: Extracted elements
    :rtype: object|list
    """
    if start is None:
        return x
    if end:
        return x[slice(start, end, step)]
    return x[start]
7d46a94aca43f8d2bea691df4f1c3d6498bd3339
695,369
from typing import List
import difflib


def compare_files(path1: str, path2: str) -> List[str]:
    """Return the line-level delta between two files.

    Lines are compared with ``difflib.ndiff``; only lines flagged as
    removed ("-"), added ("+") or annotated ("?") are returned --
    unchanged lines are filtered out.

    Fix: the original opened both files without ever closing them;
    ``with`` guarantees the handles are released.

    Args:
        path1 (str): Path to first file
        path2 (str): Path to second file

    Returns:
        List[str]: Delta between the two files
    """
    with open(path1) as first, open(path2) as second:
        left = first.read().splitlines()
        right = second.read().splitlines()
    diff = difflib.ndiff(left, right)
    return [line for line in diff if line[0] in ["-", "+", "?"]]
2f8df203f3db161313ab2427f17e5db964f27f25
695,370
def append_custom_fields(json_blob):
    """Strip the ``x_mitre_collections`` custom field from every
    object's attributes.

    Mutates the objects in place and returns the same list.
    """
    for entry in json_blob:
        entry['attributes'].pop('x_mitre_collections', None)
    return json_blob
e62a0d7c64391052921905f25d8442f0114e91ac
695,371
import argparse


def get_argparser_ctor_args():
    """
    Return the kwargs dict for constructing this command's
    argparse.ArgumentParser (used either directly or as a subparser).
    """
    # Description is shown when the command's help is queried directly;
    # help is shown when the parent CodeChecker command lists subcommands.
    description = ("Print the version of CodeChecker package that is "
                   "being used.")
    help_text = "Print the version of CodeChecker package that is being used."
    return {
        'prog': 'CodeChecker version',
        'formatter_class': argparse.ArgumentDefaultsHelpFormatter,
        'description': description,
        'help': help_text,
    }
5b3444d7e07e645de9ea892236272df02e3c2857
695,372
def _switch(mthread, local, sync, mproc): """ A construct needed so we can parametrize the executor fixture. This isn't straightforward since each executor needs to be initialized in slightly different ways. """ execs = dict(mthread=mthread, local=local, sync=sync, mproc=mproc) return lambda e: execs[e]
1ee45a6faf29d46e4ecc76fd50a5f92735d66107
695,373
import torch


def logaddexp(x, y):
    """Compute log(e^x + e^y) element-wise in a numerically stable way
    by factoring out the element-wise maximum.

    Both tensors must have the same shape.

    Arguments
    ---------
    x : :class:`torch:torch.Tensor`
    y : :class:`torch:torch.Tensor`"""
    m = torch.max(x, y)
    return m + torch.log(torch.exp(x - m) + torch.exp(y - m))
e7160562fd0d3db6d7596577d42aee5116470051
695,374
def takes_router(func):
    """
    Decorator marking a function or class method so that, when invoked
    via :data:`CALL_FUNCTION <mitogen.core.CALL_FUNCTION>`, it
    automatically receives a `router` kwarg referencing the active
    :class:`mitogen.core.Router`.  Direct callers must still pass
    `router` explicitly.
    """
    setattr(func, 'mitogen_takes_router', True)
    return func
113fe6b719867ccb3841eda426e8ec1cc04f89c1
695,375
def word_count(text):
    """
    Count the whitespace-separated words in *text*.

    Walks the string exactly once with constant memory; empty or falsy
    input counts as zero words.
    """
    if not text:
        return 0
    total = 0
    previous_was_space = True
    for ch in text:
        is_space = ch.isspace()
        if previous_was_space and not is_space:
            total += 1
        previous_was_space = is_space
    return total
18aef06c928ab9db30ba5c0c062dfa068180c99a
695,376
def find_homology(long_seq, short_seq):
    """
    Return the window of *long_seq* (same length as *short_seq*) with
    the most position-wise character matches against *short_seq*.

    Ties keep the earliest window; with zero matches the result is ''.

    :param long_seq: str, the base DNA sequence to search in (upper case)
    :param short_seq: str, the DNA sequence to match (upper case)
    :return: the best-matching homology window in long_seq
    """
    best_window = ''
    best_score = 0
    window_len = len(short_seq)
    for start in range(len(long_seq) - window_len + 1):
        score = sum(
            1 for offset in range(window_len)
            if long_seq[start + offset] == short_seq[offset]
        )
        if score > best_score:
            best_score = score
            best_window = long_seq[start:start + window_len]
    return best_window
11dedba1584003174d9c8f8ba51adf99dde17f8c
695,378
def convert_units(arg, unit):
    """Convert a simtk Quantity to the given unit, checking compatibility.

    Args:
        arg (Quantity): quantity to be converted
        unit (Unit): target unit (must be dimensionally compatible)

    Returns:
        Quantity: the value expressed in the new unit
    """
    factor = arg.unit.conversion_factor_to(unit)
    scaled = arg * factor
    return scaled._value * unit
a5148d66247c41089bd01c11f7debfb955d67119
695,379
def omniscidb_to_ibis_dtype(omniscidb_dtype):
    """
    Translate an OmniSciDB data type into its ibis equivalent.

    Parameters
    ----------
    omniscidb_dtype : OmniSciDBDataType

    Returns
    -------
    ibis.expr.datatypes.DataType
    """
    ibis_dtype = omniscidb_dtype.to_ibis()
    return ibis_dtype
598eeb9ec7efc495f19035145054621b94b8f069
695,380
def _canonicalize_extension(ext): """Returns a transformed ext that has a uniform pattern. Specifically, if ``ext`` has a leading . then it is simply returned. If ``ext`` doesn't have a leading . then it is prepended. Exceptions to this are if ``ext`` is ``None`` or "". If ``ext`` is "" then "" is return. If ``ext`` is None then None is returned. :param ext: The extension to canonicalize. :returns: The canonicalized extension. """ if ext is None or ext == "" or ext.startswith("."): return ext return "." + ext
935e85fd9a0f1bcfadc68c2390446ecbc814a0bc
695,381
def min_laboratories(**kwargs):
    """
    Report the vessel(s) that arrived at the port with the heaviest cargo.

    Keyword arguments map vessel names to total cargo weight (tons);
    every vessel whose weight equals the maximum is printed.  Returns
    None when no vessels are given.
    """
    if not kwargs:
        return None
    heaviest = max(kwargs.values())
    for vessel, weight in kwargs.items():
        if weight == heaviest:
            print(
                f"Наибольший вес груза - ({weight} тонн) "
                f" у судна: {vessel}"
            )
9d429dd6ebb65bd71c31644d231c2a878d71ce3a
695,382
def get_dictionary_from_list(list_to_search, key, search_value):
    """
    Find the first dictionary in a list whose *key* equals *search_value*.

    Every dict inspected must contain *key* (a missing key raises
    KeyError, matching the original lookup behavior).

    Parameters
    ----------
    list_to_search: list
        List of dictionaries to search in
    key: str
        The key in the dictionaries to look for the value
    search_value: str
        The key's value you are looking to match

    Returns
    -------
    The matching dictionary, or None when no dict matches
    """
    return next(
        (candidate for candidate in list_to_search
         if candidate[key] == search_value),
        None,
    )
9683ccaa9e0b0310aadc519f0067c921112f820c
695,383
def cname(s):
    """Return *s* upper-cased with '-', '+' and '.' replaced by '_',
    making the name C-friendly."""
    translated = s.translate(str.maketrans("-+.", "___"))
    return translated.upper()
7ec03dd504f67a897d53280440d09454e5a0b601
695,384
import logging
import sys


def gen_logger(name, log_level):
    """Create a stdout logger intended for use between processes.

    :returns: Logging instance.
    """
    fmt = ('\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'
           '%(lineno)d %(asctime)s\033[0m| %(message)s')
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(fmt))
    logger = logging.getLogger(name)
    logger.setLevel(log_level)
    logger.addHandler(handler)
    return logger
cc3f5bef62ea5d9cf97f3ed03bfe4f44dddbe451
695,385
import subprocess
import json


def read_go_deps(main_packages, build_tags):
    """
    Return the Go module dependencies of *main_packages* as a list of
    module JSON objects sorted by module path.  Main modules and
    standard-library packages are excluded.  Unlike `go list -m all`,
    modules required only for running tests are omitted because the
    dependency walk starts from the main packages.
    """
    cmd = ["go", "list", "-deps", "-json"]
    if build_tags:
        cmd += ["-tags", build_tags]
    raw = subprocess.check_output(cmd + main_packages).decode("utf-8")

    # `go list -json` emits a stream of concatenated JSON objects, so
    # decode them one at a time with raw_decode.
    decoder = json.JSONDecoder()
    modules = {}
    remaining = raw.strip()
    while remaining:
        pkg, consumed = decoder.raw_decode(remaining)
        remaining = remaining[consumed:].strip()
        if 'Standard' in pkg:
            continue
        module = pkg['Module']
        if "Main" not in module:
            modules[module['Path']] = module
    return sorted(modules.values(), key=lambda module: module['Path'])
cb1f9ef5bf2f3d0090287521129d7e77fde0fc73
695,386
def list_stringify(inlist):
    """Recursively rebuild *inlist* with every non-list member converted
    to a string.

    Useful before writing out lists.  Used by makelist."""
    result = []
    for member in inlist:
        if isinstance(member, list):
            result.append(list_stringify(member))
        elif isinstance(member, str):
            result.append(member)
        else:
            result.append(str(member))
    return result
92ae1e1e08d195b7af282d62986045d661663f90
695,387
def message_relative_index(messages, message_id):
    """
    Binary-search a channel's message history (ordered newest-first,
    i.e. descending ids) for the relative index of *message_id*.

    The index is "relative": if no message with the given id is found,
    the returned position is where such a message would sit if it were
    inside the history.

    Parameters
    ----------
    messages : `deque` of ``Message``
        The message history of a channel.
    message_id : `int`
        A message's id to search.

    Returns
    -------
    index : `int`
    """
    low = 0
    high = len(messages)
    while low < high:
        middle = (low + high) >> 1
        if messages[middle].id > message_id:
            low = middle + 1
        else:
            high = middle
    return low
6b7b98de79bc18a531b15bacae326b242710d121
695,388
def _string(self) -> str: """Returns string representation of Path.as_posix()""" return self.as_posix()
3897e5bd1f689f706b51653f6fd9f588d6d3bb54
695,389
import time


def get_dbus_proxy_obj_helper(request, get_environment, get_bus):
    """
    Returns dbus proxy object connected to org.scarlett @ /org/scarlett/Listener  # noqa

    ProxyObject implementing all the Interfaces exposed by the remote
    object.

    `request` and `get_environment` are unused here; presumably they are
    pytest fixtures kept in the signature for ordering/setup -- TODO
    confirm.
    """
    # NOTE(review): the fixed 2s sleep presumably gives the service time
    # to register on the bus; confirm whether a readiness check could
    # replace it.
    time.sleep(2)
    print(
        "[get_dbus_proxy_obj_helper] ('org.scarlett','/org/scarlett/Listener')"
    )  # noqa
    # Fetch the proxy for the org.scarlett service at its Listener path.
    return get_bus.get("org.scarlett", object_path="/org/scarlett/Listener")
8384efc96da9efdd6113e7156574cfedf6f5626e
695,392
def gcd(*args):
    """Calculate the greatest common divisor (GCD) of the arguments.

    With no arguments the result is 0; with one argument it is returned
    unchanged.  Otherwise the Euclidean algorithm is folded left-to-right
    over all arguments.
    """
    if not args:
        return 0
    result = args[0]
    for value in args[1:]:
        while value:
            result, value = value, result % value
    return result
0d425e9fb35e824bcd946d68dbac31f9d87d020f
695,393
import os


def get_shapelists(root):
    """Read ``ShapeLists.txt`` inside *root* and map each listed file
    name to its zero-based line index.

    :param root: a path/folder of shape files
    :return: a dictionary of {file name: line index}
    """
    catalog_path = os.path.join(root, 'ShapeLists.txt')
    titles = {}
    with open(catalog_path, 'r') as catalog:
        for index, line in enumerate(catalog):
            titles[line.strip('\n')] = index
    return titles
81837091add7a74b6911380ce8cefb76acd4fac1
695,394
import numpy


def quaternion_multiply(quaternion1, quaternion0):
    """Return the Hamilton product quaternion1 * quaternion0.

    Quaternions are (w, x, y, z) sequences; the result is a float64
    numpy array.

    >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
    >>> numpy.allclose(q, [28, -44, -14, 48])
    True
    """
    w0, x0, y0, z0 = quaternion0
    w1, x1, y1, z1 = quaternion1
    w = w1 * w0 - x1 * x0 - y1 * y0 - z1 * z0
    x = w1 * x0 + x1 * w0 + y1 * z0 - z1 * y0
    y = w1 * y0 - x1 * z0 + y1 * w0 + z1 * x0
    z = w1 * z0 + x1 * y0 - y1 * x0 + z1 * w0
    return numpy.array([w, x, y, z], dtype=numpy.float64)
3767aec912253437e26293bacc4223aa877f406f
695,395
def testTelescopes(k, telescopes): """ k: a telescope, baseline or triplet (str) eg: 'A0', 'A0G1', 'A0G1K0' etc. telescopes: single or list of telescopes (str) eg: 'G1', ['G1', 'A0'], etc. returns True if any telescope in k assumes all telescope names have same length! """ if type(telescopes)==str: telescopes = [telescopes] test = False for t in telescopes: test = test or (t in k) return test
bd3d4ef02c3fa059869ced2a17b9edb3b993d6b0
695,396
import torch def _squash(input_tensor, dim=2): """ Applies norm nonlinearity (squash) to a capsule layer. Args: input_tensor: Input tensor. Shape is [batch, num_channels, num_atoms] for a fully connected capsule layer or [batch, num_channels, num_atoms, height, width] or [batch, num_channels, num_atoms, height, width, depth] for a convolutional capsule layer. Returns: A tensor with same shape as input for output of this layer. """ epsilon = 1e-12 norm = torch.linalg.norm(input_tensor, dim=dim, keepdim=True) norm_squared = norm * norm return (input_tensor / (norm + epsilon)) * (norm_squared / (1 + norm_squared))
715b5819498d4c3a7c40c623fc9a40d2fcfb3773
695,397
def sort_key(key):
    """
    Wrap *key* as a callable suitable for ``sorted(..., key=...)``:
    the returned function indexes each item with *key*.

    usage:

    >>> list_ = [{"a": 1, "b": 3}, {"a": 2, "b": 0}]
    >>> sorted(list_, key=sort_key("b"))
    [{"a": 2, "b": 0}, {"a": 1, "b": 3}]
    """
    def by_key(item):
        return item[key]

    return by_key
01657ca8b2865f061b530d58b706020f7f9825b1
695,398
def is_valid_variable_name(name: str) -> bool:
    """
    Check that *name* is usable as a variable name.

    All single-letter, uppercase variable names are reserved, and the
    name must also be a valid Python identifier.
    """
    reserved_single_letter = len(name) == 1 and name == name.upper()
    return name.isidentifier() and not reserved_single_letter
27459ec71782ff7f0a0c9a8e4f29fc548c398f71
695,399
def crop_image(data, header, scale):
    """
    Crop *data* around its centre point and update *header*.

    If the original size is (W, H), the cropped size is
    (scale * W, scale * H); the NAXIS1/NAXIS2 header entries are set to
    the new width/height.  Raises ValueError when scale is outside [0, 1].
    """
    if scale < 0 or scale > 1:
        raise ValueError("scale must be in [0, 1]")
    height, width = data.shape
    centre_y = int(height / 2)
    centre_x = int(width / 2)
    half_height = int(height * 0.5 * scale)
    half_width = int(width * 0.5 * scale)
    cropped = data[centre_y - half_height:centre_y + half_height,
                   centre_x - half_width:centre_x + half_width]
    new_height, new_width = cropped.shape
    header['NAXIS1'] = new_width
    header['NAXIS2'] = new_height
    return cropped, header
e2145b3953565ec75437e5fb7df759bc6e15746c
695,400
import argparse


def option_handler():
    """Validates and parses script arguments.

    Returns:
        Namespace: Parsed arguments object.
    """
    parser = argparse.ArgumentParser(
        description="Create json file of all packs dependencies.")
    for flags, help_text in (
        (('-o', '--output_path'), "The full path to store created file"),
        (('-i', '--id_set_path'), "The full path of id set"),
    ):
        parser.add_argument(*flags, help=help_text, required=True)
    return parser.parse_args()
94c8bd195ef4fdc0111c7fd77efa14869bb539b4
695,401
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter


def get_parser():
    """Build and return the argument parser object for this script,
    using the module docstring as its description."""
    return ArgumentParser(description=__doc__,
                          formatter_class=ArgumentDefaultsHelpFormatter)
93c940b6048a4dc3e715094d683dd698a7f8c54e
695,402
import itertools


def all_combinations(samples_list):
    """Return every non-empty combination of *samples_list* as a list
    of lists, ordered by combination size and then by position."""
    sizes = range(1, len(samples_list) + 1)
    combos = itertools.chain.from_iterable(
        itertools.combinations(samples_list, size) for size in sizes)
    return [list(combo) for combo in combos]
0a1be4fb2cca86e8682acf05f51fc11ec5dcbbae
695,406
import os


def symlink_copy(src, dst):
    """Create *dst* as a symlink pointing at the fully-resolved *src*.

    :param str src: The file to which the symlink will point.
    :param str dst: The symlink file to create.
    """
    target = os.path.realpath(src)
    return os.symlink(target, dst)
eff73f65a27d8c34c89a5e645f24f6b2939c406c
695,407
def time_to_str(time_in_seconds):
    """
    Convert a duration in seconds to a string displaying it in Years,
    Days, Hours, Minutes and Seconds.

    Units at or below 60s/60min/24h/365d thresholds are left unsplit
    (e.g. exactly 60 seconds stays "60.000 Second(s)"), preserving the
    original boundary behavior.
    """
    seconds = time_in_seconds
    minutes = hours = days = years = None
    if seconds > 60:
        minutes = seconds // 60
        # BUG FIX: the remainder is seconds % 60; the old code subtracted
        # seconds / 60, so e.g. 120s printed as "118.000 Second(s)".
        seconds %= 60
    if minutes and minutes > 60:
        hours = minutes // 60
        minutes %= 60
    if hours and hours > 24:
        days = hours // 24
        hours %= 24
    if days and days > 365:
        years = days // 365
        days %= 365
    s = ''
    if years:
        s += '{:d} Year(s), '.format(int(years))
    if days:
        s += '{:d} Day(s), '.format(int(days))
    if hours:
        s += '{:d} Hour(s), '.format(int(hours))
    if minutes:
        s += '{:d} Minute(s)'.format(int(minutes))
        # Oxford comma before the seconds part only when larger units
        # already appear in the string.
        s += (', ' if hours else ' ') + 'and '
    s += '{:0.3f} Second(s)'.format(seconds)
    return s
ac79955ae1745180719de7260ad1f3e4e3f7f1e3
695,408
import torch


def kernelize_with_rbf(d, mu, gamma=1.0, eps=1e-6):
    """
    Map a distance matrix onto a normalized RBF kernel representation.

    Each distance `d_{ij}` is expanded into K Gaussian responses
    `g_{ijk} = exp(-(d_{ij} - mu_k)^2 / gamma^2)` (plus *eps* for
    numerical safety), which are then normalized to sum to one over the
    kernel axis.

    Parameters
    ----------
    d: PyTorch tensor
        distance matrix of shape `[n_batch, n_particles, n_particles, 1]`
    mu: PyTorch tensor / scalar
        Means of RBF kernels. Either of shape `[1, 1, 1, n_kernels]`
        or scalar
    gamma: PyTorch tensor / scalar
        Bandwidth of RBF kernels. Either same shape as `mu` or scalar.
    eps: float
        Additive constant preventing division by zero.

    Returns
    -------
    rbfs: PyTorch tensor
        RBF representation of the distance matrix of shape
        `[n_batch, n_particles, n_particles, n_kernels]`
    """
    responses = torch.exp(-(d - mu).pow(2) / gamma ** 2) + eps
    normalizer = responses.sum(dim=-1, keepdim=True)
    return responses / normalizer
64286bad845eec3da58752f53f51a97083bee91b
695,409
import csv


def readData(fileName):
    """Read a space-delimited data file into a list of rows.

    The trailing ';' is stripped from each line's last cell; every row
    after the first (header) row is converted to floats.

    Fix: the original left the file handle open; ``with`` guarantees it
    is closed.  (Could still be improved by streaming in chunks instead
    of materializing every row.)

    returns (List[List], int): the rows and the row count
    """
    allRows = []
    with open(fileName) as csvfile:
        for row in csv.reader(csvfile, delimiter=' '):
            row[-1] = row[-1].rstrip(';')
            allRows.append(row)
    for index in range(1, len(allRows)):
        allRows[index] = [float(cell) for cell in allRows[index]]
    return allRows, len(allRows)
44b787dd69070e93a912d9def4d10e01fc9495ea
695,410
def run(df, dt):
    """General wrapper to calculate folds by scaffold.

    Args:
        df (DataFrame): Dataframe with standardized smiles
        dt: processor exposing ``process_dataframe``

    Returns:
        Tuple (DataFrame, DataFrame): dataframe with successfully
        calculated fold information and dataframe with failed molecules,
        as produced by ``dt.process_dataframe``.
    """
    processed = dt.process_dataframe(df)
    return processed
8d6f793f22511ba2ca2923de1c6dbfb1c986ba1d
695,411