content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import torch


def reference(t):
    """Step-function signal with `nstep` steps bounded in [`lo`, `hi`]."""
    lo, hi = -.8, .8
    nstep = 4
    tlen = 25 + 1
    step_height = (hi - lo) / nstep
    # Quantize t into nstep levels over [0, tlen), then scale/shift into [lo, hi].
    return torch.floor(t * nstep / tlen) * step_height + lo
8d623f76a6f4aa9731fecdd6201e0b73149cdd0e
695,635
def credibility(GLOB_CONC, b, DISC, C, A):
    """
    Calculate the credibility vectors for boundary reference action `b`.

    :param GLOB_CONC: dict of global concordance vectors for `b`
        (keys 'C(ai,b)' and 'C(b,ai)').
    :param b: name of the boundary reference action.
    :param DISC: dict of discordance matrices for `b`
        (keys 'd(ai,b)' and 'd(b,ai)').
    :param C: list of criterion names.
    :param A: list of action names.
    :return: dict with keys 'σ(ai,b)' and 'σ(b,ai)', each a list holding the
        credibility of the outranking in the corresponding direction.
    """
    # Hoist dictionary lookups out of the loops; keys depend only on b.
    conc_ab = GLOB_CONC['C(ai,{})'.format(b)]
    conc_ba = GLOB_CONC['C({},ai)'.format(b)]
    disc_ab = DISC['d(ai,{})'.format(b)]
    disc_ba = DISC['d({},ai)'.format(b)]

    cred_ab = []
    cred_ba = []
    for i in range(len(A)):
        factor_ab = 1
        factor_ba = 1
        for j in range(len(C)):
            # Discordances exceeding the global concordance attenuate it.
            if disc_ab[i][j] > conc_ab[i]:
                factor_ab *= (1 - disc_ab[i][j]) / (1 - conc_ab[i])
            if disc_ba[i][j] > conc_ba[i]:
                factor_ba *= (1 - disc_ba[i][j]) / (1 - conc_ba[i])
        cred_ab.append(factor_ab * conc_ab[i])
        cred_ba.append(factor_ba * conc_ba[i])

    return {
        'σ(ai,{})'.format(b): cred_ab,
        'σ({},ai)'.format(b): cred_ba,
    }
b0c85689bcf359b502d7489ee8b60e79266fe66f
695,636
import struct
# Bug fix: `namedtuple` was referenced but never imported.
from collections import namedtuple


def defpacket(name: str, **kwargs):
    """Define a protocol packet.

    :param name: name of the generated message type.
    :param kwargs: ordered mapping of field name -> struct format character;
        packed big-endian (">" prefix).
    :return: helper class exposing `length`, `decode`, `encode` and `extend`.
    """
    fmt: str = ">" + "".join(kwargs.values())
    msg_type = namedtuple(name, kwargs.keys())  # type: ignore

    class _MessageType:
        # Total packed size of one message in bytes.
        length = struct.calcsize(fmt)

        @staticmethod
        def decode(data: bytes, allow_excessive=False):
            """Decode binary data as message.

            With `allow_excessive`, trailing bytes beyond the packet length
            are ignored instead of raising.
            """
            if allow_excessive:
                data = data[0:struct.calcsize(fmt)]
            return msg_type._make(struct.unpack(fmt, data))

        @staticmethod
        def encode(*args) -> bytes:
            """Encode a message into binary data."""
            return struct.pack(fmt, *args)

        @staticmethod
        def extend(ext_name, **ext_kwargs):
            """Extend a message type with additional fields."""
            fields = {**kwargs, **ext_kwargs}
            return defpacket(ext_name, **fields)

    return _MessageType
1fa103a20b6b0f83476afc624b5d5c36d0afd8ba
695,637
def config_file():
    """
    Creates a ThreatPlaybook config file to store Authorization Token
    :return: Path of config file
    """
    config_file_path = 'config'
    # Touch the file (append mode creates it if absent, keeps contents if not).
    with open(config_file_path, 'a'):
        pass
    return config_file_path
ca75c6d9205b7ab31e616f9b03316719226c1067
695,638
def traffic_features_normalize(features):
    """
    Normalize temporal features into roughly [0, 1].

    Column layout:
      0:year 1:month 2:day 3:hour 4:minute 5:weekday 6:holiday 7:timepoint 8:timepoint

    :param features: ndarray, features.
    :return: ndarray (new float array; input is not modified in place).
    """
    features = features.astype('float')
    features[:, 0] /= 3000                         # year (~2015)
    features[:, 1] /= 13                           # month 1..12
    features[:, 2] /= 32                           # day 1..31
    features[:, 3] = (features[:, 3] + 1) / 25     # hour 0..23
    features[:, 4] = (features[:, 4] + 1) / 61     # minute 0..59
    features[:, 5] /= 8                            # weekday 1..7
    features[:, 6] = 0.5                           # holiday flag forced constant
    features[:, 7] = (features[:, 7] + 1) / (24 * 60 + 1)  # timepoint 0..1439
    features[:, 8] = features[:, 7]                # duplicate timepoint column
    return features
7f26e84878dd68e1ee0e898fbee24d276c48afcd
695,640
def nvl(value, default=''):
    """Get specified value, or an empty string if value is empty

    :param object value: value to be checked
    :param object default: default value to be returned if value is *false*
    :return: input value, or *default* if value is *false*

    >>> from pyams_utils.unicode import nvl
    >>> nvl(None)
    ''
    >>> nvl('foo')
    'foo'
    >>> nvl(False, 'bar')
    'bar'
    """
    if value:
        return value
    return default
5334bc6e5a142e217bc40600bd493bd0c39b037c
695,641
def get_SI_land(y_ob, y_fg, i89, i150):
    """Compute the scattering index over land.

    Averages the 89-150 GHz channel difference of the observation and the
    first-guess fields.
    """
    si_obs = y_ob[i89, :] - y_ob[i150, :]
    si_bg = y_fg[i89, :] - y_fg[i150, :]
    return 0.5 * (si_obs + si_bg)
b9471e7e5ce284ca6527b147d65609a60ba534cf
695,642
def gl_get_project_membership(gl, user_id):
    """
    Get a list of all projects where user_id is a member

    :param gl: GitLab instance
    :param user_id: user id (not username!) this field is (gitlab User object).id
    :return: list of (project id, http clone url) tuples for matching projects
    """
    # Lazy generator over all visible projects (as_list=False avoids
    # materializing every page up front).
    gl_projects = gl.projects.list(as_list=False)
    print(" found project manager ", gl_projects)
    projects=[]
    for proj in gl_projects:
        #proj = gl.projects.get(project.id)
        #print(proj)
        #print(proj._links['members'])
        # One extra API call per project to fetch its member list.
        members = proj.members.list()#gl.http_list(proj._links['members'])
        for member in members:
            #print(member)
            if member.id == user_id:
                print(" found member ", member.name, ' in project ', proj.id, proj.name, proj.web_url)
                projects.append((proj.id,proj.http_url_to_repo))
    print(" Projects with user id = ", user_id)
    for project in projects:
        print(project)
    return projects
470cdc1e6c65fec54279902793bd32958e9e7ffb
695,643
def njit_time_point_thresh(wf_in, threshold, tp_max, tp_out):
    """
    Find the last timepoint before tp_max that wf_in crosses a threshold
    wf_in: input waveform
    threshold: threshold to search for
    tp_max: time of a maximum of a waveform that the search starts at
    tp_out: final time that waveform is less than threshold
    """
    # Walk backwards from the maximum looking for an upward crossing.
    for idx in range(tp_max, 0, -1):
        if wf_in[idx] > threshold and wf_in[idx - 1] < threshold:
            return idx
    # No crossing found before the start of the waveform.
    return 0
cebeecb4bc176bb72015b28aedaf6b0ddca8e1b6
695,644
import subprocess


def last_commit_date():
    """Returns the date of the last commit touching py/calendon, formatted
    as 'DD Mon HH:MM'."""
    cmd = ['git', 'log', '-1', '--pretty=%ad',
           '--date=format:%d %b %H:%M', 'py/calendon']
    raw = subprocess.check_output(cmd)
    return raw.decode().strip()
46fb33c2756cb049d5b7dd7bc4a7eccb70bd4b90
695,645
def sum(num1, num2):
    """Return the sum of `num1` and `num2`.

    NOTE: the name intentionally matches the original API, shadowing the
    built-in ``sum`` wherever it is imported.
    """
    total = num1 + num2
    return total
1a98f453da80148abd673f1bffe28406717e8ab9
695,646
def cli(ctx, category_id):
    """Get details of a given category.

    Output:

        details of the given category
    """
    categories_api = ctx.gi.categories
    return categories_api.show_category(category_id)
4b249650629a268c02f55ee19c2f1ba677a824c6
695,647
import os


def root_dir():
    """Retrieve the root testing directory (the parent of this file's dir).

    Returns:
        The root testing directory as an absolute path.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.abspath(os.path.join(here, os.pardir))
910ce5391447b2232d270334e0b0f882ef876cf0
695,648
import json
import base64


def custom_header(algorithm, media):
    """
    Creates JWT header.

    Author: Lucas Antognoni

    Arguments:
        algorithm (str): Signing algorithm.
        media (str): Media type of JWS.

    Response:
        header (bytes): Base64url encoded header, padding stripped.
    """
    header_json = json.dumps({"alg": algorithm, "typ": media}).encode()
    encoded = base64.urlsafe_b64encode(header_json)
    # urlsafe alphabet already uses '-' and '_'; only '=' padding can occur,
    # and only at the end, so stripping it is equivalent to removing all '='.
    return encoded.rstrip(b'=')
89810f8c310a903f484a30ccc315197e7c87e050
695,649
from datetime import datetime
import calendar


def get_year_days_num(year=None):
    """Calculate the number of days in a year.

    Generalized: accepts an explicit `year`; defaults to the current year,
    preserving the original call signature `get_year_days_num()`.

    :param year: Gregorian year; None means the current year.
    :return: 366 for leap years, otherwise 365.
    """
    if year is None:
        year = datetime.now().year
    # calendar.isleap implements the full Gregorian rule (4 / 100 / 400).
    return 366 if calendar.isleap(year) else 365
5dc718b9a31d1acdc0e8e166420f72f4890034aa
695,650
from pathlib import Path
import os


def find_project_root():
    """
    We're using the project structure for tests, find the directory that is
    the project root, traversing up from the current working dir.

    :return: Path of the directory containing arvet/config.
    :raises FileNotFoundError: when no ancestor contains the project layout.
    """
    path = Path(os.getcwd())
    while True:
        if (path / 'arvet').is_dir() and (path / 'arvet' / 'config').is_dir():
            return path
        # Bug fix: the original tested `path is not '/'`, an identity
        # comparison between a Path and a str that is never False, so the
        # loop could never terminate at the filesystem root.
        if path == path.parent:
            break
        path = path.parent  # Move up a folder level
    raise FileNotFoundError("Could not find the project root, run this test from somewhere within the project")
884bf3602dd1d2ec884b5f85d6c7dfd528557f21
695,651
def calc_julian_day(date):
    """
    Calculates the Julian Day (days since 01 Jan) for a given date.
    Needed to download files from AWS server

    Parameters
    ------------
    date : str
        Date to be converted to julian day. Format: YYYYMMDD, YYYYMMDDHH, or YYYYMMDDHHMM

    Returns
    ------------
    julian_day : str
        Date converted to Julian Day, zero-padded to 3 characters
    """
    #                  J   F   M   A   M   J   J   A   S   O   N   D
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

    year = int(date[:4])
    month = int(date[4:6])
    day = int(date[6:8])

    # Bug fix: the original used a hard-coded list of leap years
    # (2016..2032), silently mishandling any other leap year. Apply the
    # full Gregorian rule instead.
    if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
        days_per_month[1] += 1

    julian_day = day + sum(days_per_month[:month - 1])

    # Zero-pad to three characters ('5' -> '005', '61' -> '061').
    return str(julian_day).zfill(3)
5e9675209b8f14c5881313a92580f4eb8e4f10b3
695,652
import torch


def drop_path(x, drop_prob=0., training=False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks).

    We follow the implementation
    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py  # noqa: E501
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample; broadcast over all trailing dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask = (mask + keep_prob).floor()
    # Scale surviving paths so the expectation is unchanged.
    return x.div(keep_prob) * mask
dc2cd12de51b20c5102004b31e3c893fc8476654
695,653
def _path_to_release(path): """Compatibility function, allows us to use release identifiers like "3.0" and "3.1" in the public API, and map these internally into storage path segments.""" if path == "v3": return "3.0" elif path.startswith("v3."): return path[1:] else: raise RuntimeError(f"Unexpected release path: {path!r}")
84ea8a22e3a1d82df249161fd76b12c370f70742
695,654
import math


def pi():
    """Return the number pi.

    .. math:: \\pi

    Returns:
        float: The number pi.
    """
    return math.pi
bc3656f54d6da66b5191063f847f280ca6cd2d8c
695,655
import sys
import re
import random


def non_aminoacid_converter(seq, amino_con):
    """Converts non-amino-acid characters in a protein sequence.

    :param seq: list of sequence strings; NOTE: mutated in place in the
        non-random branches.
    :param amino_con: single valid amino-acid letter used as replacement,
        or 'random' to substitute each invalid character randomly.
    :return: list of upper-cased, cleaned sequence strings.

    Exits the process (sys.exit) on an empty or invalid `amino_con`.
    """
    # Validate the replacement choice before touching the sequence.
    if amino_con == '':sys.stderr.write("usage: Provide a choice for replacing non amino acid characters\n");sys.exit(-1)
    flag = 0
    if len(amino_con)>1:
        # Multi-character choices are only valid if they spell 'random'.
        if amino_con != 'random':flag = 1
    else:
        # Single characters must be one of the 20 amino-acid codes.
        if re.match(r'[^GPAVLIMCFYWHKRQNEDST]', amino_con, re.IGNORECASE):flag = 1
    if flag == 1:sys.stderr.write("usage: Replace aminoacid chioce -"+ amino_con +"- failed. Pick a valid aminoacid single letter code/random\n");sys.exit(-1)
    amino_con = amino_con.upper()
    opt_seq = []
    for i in range(len(seq)):
        # Only sequences containing at least one invalid character are rewritten.
        if re.search(r'[^GPAVLIMCFYWHKRQNEDST]', seq[i], re.IGNORECASE):
            if amino_con == 'RANDOM':
                aminoacid = 'GPAVLIMCFYWHKRQNEDST'
                line = ''
                # Replace each invalid character with an independent random pick.
                for single_amino in seq[i]:
                    if re.match(r'[^GPAVLIMCFYWHKRQNEDST]', single_amino, re.IGNORECASE):
                        r_amino = random.choice(aminoacid)
                        line += r_amino
                    else:
                        single_amino = single_amino.upper()
                        line += single_amino
                opt_seq.append(line)
            else:
                # Fixed replacement letter for every invalid character.
                seq[i] = re.sub(r'[^GPAVLIMCFYWHKRQNEDST|gpavlimcfywhkrqnedst]', amino_con, seq[i])
                seq[i] = seq[i].upper()
                opt_seq.append(seq[i])
        else:
            seq[i] = seq[i].upper()
            opt_seq.append(seq[i])
    return opt_seq
7bb5fba8c5e294ca4ef313055c5484c00ea05f4a
695,656
import tempfile
import ssl


def create_ssl_ctx(pem):
    """Create temporary file to store plain-text cert and create ssl context
    from it.

    This doesn't seem really secure since it requires using the filesystem,
    but I can't see another way as the low-level openssl api requires a file
    and will not accept a string or file like object.

    :param pem: certificate chain bytes (PEM format).
    :return: ssl.SSLContext configured for server side (CLIENT_AUTH).
    """
    # NOTE(review): reopening a NamedTemporaryFile by name may fail on
    # Windows while the file is still open -- confirm target platforms.
    with tempfile.NamedTemporaryFile('wb') as temp_cert:
        temp_cert.write(pem)
        # Ensure the bytes hit disk before openssl reads the file by name.
        temp_cert.flush()
        # Drop our reference to the plain-text key material as early as possible.
        del pem
        ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ssl_ctx.load_cert_chain(temp_cert.name)
        return ssl_ctx
08f92925519a1876aae4ec31f10112dd74e85bb3
695,657
def bitwise_or(first_value, second_value):
    """Perform a bitwise or operation."""
    result = first_value | second_value
    return result
21cd3ba65fded59c152183445e59f1aa99b4c72e
695,659
import numpy as np


def get_2D_cylindrical_map_in_cylindrical_coords(
    system, var, phimin, phimax, Nphi, Nr, rmin=None, rmax=None, time=0, z=0
):
    """Create a 2D map of the eigenmode var stored in system.result[var].

    This function assumes that the eigenmodes have the form
    f(r)*exp(i kz z + i m phi). It returns a map in the r-phi plane at a
    fixed value of z (default 0)

    :param system: solver object exposing grid, m, kz, result and eigenvalue
        (project type -- see callers for its exact contract).
    :param var: either a key into system.result or a complex radial profile.
    :return: (rr, phiphi, val) meshgrids and the real amplitude map.
    """
    # Create linear grid in phi
    # NOTE(review): phimin only enters through the spacing dphi; phig always
    # starts at 0 rather than phimin -- confirm this is intended.
    dphi = (phimax - phimin) / (Nphi - 1)
    phig = (np.arange(Nphi)) * dphi
    # Create linear grid in r (cell-centred: offset by half a cell)
    if rmin is None:
        rmin = system.grid.zmin
    dr = (rmax - rmin) / Nr
    rg = (0.5 + np.arange(Nr)) * dr + rmin
    # Contruct meshgrids
    rr, phiphi = np.meshgrid(rg, phig)
    # Azimuthal mode number
    m = system.m
    # Wavenumber
    kz = system.kz
    val = np.zeros((Nphi, Nr))

    def return_real_ampl(f, phi, z):
        """Real amplitude of f at (phi, z), grown by the eigenvalue over `time`."""
        return (
            2
            * f
            * np.exp(
                1j*kz*z + 1j*m*phi + system.result[system.eigenvalue]*time
            )
        ).real

    # Interpolate onto r-grid (real and imaginary parts separately)
    if type(var) is str:
        yr = system.grid.interpolate(rg, system.result[var].real)
        yi = system.grid.interpolate(rg, system.result[var].imag)
    else:
        yr = system.grid.interpolate(rg, var.real)
        yi = system.grid.interpolate(rg, var.imag)
    y = yr + 1j * yi
    for i in range(Nphi):
        val[i, :] = return_real_ampl(y, phig[i], z)
    # This is how you would plot the map
    # xx = rr * np.cos(phiphi)
    # yy = rr * np.sin(phiphi)
    # plt.pcolormesh(xx, yy, val)
    # plt.axis('equal')
    # plt.show()
    return (rr, phiphi, val)
f73c9ae47f67c5e28a1bbdf497a5dc13e22255dc
695,660
def _check_decorated_arbitrary(to_check:list, parameters:list) ->dict:
    """
    Checks and modifies arbitrary parameters

    Checks and modifies the arbitrary parameters (for 'args') of the
    decorated function.

    NOTE: Used error codes
    ----------------------------------------------
    [A.0]: not expected param
    [A.1]: missing param
    [A.2]: wrong type
    [A.3]: restriction not met
    ----------------------------------------------

    params:
    -------
    to_check : list
        The instructions to check against (only relevant ones).
    parameters : list
        The parameters (arbitrary) to check (only relevant ones).

    returns:
    --------
    dict
        Contains info about success, error msg & modified parameters.
        Format: { 'succes':bool, 'error':str, 'mod':list }
    """
    # create result dict
    result = {
        'success':False,
        'error':"",
        'mod':[]
    }
    # check if there are more parameters than
    # definitions
    if len(to_check) < len(parameters):
        # build error msg
        msg = "[A.0]: The are more arbitrary parameters than expected (found '{a}', expected '{e}').".format(a = str(len(parameters)), e = str(len(to_check)))
        result.update({'error':msg})
        return result
    # iterate through each positional argument
    for i in range(len(to_check)):
        """
        CAUTION: you do not have to use '.keys()' to iterate through a dict,
        but in this case we have a mixed type (list & dict). To be as distinct
        as possible we use .key() to indicate iteration through dictionaries.
        """
        # was the position provided?
        try:
            parameters[i]
        except Exception as e:
            # [A.1]: is a default value given?
            if "default" in to_check[i].keys():
                # add default
                result['mod'].append(to_check[i]['default'])
                continue
            # else return the error msg
            else:
                # create msg
                msg = "[A.1]: The '*args' parameter at position '{i}' is missing.".format(i = str(i))
                result.update({'error':msg})
                return result
        # [A.2]: correct type?
        try:
            assert(isinstance(parameters[i], to_check[i]['type']))
        except AssertionError as e:
            # check if it is a filled tuple (--> Union)
            if isinstance(to_check[i]['type'], tuple):
                typeFormat = " | ".join([str(e.__name__) for e in to_check[i]['type']])
            else:
                typeFormat = str(to_check[i]['type'].__name__)
            # create msg
            msg = "[A.2]: The '*args' parameter at position '{i}' needs to be a(n) {t}.".format(i = str(i), t = typeFormat)
            result.update({'error':msg})
            return result
        # passed restriction?
        if "restriction" in to_check[i].keys():
            # [A.3]: check if it meets the restriction
            # NOTE(review): the restriction string is evaluated with eval()
            # against a local 'value'; only trusted instruction sources may
            # supply it -- confirm no user input reaches this field.
            try:
                value = parameters[i]
                assert(eval(to_check[i]['restriction']))
            except AssertionError as e:
                # create msg
                msg = "[A.3]: The '*args' parameter at position '{i}' does not meet the restriction '{r}'. Value is currently '{v}'.".format(i = str(i), r = to_check[i]['restriction'], v = str(parameters[i]))
                result.update({'error':msg})
                return result
        # all passed? --> add to new
        result['mod'].append(parameters[i])
    # did all work?
    result['success'] = True
    return result
f2d374c2c77428ecce4662107f3c5f286dbf24af
695,661
import numpy


def predict_linear_regression(fitted_model, array):
    """Predict from a fitted (statsmodels-style) linear regression.

    If `array` lacks the constant term (one fewer entry than the model's
    coefficient vector), a leading 1 is prepended.

    :param fitted_model: object with a `params` coefficient array (intercept
        first).
    :param array: 1-D feature vector, with or without the constant term.
    :return: the dot product of coefficients and features.
    """
    # Bug fix: the original compared len(array) to `fitted_model.params - 1`
    # (an array minus a scalar), not to the number of coefficients.
    if len(array) == len(fitted_model.params) - 1:
        array = numpy.concatenate(([1], array))
    return fitted_model.params @ array
600c328138fde2dc4028c08f7bc0ff966aea4655
695,662
def format_html_desc(node):
    """
    Formats a XML node into a HTML format.

    Text children are wrapped in <p>...</p> unless they already sit inside a
    <p> element; element children are re-emitted with their tag and their
    contents formatted recursively.
    """
    desc_buf = ''
    for desc in node.childNodes:
        if desc.nodeName == "#text":
            # Bug fix: the original used `desc.data is not ''` -- an identity
            # comparison that is unreliable for strings; compare by value.
            if desc.data != '':
                if desc.parentNode.nodeName != "p":
                    desc_buf += "<p>" + desc.data + "</p>"
                else:
                    desc_buf += desc.data
        else:
            desc_buf += "<" + desc.nodeName + ">" \
                + format_html_desc(desc) \
                + "</" + desc.nodeName + ">"
    return desc_buf
dca4878a4284d83cb2934c54fbd430ef6698c153
695,663
import warnings


def assert_warns(wtype, f, *args, **kwargs):
    """Check that a function call `f(*args, **kwargs)` raises a warning of
    type wtype.

    Returns the output of `f(*args, **kwargs)` unless there was no warning,
    in which case an AssertionError is raised.
    """
    # Record everything emitted during the call, without filtering.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        result = f(*args, **kwargs)
        assert len(caught) >= 1, "Expected warning was not raised."
        assert issubclass(caught[0].category, wtype), \
            "Warning raised was the wrong type (got %s, expected %s)" % (
                caught[0].category, wtype)
    return result
aa27979e2c9bba268a676ed070296e52add7b210
695,664
def _SetMockCheckRunGitBehavior(mock_obj, command_output_map): """Sets the behavior of a mock function according to the given mapping.""" # Unused argument 'cwd', expected in args list but not needed. # pylint: disable=W0613 def FakeCheckRunGit(in_command, cwd=None): for command, output in command_output_map: if command == in_command: return output mock_obj.side_effect = FakeCheckRunGit
a05a8f890a242f40232c029047e92bdcc468c434
695,665
import torch


def collate_fn(batch):
    """Zero-pads model inputs and targets based on number of frames per step.

    Samples are ordered by decreasing input length; both mgc tensors are
    right-padded with zeros up to the longest input in the batch.
    """
    lengths = torch.LongTensor([sample[0].size(2) for sample in batch])
    sorted_lengths, order = torch.sort(lengths, dim=0, descending=True)
    max_len = int(sorted_lengths[0])
    mgc_dim = batch[0][0].size(1)

    in_padded = torch.zeros(len(batch), 1, mgc_dim, max_len)
    out_padded = torch.zeros(len(batch), 1, mgc_dim, max_len)
    for dst, src in enumerate(order):
        in_mgc, out_mgc = batch[src]
        in_padded[dst, :, :, :in_mgc.size(2)] = in_mgc
        out_padded[dst, :, :, :out_mgc.size(2)] = out_mgc
    return in_padded, out_padded
c35b78fdfc2e6f1d626ec2019899e5b4b7880dea
695,666
import hashlib


def get_fingerprint(contents: str) -> str:
    """
    Generate a fingerprint for the contents of a virtual relation.

    This fingerprint is used by the server for caching purposes.

    :param contents: The full contents of a tsv file
    :returns: md5 sum representing the file contents
    """
    # Hash the repr (not the raw string) to match the server's expectation.
    return hashlib.md5(repr(contents).encode()).hexdigest()
65dd77ca873b8928af5af1f3ea036555e835c418
695,667
def crop_image(frame, bbox):
    """Return the cropped image from frame specified by bbox.

    bbox is (x_start, y_start, x_end, y_end); the result is an independent
    copy, not a view.
    """
    x0, y0, x1, y1 = (int(v) for v in bbox[:4])
    return frame[y0:y1, x0:x1, :].copy()
702477cd6c98a6170d254b3837d2dfbd605d8ae3
695,669
import math


def GeometricMean(values):
    """Compute a rounded geometric mean from an array of values."""
    if not values:
        return None
    # Clamp to 0.001 so the logarithm stays finite for zero/negative inputs.
    clipped = [max(value, 0.001) for value in values]
    log_sum = sum(math.log(value) for value in clipped)
    # exp of the average log is the geometric mean.
    mean = math.pow(math.e, log_sum / len(clipped))
    return int(round(mean))
bd1bb53943c8db09c18f1e4710c36000fa07671d
695,670
from typing import Tuple
import string


def return_c_values(cardinality: int) -> Tuple[list, list]:
    """Return categorical values for C+ and C-.

    Values from C+ end with "A" and values from C- end with "B"; each side
    gets cardinality // 2 entries built from two-letter lowercase suffixes.

    Args:
        cardinality (int): cardinality of c

    Returns:
        c_pos (list): categorical values from C+ sample
        c_neg (list): categorical values from C- sample
    """
    letters = string.ascii_lowercase
    suffixes = [a + b for a in letters for b in letters]
    half = int(cardinality / 2)
    c_pos = [s + "A" for s in suffixes][:half]
    c_neg = [s + "B" for s in suffixes][:half]
    return c_pos, c_neg
7b25599b2fb9efb1f053317d19f94f73b5889a36
695,671
import pathlib
from typing import List


def confirm_text_in_file(file_path: pathlib.Path, tag: str, text: str) -> bool:
    """Confirm the expected text is in the file after the tag.

    Returns True when a line containing `text` appears on a later line than
    the first line containing `tag`.
    """
    tag_seen = False
    with file_path.open('r') as handle:
        for line in handle:
            if tag in line:
                tag_seen = True
                continue  # text must appear on a *later* line than the tag
            if tag_seen and text in line:
                return True
    return False
d468c52624166f150aef3f27f8cbdfd558bb0cd5
695,672
import os


def get_template_dir():
    """Find and return the ntc-templates/templates dir.

    Resolution order:
      1. $NET_TEXTFSM (pointing either directly at the templates dir, or at
         the repo root, in which case 'templates' is appended);
      2. ~/ntc-templates/templates.

    :raises ValueError: when no directory containing an 'index' file is found.
    :return: absolute path of the templates directory.
    """
    try:
        template_dir = os.path.expanduser(os.environ["NET_TEXTFSM"])
        index = os.path.join(template_dir, "index")
        if not os.path.isfile(index):
            # Assume only base ./ntc-templates specified
            template_dir = os.path.join(template_dir, "templates")
    except KeyError:
        # Construct path ~/ntc-templates/templates
        home_dir = os.path.expanduser("~")
        template_dir = os.path.join(home_dir, "ntc-templates", "templates")
    # Re-derive the index path: template_dir may have changed above.
    index = os.path.join(template_dir, "index")
    if not os.path.isdir(template_dir) or not os.path.isfile(index):
        msg = """
Valid ntc-templates not found, please install https://github.com/networktocode/ntc-templates
and then set the NET_TEXTFSM environment variable to point to the
./ntc-templates/templates directory."""
        raise ValueError(msg)
    return os.path.abspath(template_dir)
fe5146391bd28390a64e7079c7a6c0e93f078a84
695,673
import torch


def mat2euler(mat):
    """
    Convert rotation matrix to euler angles.

    https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L283
    Args:
        mat: rotation matrix in zyx format -- size = [B, 3, 3]
    Returns:
        angle: rotation angle along 3 axis (in radians, it's not unique) -- size = [B, 3]
    """
    cy_thresh = 1e-10
    # cos(y) magnitude, computed per batch element from the last column.
    cy = torch.sqrt(mat[:, 2, 2]*mat[:, 2, 2] + mat[:, 1, 2]*mat[:, 1, 2])
    # NOTE(review): the branch is chosen with .any() over the whole batch, so
    # a single formula is applied to every element even when only some have
    # cos(y) above the threshold -- confirm this is acceptable for callers.
    if (cy > cy_thresh).any():
        # cos(y) not close to zero, standard form
        z = torch.atan2(-mat[:, 0, 1], mat[:, 0, 0]) # atan2(cos(y)*sin(z), cos(y)*cos(z))
        y = torch.atan2(mat[:, 0, 2], cy) # atan2(sin(y), cy)
        x = torch.atan2(-mat[:, 1, 2], mat[:, 2, 2]) # atan2(cos(y)*sin(x), cos(x)*cos(y))
    else:
        # cos(y) (close to) zero, so x -> 0.0 (see above)
        # so r21 -> sin(z), r22 -> cos(z) and
        z = torch.atan2(mat[:, 1, 0], mat[:, 1, 1])
        y = torch.atan2(mat[:, 0, 2], cy) # atan2(sin(y), cy)
        x = torch.zeros_like(mat[:, 0, 0])
    return torch.cat([x.unsqueeze(-1), y.unsqueeze(-1), z.unsqueeze(-1)], -1).view(-1, 3)
a940f61fd00ce28c01bce7704920a49cf8bab8eb
695,674
def _encode_dates(not_valid_before, not_valid_after):
    """
    Encode dates into compressed certificate format.

    Packed layout (24 bits across the 3 bytes):
      byte 0: bits 7..3 = year-2000, bits 2..0 = high 3 bits of month
      byte 1: bit 7 = low bit of month, bits 6..2 = day, bits 1..0 = high bits of hour
      byte 2: bits 7..5 = low bits of hour, bits 4..0 = expire_years

    :param datetime not_valid_before: Certificate not valid before date.
    :param datetime not_valid_after: Certificate not valid after date.
    :returns bytes: Compressed certificate dates format (3 bytes)
    """
    expire_years = not_valid_after.year - not_valid_before.year
    if not_valid_after.year == 9999:
        expire_years = 0  # This year is used when indicating no expiration
    elif expire_years > 31:
        expire_years = 1  # We default to 1 when using a static expire beyond 31
    enc_dates = bytearray(b'\x00'*3)
    # Byte 0: year offset into bits 7..3, then month's top 3 bits into 2..0.
    enc_dates[0] = (enc_dates[0] & 0x07) | ((((not_valid_before.year - 2000) & 0x1F) << 3) & 0xFF)
    enc_dates[0] = (enc_dates[0] & 0xF8) | ((((not_valid_before.month) & 0x0F) >> 1) & 0xFF)
    # Byte 1: month's low bit (bit 7), day (bits 6..2), hour's top bits (1..0).
    enc_dates[1] = (enc_dates[1] & 0x7F) | ((((not_valid_before.month) & 0x0F) << 7) & 0xFF)
    enc_dates[1] = (enc_dates[1] & 0x83) | (((not_valid_before.day & 0x1F) << 2) & 0xFF)
    enc_dates[1] = (enc_dates[1] & 0xFC) | (((not_valid_before.hour & 0x1F) >> 3) & 0xFF)
    # Byte 2: hour's low 3 bits (7..5) and expiry in years (4..0).
    enc_dates[2] = (enc_dates[2] & 0x1F) | (((not_valid_before.hour & 0x1F) << 5) & 0xFF)
    enc_dates[2] = (enc_dates[2] & 0xE0) | ((expire_years & 0x1F) & 0xFF)
    enc_dates = bytes(enc_dates)
    return enc_dates
f396f1e336572e825abe2563751c720b93c47807
695,675
import json


def read_shapefile_data(path, file_name):
    """
    Reads in a json file containing the path to shapefiles, on regional,
    province and municipality level and crs for encoding.

    :param path: directory prefix (concatenated directly with file_name).
    :param file_name: name of the json config file.
    :return: (path_shapefiles, regions, provinces, territories,
        municipalities, crs)
    """
    # Fix: the original left the file handle open; use a context manager.
    with open(path + file_name) as fh:
        d = json.load(fh)
    path_shapefiles = d["path_shapefiles"]
    regions = d["regions"]
    provinces = d['provinces']
    municipalities = d['municipalities']
    territories = d['area_territoriali']
    crs = d['crs']
    return path_shapefiles, regions, provinces, territories, municipalities, crs
b209b594debabcd26f5cd8dd6a8f41fac0207560
695,676
def combine_reciprocal_hits(keep_df, other_df):
    """Merge hits from `other_df` into `keep_df` for samples missing there.

    For every index present in `other_df` but absent from `keep_df`, the
    'B_id' hit is appended (remaining columns padded with None) unless that
    hit already occurs in `keep_df['B_id']`. `keep_df` is mutated in place
    and also returned.

    NOTE(review): assumes both frames have a 'B_id' column and that 'B_id'
    is keep_df's first column -- confirm against callers.
    """
    missed_samples = set(other_df.index.values).difference(
        set(keep_df.index.values))
    for each in missed_samples:
        hit = other_df.loc[each, 'B_id']
        if hit not in keep_df['B_id'].values:
            # Pad the new row with None so it matches keep_df's column count.
            new_row = [hit] + [None for i in range(keep_df.shape[1] - 1)]
            keep_df.loc[each] = new_row
    return keep_df
b83ca2caf2808725948f062076676087dec8f4e3
695,677
def zstrip(chars):
    """Strip all data following the first zero in the string"""
    zero_pos = chars.find('\0')
    if zero_pos >= 0:
        return chars[:zero_pos]
    return chars
0c03f677008b6195723f3d62e55f108d4d265742
695,678
from typing import Tuple


def channel_error(input_shape: Tuple[int, int, int]) -> ValueError:
    """
    Value Error Message for Channel Error

    :param input_shape: Three Int Tuple
    :return: Custom text Value Error (returned, not raised)
    """
    message = f"The input must have 3 channels; got `input_shape={str(input_shape)}`"
    return ValueError(message)
a74db84d57f95524fd95e7f2d7225308949369cd
695,679
import json def _json_dumps(value): """ json.dumps parameters for dumping unicode into JS """ return json.dumps(value, separators=(",", ":"), ensure_ascii=False)
79a9d8b51df110ce19baa6a022d38a24a8492591
695,680
def _extended_gcd(a, b): """Returns (g, x, y) such that a*x + b*y = g = gcd(a, b)""" x, x_old, y, y_old = 0, 1, 1, 0 while a != 0: (q, a), b = divmod(b, a), a y, y_old = y_old, y - q * y_old x, x_old = x_old, x - q * x_old return b, x, y
e96a65990cc9e6165867ccfd7756dcf0ae2b33d8
695,681
def guess_type(text):
    """
    Guess the type of a value encoded in a string.

    :param text: string (or other value accepted by int()/float()).
    :return: int, float or str -- the narrowest type that parses `text`.
    """
    # int
    try:
        int(text)
        return int
    # Fix: the original used a bare `except:` which also swallowed
    # KeyboardInterrupt/SystemExit; catch only conversion failures.
    except (ValueError, TypeError):
        pass
    # float
    try:
        float(text)
        return float
    except ValueError:
        pass
    # string
    return str
bd2c7fa52ff4ee79f87d9018493ccb3f1394daa6
695,682
def slash_join(*args: str) -> str:
    """
    Joins ``args`` with a single "/"

    >>> slash_join('http://example.com', 'a/', '/foo/')
    'http://example.com/a/foo/'
    """
    if not args:
        return ''
    # Trailing slash on the last piece is preserved on the result.
    keep_trailing = args[-1].endswith('/')
    joined = '/'.join(piece.strip('/') for piece in args)
    return joined + '/' if keep_trailing else joined
d771fa53a2932e12eabe2ff9f7c2d707ca3ff862
695,683
import re


def extract_channel_name(channel):
    """Extract the channel name from a parenthesized definition.

    Default system channels (name containing 'SYSTEM.') are hidden:
    None is returned for them.
    """
    # Everything between '(' and ')' (original pattern excludes '}').
    name = ''.join(re.findall(r'\(([^}]+)\)', channel))
    if re.search(r'SYSTEM.', name):
        # Hidden default system channel -> no value.
        return None
    return name
bc407b27aebffcb94373f99b564fb38d9bee7e78
695,684
import math


def list_values_approx_equal(num_list, rel_tol):
    """
    Check if all values in a list are within a relative tolerance of each other

    :param num_list: List of numbers
    :param rel_tol: The relative numerical tolerance
    :return: Truth value
    """
    n = len(num_list)
    # Pairwise comparison; bail out on the first pair outside tolerance.
    for i in range(n - 1):
        for j in range(i, n):
            if not math.isclose(num_list[i], num_list[j], rel_tol=rel_tol):
                return False
    return True
02007cbdbc5591cf33f64946f9d80bb8eb69b4ee
695,685
import networkx
import re


def build_graph(data):
    """Builds the graph based on given data.

    :param data: json data as a dictionary; must contain an 'xpaths' list of
        chains, each chain a list of {'uid': ...} dicts.
    :type data: dictionary
    :returns: networkx graph.
    """
    graph = networkx.DiGraph()
    for chain in data['xpaths']:
        previous = None
        length = len(chain)
        # Chains are walked in reverse order; each node gets an edge to the
        # previously added node of the same chain.
        for index, key in enumerate(reversed(chain)):
            uid = key['uid']
            # Node label: uid truncated at its first whitespace character.
            label = re.sub(r'\s.*$', '', uid)
            # Colors mark position: last chain element green, first red,
            # everything in between blue (indices are in reversed order).
            if index == 0:
                color = 'green'
            elif index == length - 1:
                color = 'red'
            else:
                color = 'blue'
            graph.add_node(uid, value=label, color=color)
            if previous is not None:
                graph.add_edge(uid, previous)
            previous = uid
    return graph
fbbac7bb222ec509b4509915129e2d3c67d0d476
695,686
def _has_all_keys(op_dict, all_ops): """Check all keys.""" return all([k in op_dict.keys() for k in all_ops]) and len(all_ops) == len(op_dict)
eae453726a70d85bd1f5aba172a129c6d99c1b5c
695,687
def sqlobj_from_dict(obj, values):
    """
    Merge in items in the values dict into our object if it's one of our
    columns.
    """
    for column in obj.__table__.columns:
        if column.name in values:
            setattr(obj, column.name, values[column.name])
    # obj is mutated in place; returning it keeps call sites chainable.
    return obj
2aa65226f5bc5abb9870ab4a9e414dff691beaa7
695,688
def _get_working_shape_and_iterations(requested_shape, max_power_of_two=13): """Returns the necessary size for a square grid which is usable in a DS algorithm. The Diamond Square algorithm requires a grid of size n x n where n = 2**x + 1, for any integer value of x greater than two. To accomodate a requested map size other than these dimensions, we simply create the next largest n x n grid which can entirely contain the requested size, and return a subsection of it. This method computes that size. PARAMETERS ---------- requested_shape A 2D list-like object reflecting the size of grid that is ultimately desired. max_power_of_two an integer greater than 2, reflecting the maximum size grid that the algorithm can EVER attempt to make, even if the requested size is too big. This limits the algorithm to sizes that are manageable, unless the user really REALLY wants to have a bigger one. The maximum grid size will have an edge of size (2**max_power_of_two + 1) RETURNS ------- An integer of value n, as described above. """ if max_power_of_two < 3: max_power_of_two = 3 largest_edge = max(requested_shape) for power in range(1, max_power_of_two+1): d = (2**power) + 1 if largest_edge <= d: return (d, d), power #failsafe: no values in the dimensions array were allowed, so print a warning and return # the maximum size. d = 2**max_power_of_two + 1 print("DiamondSquare Warning: Requested size was too large. Grid of size {0} returned""".format(d)) return (d, d), max_power_of_two
c5f61347c17cc584d68dc6cccae7cd78c4a15906
695,689
def generate_parameters():
    """Returns a set of parameters (and their reduced values) for which
    regression values for each model property are known.
    """
    epsilon = 98.0
    sigma = 0.37800
    bond_length = 0.15
    quadrupole = 0.01

    # Reduced (dimensionless) quantities derived from the raw parameters.
    bond_length_star = bond_length / sigma
    quadrupole_star_sqr = (quadrupole * 3.1623) ** 2 / (epsilon * 1.38065 * sigma ** 5)

    return (
        epsilon,
        sigma,
        bond_length,
        bond_length_star,
        quadrupole,
        quadrupole_star_sqr,
    )
d49a0904bb2e9513df6e779d45973ef302187724
695,690
def unmap_from_unit_interval(y, lo=0., hi=1.):
    """Linearly map ``y`` from [0, 1] onto [lo, hi]."""
    span = hi - lo
    return y * span + lo
35a6dfbad855f80fa2034eb88cb41fcedc9a00d5
695,691
def add(a, b):
    """Fake dispatcher method used to exercise invoke; returns ``a + b``."""
    result = a + b
    return result
de0df81043ca8657fef0f40eb7b4def11db66807
695,692
def parse_raw(sqlContext, input, user):
    """Load a user's raw JSON dump into a DataFrame.

    Reads ``<input>/<user>/<user>.json`` as multi-line JSON through the
    supplied Spark SQL context and returns the resulting DataFrame.
    """
    json_path = "{0}/{1}/{1}.json".format(input, user)
    return sqlContext.read.json(json_path, multiLine=True)
f8ff9544e222d92aa3825d9739ddec7d5970e8e5
695,693
def argument_parser(args: List[str]) -> Tuple[List[str], str, bool, str]:
    """Parse command-line options for the image-processing tool.

    Args are passed explicitly (rather than read from sys.argv) to make
    testing straightforward.

    Args:
        args (list(str)): argument list excluding the program name.

    Returns:
        Tuple of (image_paths, save_path, verbose, tesseract_path).
    """
    parser = argparse.ArgumentParser(
        description="Process images to straighten images and extract text",
    )
    parser.add_argument(
        "paths",
        metavar="N",
        type=str,
        nargs="*",  # ? is zero or one, * is zero or more, + is one or more
        help="File or folder paths. Files must be jpg, png or gif",
    )
    parser.add_argument(
        "-s",
        "--save",
        required=False,
        type=str,
        nargs="?",
        default=None,  # returned if argument not used
        const="||cwd||",  # returned if argument has no trailing parameter
        help="Save data to current working directory or a specified folder",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Display intermediate steps in processing",
    )
    parser.add_argument(
        "-t",
        "--tesseract",
        required=False,
        type=str,
        default=None,  # returned if argument not used
        help="Specify location of tesseract.exe",
    )

    namespace = parser.parse_args(args)
    return namespace.paths, namespace.save, namespace.verbose, namespace.tesseract
dfd32467344fb5128cab20387a912bbb98c3b111
695,694
def parse_match(ref_index, read_index, length, read_sequence, ref_sequence):
    """Scan a cigar 'match' operation and collect mismatched bases.

    :param ref_index: reference coordinate where the match op starts.
    :param read_index: read coordinate where the match op starts.
    :param length: number of aligned bases in the op.
    :param read_sequence: read bases covering the op.
    :param ref_sequence: reference bases covering the op.
    :return: (n_mismatches, mismatches) where each mismatch entry is
        [ref_base, read_base, ref_start, ref_end, read_start, read_end].
    """
    mismatches = []
    for offset in range(length):
        read_base = read_sequence[offset]
        ref_base = ref_sequence[offset]
        if read_base == ref_base:
            continue
        # Record half-open one-base intervals on both coordinate systems.
        mismatches.append([ref_base, read_base,
                           ref_index + offset, ref_index + offset + 1,
                           read_index + offset, read_index + offset + 1])
    return len(mismatches), mismatches
e75796f831f038dceeab36236895b092358d7190
695,695
def get_run_id_keys(json_results, run_id_list):
    """Map each run ID to one json_results key (file name) containing it.

    Any file associated with a run ID carries the same metadata as any
    other, so the first matching key per run ID is sufficient.

    Args:
        json_results (dict): benchmark results keyed by file name.
        run_id_list (list): run-ID strings being compared.

    Returns:
        run_id_keys (dict): run ID -> matching json_results key.
    """
    run_id_keys = {}
    for run_id in run_id_list:
        if run_id in run_id_keys:
            continue
        first_match = next((key for key in json_results if run_id in key), None)
        if first_match is not None:
            run_id_keys[run_id] = first_match
    return run_id_keys
c3550508576fe595b01c2569c64d889f2edf07dc
695,696
def _confirm_loop(msg, logger):
    """
    Prompt repeatedly until the user answers yes, yesall, or no.
    Returns a tuple of (yes/no, yesall)
    :param str msg:
    :param Logger logger:
    :rtype: (bool, bool)
    """
    while True:
        logger.log("%s (yes/yesall/no)", msg)
        answer = input().lower()
        if answer == 'yesall':
            return True, True
        if answer in ('yes', 'y'):
            return True, False
        if answer in ('no', 'n'):
            return False, False
        # Anything else: loop and ask again.
20a386015bf56a8ab00adcea64c3dd48b0b9a881
695,697
def getParserArgs():
    """Parser setup for launcher arguments.

    Builds the argparse configuration for the pcap merge/parse/search
    tool and returns the parsed namespace (reads sys.argv).
    """
    parser = argparse.ArgumentParser(
        description="Matt's final project for CDD I & II.",
        fromfile_prefix_chars='@')
    # --- merge group: combine several pcap files into one ---
    parser_merge = parser.add_argument_group('merge')
    parser_merge.add_argument('-m', '--merge', action='store_true', default=False,
                              help='Enables merge option.')
    parser_merge.add_argument('-r', '--pread', action='store',
                              help='--r *.pcap (only wildcards)')
    parser_merge.add_argument('-w', '--pwrite', action='store',
                              help='--w all.pcap')
    # --- parse group (NOTE: reuses the parser_merge variable name) ---
    parser_merge = parser.add_argument_group('parse')
    parser_merge.add_argument('--parse', action='store', default=None,
                              help='--parse file.xml. Parses a Core XML topology.')
    # --- search group: packet search / filtering options ---
    parser_search = parser.add_argument_group('search')
    parser_search.add_argument('--search', action='store_true', default=False,
                               help='Enables search option.')
    parser_search.add_argument('-f', '--file', action='store', default=None,
                               help='PcapNG file.')
    parser_search.add_argument('-s', '--src', action='store',
                               help='Source IPv4 address.')
    parser_search.add_argument('-d', '--dst', action='store',
                               help='Destination IPv4 address.')
    parser_search.add_argument('-i', '--ip-id', action='store', default=None,
                               help='IP identification number.')
    parser_search.add_argument('-t', '--trace', action='store_true', default=False,
                               help='Prints packet\'s trace through the topology')
    parser_search.add_argument('--hide-bcast', action='store_true', default=False,
                               help='Hides broadcast emulated interfaces packets.')
    parser_search.add_argument('--print-readable', action='store_true', default=False,
                               help='Prints packets in a readable format. \
Warning: Flooding.')
    # --- per-protocol filter groups: icmp / tcp / udp ---
    parser_icmp = parser.add_argument_group('icmp')
    parser_icmp.add_argument('--icmp', action='store_true', default=None,
                             help='Enables ICMP protocol.')
    parser_icmp.add_argument('--icmp-ident', action='store', default=None,
                             help='Filter by ICMP ident.')
    parser_tcp = parser.add_argument_group('tcp')
    parser_tcp.add_argument('--tcp', action='store_true', default=False,
                            help='Enables search over TCP protocol.')
    parser_tcp.add_argument("--tcp-port", action='store', default=None,
                            help="80, 22, 21, 23, etc")
    parser_tcp.add_argument("--tcp-proto", action='store', default=None,
                            help="HTTP, SSH, FTP, etc")
    parser_udp = parser.add_argument_group('udp')
    parser_udp.add_argument('--udp', action='store_true', default=None,
                            help='Enables search over UDP protocol.')
    parser_udp.add_argument("--udp-port", action='store', default=None,
                            help="53, 67, 68, 69, etc")
    parser_udp.add_argument("--udp-proto", action='store', default=None,
                            help="DNS, DHCP, NTP, etc")
    return parser.parse_args()
b2926dd65c83b4092ad7b0167c71ea4f68374faa
695,698
def make_uniform(planes_dict, uniques, padding): """ Ensure each section has the same number of images This function makes the output collection uniform in the sense that it preserves same number of planes across sections. It also captures additional planes based on the value of the padding variable Args: planes_dict (dict): planes to keep in different sections uniques (list): unique values for the major grouping variable padding (int): additional images to capture outside cutoff Returns: dictionary: dictionary containing planes to keep """ # max no. of planes max_len = max([len(i) for i in planes_dict.values()]) # max planes that can be added on each side min_ind = min([min(planes_dict[k]) for k in planes_dict]) max_ind = max([max(planes_dict[k]) for k in planes_dict]) max_add_left = uniques.index(min_ind) max_add_right = len(uniques) - (uniques.index(max_ind)+1) # add planes in each section based on padding and max number of planes for section_id, planes in planes_dict.items(): len_to_add = max_len - len(planes) len_add_left = min(int(len_to_add)/2+padding, max_add_left) len_add_right = min(len_to_add - len_add_left+padding, max_add_right) left_ind = int(uniques.index(min(planes)) - len_add_left) right_ind = int(uniques.index(max(planes)) + len_add_right)+1 planes_dict[section_id] = uniques[left_ind:right_ind] return planes_dict
8f67f7226dcf8846707f9d190eb9b15ccb1b27e9
695,700
def index_div_by_2(List, i):
    """Return the element of ``List`` at position ``i / 2`` (truncated toward zero)."""
    position = int(i / 2)
    return List[position]
f2334f9bd77ed8d9ed4fc5db9b58208d241174f0
695,701
def read_wave(path):
    """Reads a .wav file.

    Takes the path, and returns (PCM audio data, sample rate).
    Asserts the file is mono, 16-bit, 16000 Hz.
    """
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        assert wf.getnchannels() == 1
        assert wf.getsampwidth() == 2
        sample_rate = wf.getframerate()
        assert sample_rate == 16000
        pcm_data = wf.readframes(wf.getnframes())
    return pcm_data, sample_rate
6c2aa73a313147f99992a2d2523a1ce1d7c5252b
695,703
def save_image(image, aspect_ratio, filename, format):
    """ Save *image* under a name tagged with the aspect ratio.

    The output path is ``<stem>-<aspect_ratio><ext>``; returns that path.
    """
    stem, ext = splitext(filename)
    outfile = "{}-{}{}".format(stem, aspect_ratio, ext)
    image.save(outfile, format)
    return outfile
1bcbdd85e2ffcbab0e1a4398f9a141d35545e9cf
695,704
def count_features(path):
    """
    path: location of a corpus file with whitespace-delimited tokens and
    │-delimited features within the token

    returns: the number of features in the dataset
    """
    with codecs.open(path, "r", "utf-8") as corpus:
        leading_token = corpus.readline().split(None, 1)[0]
    # N separators delimit N features after the surface form.
    return leading_token.count(u"│")
4fa3092b4d7d2844817c2e2ceb0c6cf0332949da
695,705
def fixture_db_uri(db_path: Path) -> str:
    """Return the uri to an in memory database"""
    return f"sqlite:///{db_path}"
e3f4ed4099b7515e4c650310bbcc84fab4a00f75
695,707
def file_is_valid(file_p: pathlib.PosixPath) -> bool:
    """Check file relevance: by this project's naming convention, only
    files with a .py or .md extension need a table of contents."""
    return file_p.suffix in {".py", ".md"}
918b67ebbb2bcd0421029bea4527ed371ad4c937
695,708
def get_object_name(obj):
    """Return a human readable dotted name for *obj*.

    Classes and functions are named directly; any other object is named
    via its class. Modules under 'zine.plugins.' are shortened to the
    part after that prefix, other 'zine.' modules keep their full module
    path, and everything else is prefixed with 'external.'.
    """
    if inspect.isclass(obj) or inspect.isfunction(obj):
        target = obj
    else:
        target = obj.__class__

    module = target.__module__
    if module.startswith('zine.plugins.'):
        prefix = module.split('.', 2)[-1]
    elif module.startswith('zine.'):
        prefix = module
    else:
        prefix = 'external.' + module
    return prefix + '.' + target.__name__
5ba5ab360045a5091b0cceb3b6db2a04a2cbd28f
695,709
def inv_factorize(outputs, ridges, Xtrain, Ytrain, Xtest, Ytest,
                  Ktrain=None, Ktest=None):
    """return factors needed to solve ridge problem via inv_solve()

    Builds the Gram matrix X^T X and stashes whichever additional arrays
    the requested outputs need.

    Parameters
    ----------
    - [see solve_ridge()]
    """
    factors = {'XTX': Xtrain.T.dot(Xtrain)}

    # With several ridge values precompute X^T Y once; with a single
    # ridge keep the raw training arrays instead.
    if len(ridges) > 1:
        factors['XTY'] = Xtrain.T.dot(Ytrain)
    else:
        factors['Xtrain'] = Xtrain
        factors['Ytrain'] = Ytrain

    needs_test_inputs = ('predictions' in outputs) or ('performance' in outputs)
    if needs_test_inputs:
        factors['Xtest'] = Xtest
    if 'performance' in outputs:
        factors['Ytest'] = Ytest

    return factors
ea0c5fda0efa1c9032ae8728608f6fe6103b23e0
695,711
def extract_tuple(t, num, padding=None):
    """Extract at least *num* values from tuple *t*.

    When *t* has fewer than *num* items the result is padded with
    *padding*; when it has more, all items are kept (no truncation).
    """
    values = list(t)
    # Negative repeat count yields an empty list, so this is a no-op
    # when the tuple is already long enough.
    values.extend([padding] * (num - len(values)))
    return tuple(values)
f1512c485111e18a76ab85e41ea4d7c584122918
695,712
def arrayRankTransform(arr):
    """Replace each element by its dense rank (1-based) among the
    distinct values of the array.

    Equal elements share a rank and ranks have no gaps, e.g.
    [40, 10, 20, 30] -> [4, 1, 2, 3] and [100, 100, 100] -> [1, 1, 1].

    :type arr: List[int]
    :rtype: List[int]
    """
    # Sorted distinct values get ranks 1, 2, 3, ... — this replaces the
    # original's fragile manual index bookkeeping with the same result.
    rank_of = {value: rank
               for rank, value in enumerate(sorted(set(arr)), start=1)}
    return [rank_of[value] for value in arr]
7c7ecb540f726d92c7ffc7760d340ea91237e4de
695,713
def get_clang_tidy_warnings(
    line_filter, build_dir, clang_tidy_checks, clang_tidy_binary, files
):
    """Run clang-tidy and return its stdout as a list of lines.

    Prints the command being run; on a non-zero exit status prints the
    tool's stderr and re-raises subprocess.CalledProcessError.
    """
    command = f"{clang_tidy_binary} -p={build_dir} -checks={clang_tidy_checks} -line-filter={line_filter} {files}"
    print(f"Running:\n\t{command}")

    try:
        completed = subprocess.run(command, capture_output=True, shell=True, check=True,)
    except subprocess.CalledProcessError as e:
        print(
            f"\n\nclang-tidy failed with return code {e.returncode} and error:\n{e.stderr.decode('utf-8')}"
        )
        raise

    return completed.stdout.decode("utf-8", "ignore").splitlines()
0a1487a504453b6e5267d2dd106c7894c379c2ad
695,714
def index_storage_mode_to_param(value, default="plasma"):
    """Converts the index storage mode to what Couchbase understands"""
    # Unknown modes pass through unchanged.
    aliases = {"default": default, "memopt": "memory_optimized"}
    return aliases.get(value, value)
1eb736f1b1014c5893b82c4edeb4f9f8e611fadd
695,715
def untokenize(words):
    """
    Untokenizing a text undoes the tokenizing operation, restoring
    punctuation and spaces to the places that people expect them to be.
    Ideally, `untokenize(tokenize(text))` should be identical to `text`,
    except for line breaks.

    NOTE: mutates the input list in place while normalizing pauses.
    The replace chains below are order-sensitive; do not reorder.
    """
    # standardize long pauses to "..."
    for i in range(len(words)):
        match = re.sub(r'\.{2,}', r'...', words[i])
        if match != words[i]:
            words[i] = match
    text = ' '.join(words)
    # step1: CJK period, curly apostrophe, Penn-Treebank quotes (``/''),
    # and space-before-comma.
    step1 = text.replace("。",".").replace("’","'").replace("`` ", '"').replace(" ''", '"').replace(" ` ", " '").replace(" ,",",")
    # step2: dashes (em/en) and curly double/single quotes.
    step2 = step1.replace(" -- ", " - ").replace("—","-").replace("–","-").replace('”','"').replace('“','"').replace("‘","'").replace("’","'")
    # step3: reattach contractions split by tokenization (n't, 'll, 're, ...).
    step3 = step2.replace(" '", "'").replace(" n't", "n't").replace("n' t", "n't").replace("t' s","t's").replace("' ll", "'ll").replace("I' m", "I'm").replace(
        "can not", "cannot").replace("I' d", "I'd").replace("' re", "'re").replace("t ' s", "t's").replace("e' s", "e's")
    # step4: collapse spaced punctuation pairs plus a few corpus-specific
    # typo fixes (n'y, yarning, " om V").
    step4 = step3.replace("? !", "?!").replace("! !", "!!").replace("! ?", "!?").replace("n'y","n't").replace('yarning','yawning').replace(" om V", " on V")
    # step5: spaced ellipsis and informal contractions.
    step5 = step4.replace('. . .', '...').replace("wan na", "wanna").replace("gon na", "gonna")
    # step6: drop whitespace between a letter and trailing punctuation.
    step6 = re.sub(r'(?<=[a-zA-Z])\s+(?=[.,:;\?!])', r'', step5)
    # step7/step8: re-pad "..." with a space on each side.
    step7 = re.sub(r'(\S)(\.{3})', r'\1 \2', step6)
    step8 = re.sub(r'(\.{3})(\S)', r'\1 \2', step7)
    return step8.strip()
91a94a119dd35a62aeae42765e8df221dc5ecb5e
695,716
def pobierztypy(ile, maks):
    """Read the user's lottery picks: ``ile`` distinct integers in [1, maks].

    Re-prompts on non-integer input, out-of-range values, or duplicates.
    Prompts are printed in Polish ("Wytypuj ... liczb" = "Pick ... numbers").

    :param ile: how many numbers to collect
    :param maks: upper bound (inclusive) for each pick
    :return: set of the collected picks
    """
    print("Wytypuj %s z %s liczb: " % (ile, maks))
    typy = set()
    i = 0
    while i < ile:
        try:
            typ = int(input("Podaj liczbę %s: " % (i + 1)))
        except ValueError:
            # Non-numeric input: report ("Bad data!") and re-ask the same slot.
            print("Błędne dane!")
            continue
        # Accept only in-range, not-yet-chosen numbers; otherwise silently re-ask.
        if 0 < typ <= maks and typ not in typy:
            typy.add(typ)
            i = i + 1
    return typy
71d5c4c2ceb14eec9126516f49c38ea214b6f0af
695,717
def determine_row(sheet, site_hpo_id):
    """
    Find the row index in the sheet that holds the error information for
    the HPO ID in question.

    :param sheet (dataframe): pandas dataframe with numbers indicating
        data incompleteness; must expose a 'src_hpo_id' column.
    :param site_hpo_id (string): HPO ID used to generate the e-mail.
    :return row_num (int): index of the (last) matching row on the sheet.
    :raises ValueError: if the HPO ID does not appear in the sheet.
    """
    row_num = None  # None sentinel instead of the old 9999999 magic number
    for idx, site_id in enumerate(sheet['src_hpo_id']):
        if site_hpo_id == site_id:
            row_num = idx  # keep scanning: last match wins, as before

    if row_num is None:  # never assigned -> ID absent
        raise ValueError("The HPO ID was not found in the Excel file.")

    return row_num
a19a7da300952b750dbf8004ad7435b284baed93
695,718
def calc_install(runs, cost_index, system_index, tax_rate):
    """
    Calculate total install cost for a manufacturing job.

    :param runs: Number of runs in the job.
    :param cost_index: Cost Index for the item.
    :param system_index: Cost Index for the star system where construction
        would occur.
    :param tax_rate: Tax rate (percent) for the facility, e.g. 10 for the
        10% NPC-station tax.
    :return: Job fee plus facility tax.
    """
    base_fee = runs * float(cost_index) * float(system_index)
    tax = base_fee * float(tax_rate) / 100
    return base_fee + tax
bf0b9da1f07e6e10008f4a1e1e498d1d3d6845bd
695,719
def xys2dists(x, y, xarr, yarr):
    """
    Euclidean distance from the origin point to each mark point.

    x/y = x/y-coordinate of the origin
    xarr/yarr = x/y-coordinates of mark points
    """
    delta_x = abs(xarr - x)
    delta_y = abs(yarr - y)
    return (delta_x * delta_x + delta_y * delta_y) ** 0.5
c6721e7d89274a49f2b7ada20adf060b04daa61f
695,720
def end_game(game, *args):
    """Print the end-of-game message (first extra arg) and signal game over."""
    print(args[0])
    return True
71b1aaa0d5c24e5a16c7f6acd2226a5042401f0e
695,721
def is_boundary(horizon, x):
    """
    Classify a particle's displacement boundary condition from x[0]:
       2 : no boundary condition (arbitrary marker value)
      -1 : displacement loaded IN -ve direction (left edge)
       1 : displacement loaded IN +ve direction (right edge)
       0 : clamped boundary (reserved marker, not produced here)
    """
    if x[0] < 1.5 * horizon:
        return -1
    if x[0] > 1.0 - 1.5 * horizon:
        return 1
    # Interior particle: no boundary condition applies.
    return 2
0e8252be537ab72b2a469e2d3eadb0d1336b6326
695,722
def sci_notation(num, decimal_digits=1, precision=None, exponent=None):
    """
    Returns a string representation of the scientific notation of the given
    number formatted for use with LaTeX or Mathtext, with specified number
    of significant decimal digits and precision (number of decimal digits
    to show). The exponent to be used can also be specified explicitly.
    """
    if exponent is None:
        exponent = int(floor(log10(abs(num))))
    mantissa = round(num / float(10**exponent), decimal_digits)
    if precision is None:
        precision = decimal_digits
    head = r"${0:.{1}f}\times$".format(mantissa, precision)
    tail = "10" + r"$^{{{0:d}}}$".format(exponent)
    return head + tail
4990887697d86a5ce5453f289c559991652c5269
695,723
def get_product():
    """Gets the product that the image generator is being used to build"""
    product_name = "BIG-IP"
    return product_name
05c9875b1a70f1f669dc518d8c02aef18fc1af1f
695,724
def split_hostname_index(name, default=None):
    """
    Split the index out of a hostname.

    E.g. splits "test~1" into ("test", 1). If no index is present,
    returns (name, default).

    (Docstring previously showed "test^1", but the separator the regex
    matches is '~'.)
    """
    match = re.match(r'^(.*)~(\d+)$', name)
    if match:
        return match.group(1), int(match.group(2))
    return name, default
eb4d036e0cdc96d43d1a23946a56f48f88d887c8
695,725
def footer_datetime(timestamp: str) -> datetime:
    """Takes an embed timestamp (ISO-8601) and returns a datetime object;
    it is timezone-aware when the timestamp carries a UTC offset."""
    parsed = datetime.fromisoformat(timestamp)
    return parsed
a7c26eb6c4855af55eaaaee1f28a46d9a14934bf
695,726
def gaussian(x, mean, var):
    """Evaluate the 1-d Gaussian density with the given mean and variance
    at `x`.

    .. math:: \\frac{1}{\\sigma\\sqrt{2\\pi}}e^{-\\frac{1}{2}(\\frac{x-\\mu}{\\sigma})^2}
    """
    normalizer = sqrt(2*pi*var)
    weight = exp(-((x-mean)**2)/(2*var))
    return weight/float(normalizer)
2160b6c27352898d33e204144e3b0a7f1982c183
695,728
def get_modified_path(originalPath):
    """Strip every non-letter character so the sentence can be used as a path."""
    letters = [ch for ch in originalPath if ch.isalpha()]
    return "".join(letters)
5039fc1ec9b4360ee31d5c7d2f9268fd8c6a22c6
695,729
def template_range(value):
    """Return a range 1..value.

    Values that cannot be coerced to int (None, bad strings, ...) are
    treated as 0, yielding an empty range. The previous bare ``except:``
    also swallowed KeyboardInterrupt/SystemExit; narrowed to the two
    exceptions int() can actually raise here.
    """
    try:
        value = int(value)
    except (TypeError, ValueError):
        value = 0
    return range(1, value + 1)
7d4b369b76be1828187f5490de9e2609c04de6d3
695,730
def remove_empty_file(_files):
    """Return only the files whose on-disk size (via ``.abspath``) is non-zero."""
    non_empty = []
    for candidate in _files:
        if os.path.getsize(candidate.abspath) > 0:
            non_empty.append(candidate)
    return non_empty
293ed9cb6a9c99a4496a823da75fdcb6bc7befab
695,731
def v6_int_to_packed(address):
    """Represent an address as 16 packed bytes in network (big-endian) order.

    Args:
        address: An integer representation of an IPv6 IP address.

    Returns:
        The 16-byte packed integer address in network (big-endian) order.

    Raises:
        ValueError: If address is negative or too large for an IPv6 address.
    """
    try:
        packed = address.to_bytes(16, 'big')
    except OverflowError:
        raise ValueError('Address negative or too large for IPv6')
    return packed
5f4bb8cbed9f1cddace9c0257efc6f5ec2e2d78d
695,732
def pascal_row(n: int) -> List[int]:
    """Returns n-th row of Pascal's triangle.

    Row n has n + 1 entries: the binomial coefficients C(n, 0)..C(n, n).
    Uses stdlib math.comb instead of the previous hand-rolled running
    product + mirror-extension logic (identical output).
    """
    from math import comb  # local import keeps the module's import block untouched
    return [comb(n, k) for k in range(n + 1)]
4f29bf341f2c135096710b5970a2f11c0ca3ae63
695,734
def tokenizer(x):
    """Trivial (identity) tokenizer: returns the input unchanged."""
    return x
49b9559363def5686c254164d9038087c29d3e66
695,735
def split_into_rgb_channels(image):
    """
    Split the target image into its red, green and blue channels.

    image - a numpy array of shape (rows, columns, 3) with channels stored
    in B, G, R order (red is index 2, blue is index 0).
    output - three numpy arrays of shape (rows, columns) and dtype same as
    image, containing the red, green and blue channels respectively.
    """
    blue = image[:, :, 0]
    green = image[:, :, 1]
    red = image[:, :, 2]
    return red, green, blue
f59e865332a8203747391653caa942f6885533d9
695,736
def parse_args():
    """Parse input arguments.

    Builds the CLI for comparing detector output against ground truth
    (filtering, evaluation subset and plotting options) and returns the
    parsed namespace (reads sys.argv).
    """
    parser = argparse.ArgumentParser(description="")
    # Required input files.
    parser.add_argument("--det", dest="detections_file_path", nargs="+", required=True,
                        help="File with detector output.")
    parser.add_argument("--gt", dest="ground_truth_file_path", nargs="+", required=True,
                        help="File with ground truth bounding boxes.")
    # Evaluation-subset switches.
    parser.add_argument("--r", "--reasonable", dest="is_reasonable_subset", action="store_true",
                        help="Apply filter to ground truth: clip bounding boxes on image borders \
                 (according to `imsize` parameter values) and treat bounding boxes with height \
                 out of `objsize` range as ignored.")
    parser.add_argument("--v", "--visible", dest="is_visible_subset", action="store_true",
                        help="Apply filter to ground truth: delete bounding boxes with visible \
                 tags other than visible.")
    parser.add_argument("--f", "--filter", dest="do_detections_filtering", action="store_true",
                        help="Apply filter to detector output: remove bounding boxes with height "
                             "out of `objsize` range.")
    # Geometry used by the filters above.
    parser.add_argument("--im", "--imsize", dest="image_size", nargs=2, type=int,
                        default=(1920, 1080),
                        help="Image resolution. Used for filtering.")
    parser.add_argument("--obj", "--objsize", dest="object_size", nargs=2, type=int,
                        default=(10, 600),
                        help="Viable object height range. \
Used for filtering.")
    # Output / matching / class options.
    parser.add_argument("--o", "--result", dest="result_file_path", default="",
                        help="Path to file to save results to.")
    parser.add_argument("--show", dest="show_plots", action="store_true",
                        help="Show plots with quality curves.")
    parser.add_argument("--mm", "--multiple_matches", dest="allow_multiple_matches",
                        action="store_true",
                        help="Allow multiple matches per one ignored ground truth bounding box.")
    parser.add_argument("--c", "--class_lbl", dest="class_lbl", type=str, default="pedestrian",
                        help="Target class.")
    parser.add_argument("--s", "--square", dest="treat_as_square", action="store_true",
                        help="Treat object sizes as sqrt from square.")
    return parser.parse_args()
445d3018a4643813dcab84f8658b636ae75fc056
695,737
def get_active_units(json):
    """ID and return only active unit IDs.

    Keeps entries that are not deactivated and whose sensor device id
    starts with '99000512'; prints each matching sensor id as it goes.
    """
    units = []
    for unit in json:
        if unit['deactivated'] is not None:
            continue
        if not unit['sensor_device_id'].startswith('99000512'):
            continue
        print(unit['sensor_device_id'])
        units.append({
            'unit_id': unit['sensor_device_id'],
            'owner': unit['owner_id']['owner_name'],
            'activated': unit['activated'],
            'evo_type': unit['shipper_box_model_type']
        })
    return units
601a8b1f72fd43c8f2c70624434e657c9b6548bc
695,741
def binary_to_decimal(binary):
    """Converts a binary number(str) into a decimal(int).

    Any character other than "0" contributes its positional weight 2**i
    (matching the original's lenient handling of non-binary characters).
    """
    # Reverse so index i corresponds to the weight 2**i.
    return sum(2 ** i
               for i, digit in enumerate(reversed(binary))
               if digit != "0")
c5a25cc9594f52e886b1b2a7d10667de709e5d0b
695,742
def short_hex(value):
    """
    Convert to a nice hex number without the 0x in front to save screen space.

    Zero-pads to at least two digits. Uses format() instead of slicing
    ``hex(value)[2:]``, which mangled negative inputs (e.g. -1 -> 'x1').

    :param value: value
    :return: short string hex representation
    """
    return format(value, "02x")
8d8c0b28c02dc3ba6a13c12fb4e44cb3e1229c7c
695,743
def find_subtractive_combinations(string):
    """ Finds subtractive combination pairs in string.

    PARAMETERS:
        string : str

    RETURNS:
        ( (str pair, int index), ... )
        Tuple of all ordered subtractive combination pairs found and the
        respective index at which each pair starts.
    """
    numerals = ["I", "V", "X", "L", "C", "D", "M"]
    pairs = []
    # Start from the maximum numeral so the first character can never
    # form a pair on its own.
    previous = "M"
    for position, current in enumerate(string):
        # current "outranks" previous when it is not among the numerals
        # up to and including previous -> subtractive combination.
        if current not in numerals[:numerals.index(previous) + 1]:
            pairs.append((previous + current, position - 1))
        previous = current
    return tuple(pairs)
b3fdc589e98d69ac4af9be219853a8e82f24a43a
695,744
def swap_columns(a, b, array):
    """
    Function that swaps columns of a given matrix (the input is untouched).

    :param a: int index of the first column
    :param b: int index of the second column
    :param array: numpy array
    :return: array_swapped: new numpy array with the columns swapped
    """
    array_swapped = array.copy()
    # Fancy-index both columns at once from the original array.
    array_swapped[:, [a, b]] = array[:, [b, a]]
    return array_swapped
40b848b24c6047d6faf7f07c0e9a5e18d78101fc
695,745
def random_string(length):
    """Generate random string of the given length (letters and digits)."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
f5e83dd2215d708b0ce5f8bd3e344b8fed12277d
695,746