content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os


def get_key(audio_file):
    """Return the ID for an audio file path.

    The ID is the base file name with its final four characters stripped
    (i.e. a dot plus a three-character extension such as ".wav").
    """
    base_name = os.path.basename(audio_file)
    return base_name[:-4]
ca85360f2da8a02068a90e8cdbe4605aa89e1997
693,050
def convert_back_to_year(val):
    """Map a normalized value in [0, 1] linearly onto a year in [1922, 2011]."""
    first_year = 1922.
    last_year = 2011.
    assert 0 <= val <= 1
    return first_year + val * (last_year - first_year)
3e378b53063503a5c4ffb13e4bdecc235bf10042
693,051
import random
from uuid import uuid4


def reproduce_agents(params, substep, state_history, prev_state):
    """
    Generate a new agent from a nearby agent pair, subject to rules. Not done.

    Two neighboring agents of the same type that each have at least
    `reproduction_food_threshold` food may reproduce once per step: both
    parents pay `reproduction_food` and a child with double that food is
    placed on a free location.

    :param params: simulation parameters (reproduction_* keys are read).
    :param substep: unused here (cadCAD policy signature).
    :param state_history: unused here (cadCAD policy signature).
    :param prev_state: previous state; 'utils', 'agents' and 'sites' are read.
    :return: dict with 'agent_delta_food' (food cost per parent label) and
        'agent_create' (new agents keyed by fresh uuid4).
    """
    # 3rd Party
    # 1st Party
    # Helper callables are carried inside the state under 'utils'.
    get_free_location = prev_state['utils']['get_free_location']
    nearby_agents = prev_state['utils']['nearby_agents']
    is_neighbor = prev_state['utils']['is_neighbor']
    check_location = prev_state['utils']['check_location']
    agents = prev_state['agents']
    sites = prev_state['sites']
    food_threshold = params['reproduction_food_threshold']
    reproduction_food = params['reproduction_food']
    # NOTE(review): reproduction_probability is read but never used below —
    # the probabilistic rule appears unimplemented (docstring says "Not done").
    reproduction_probability = params['reproduction_probability']
    busy_locations = [agent['location'] for agent in agents.values()]
    already_reproduced = []
    new_agents = {}
    agent_delta_food = {}
    for agent_type in set(agent['type'] for agent in agents.values()):
        # Only reproduce agents of the same type
        specific_agents = {label: agent
                           for label, agent in agents.items()
                           if agent['type'] == agent_type}
        for agent_label, agent_properties in specific_agents.items():
            location = agent_properties['location']
            # Skip agents that are too hungry or already reproduced this step.
            if (agent_properties['food'] < food_threshold
                    or agent_label in already_reproduced):
                continue
            kind_neighbors = nearby_agents(is_neighbor, location, specific_agents)
            available_partners = [label for label, agent in kind_neighbors.items()
                                  if agent['food'] >= food_threshold
                                  and label not in already_reproduced]
            reproduction_location = get_free_location(check_location, location,
                                                      sites, busy_locations)
            if reproduction_location is not False and len(available_partners) > 0:
                reproduction_partner_label = random.choice(available_partners)
                # NOTE(review): reproduction_partner is looked up but never
                # used afterwards.
                reproduction_partner = agents[reproduction_partner_label]
                already_reproduced += [agent_label, reproduction_partner_label]
                # Both parents pay the reproduction cost ...
                agent_delta_food[agent_label] = -1.0 * reproduction_food
                agent_delta_food[reproduction_partner_label] = -1.0 * reproduction_food
                # ... and the child starts with the combined amount, age 0.
                new_agent_properties = {'type': agent_type,
                                        'location': reproduction_location,
                                        'food': 2.0 * reproduction_food,
                                        'age': 0}
                new_agents[uuid4()] = new_agent_properties
                # Reserve the location so a later pair can't reuse it.
                busy_locations.append(reproduction_location)
    return {'agent_delta_food': agent_delta_food, 'agent_create': new_agents}
4f7b8844e3ee017e53864ae62179dcc91b412b40
693,052
import calendar
import time


def datetime_to_ms(dt):
    """
    Convert an unaware datetime object to milliseconds. This will be
    a UTC time. The SMC stores all times in UTC and will do the time
    conversions based on the local timezone.

    Example of converting a datetime to milliseconds::

        utc_time = datetime.strptime("2018-06-04T00:00:00", "%Y-%m-%dT%H:%M:%S")
        datetime_to_ms(utc_time)

    :param dt datetime: pass in python datetime object.
    :return: value representing the datetime in milliseconds
    :rtype: int

    Bug fix: the previous implementation used ``time.mktime``, which
    interprets the time tuple in the *local* timezone, contradicting the
    documented UTC contract; ``calendar.timegm`` treats it as UTC.
    """
    return int(calendar.timegm(dt.timetuple()) * 1000)
4fe473d0a563c54846e1f0be8d9fc879883c0122
693,053
def unexcused_marker(events_unexcused):
    """
    If the brother has 3 or more unexcused absences the first column of the
    row is marked for the secretary's ease of viewing/use. Puts 3+, 5+, and
    7+ depending on the number of unexcused absences.

    :param list[Event] events_unexcused: a list of events that the brother in
        question has unexcused (an int count is also accepted).
    :returns: a list containing a string with info about the number of
        unexcused absences this specific brother has. An empty string if it's
        less than 3. This is the starting point for any row containing
        brother attendance info.

    Bug fix: the previous implementation compared the list argument directly
    to an int, which raises TypeError in Python 3 for the documented input;
    the count is now taken with len(). Callers that passed an int keep working.
    """
    count = events_unexcused if isinstance(events_unexcused, int) else len(events_unexcused)
    if count >= 7:
        return ['>>>7+>>>']
    if count >= 5:
        return ['>>5+>>']
    if count >= 3:
        return ['>3+>']
    return ['']
cfebc80b735a60577ff509592daa0bf9b3d10867
693,055
def _to_list(val): """Return the variable converted to list type.""" if isinstance(val, list): return val else: return [val]
4fddae97a267fd879182cec75b1fdb553a892857
693,056
import os


def setup_base_cache():
    """Set up the base cache directory.

    Resolution order: $POPPER_CACHE_DIR if set, otherwise
    $XDG_CACHE_HOME/.popper (falling back to $HOME/.cache/.popper).

    Returns:
        str: The path to the base cache directory (created if missing).
    """
    cache_override = os.environ.get('POPPER_CACHE_DIR')
    if cache_override:
        base_cache = cache_override
    else:
        xdg_cache = os.environ.get(
            'XDG_CACHE_HOME', os.path.join(os.environ['HOME'], '.cache'))
        base_cache = os.path.join(xdg_cache, '.popper')
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists()/os.makedirs() pair.
    os.makedirs(base_cache, exist_ok=True)
    return base_cache
7993193af481e97c58f955292979ecbe94c33888
693,057
def get_networks(vcenter, the_vm, username):
    """Obtain a list of all networks a VM is connected to.

    :Returns: List of network names with the "<username>_" prefix removed.

    :param vcenter: An established connection to vCenter
    :type vcenter: vlab_inf_common.vmware.vcenter.vCenter
    :param the_vm: The virtual machine that owns the specific NIC
    :type the_vm: vim.VirtualMachine
    :param username: The name of the user who owns the VM
    :type username: String
    """
    prefix = '{}_'.format(username)
    connected = []
    for net_name, net_object in vcenter.networks.items():
        # Only networks belonging to this user are considered.
        if not net_name.startswith(username):
            continue
        for vm in net_object.vm:
            if vm.name == the_vm.name:
                connected.append(net_name.replace(prefix, ''))
    return connected
a2c6d691fe0ebe1bc77a1a17f05de4ed4e7c342c
693,058
def L6_summary_createseriesdict(cf,ds):
    """
    Purpose:
     Create a dictionary containing lists of variables, operators and formats
     for use by the daily, annual and cumulative routines.
    Usage:
     series_dict = L6_summary_createseriesdict(cf,ds)
     where cf is a control file object
           ds is an OzFluxQC data structure
           series_dict is a dictionary of various variable lists
    Author: PRI
    Date: June 2015
    """
    # NOTE(review): ts is computed but never used in this function.
    ts = int(ds.globalattributes["time_step"])
    series_dict = {"daily":{},"annual":{},"cumulative":{},"lists":{}}
    # adjust units of NEE, NEP, GPP and ER
    sdl = series_dict["lists"]
    # CO2 flux series: names come from the control file, filtered to those
    # actually present in the data structure.
    sdl["nee"] = [item for item in cf["NEE"].keys() if "NEE" in item[0:3] and item in ds.series.keys()]
    sdl["gpp"] = [item for item in cf["GPP"].keys() if "GPP" in item[0:3] and item in ds.series.keys()]
    sdl["fre"] = [item for item in cf["ER"].keys() if "ER" in item[0:2] and item in ds.series.keys()]
    # NEP series are the NEE series renamed, again filtered to the data.
    sdl["nep"] = [item.replace("NEE","NEP") for item in sdl["nee"]]
    sdl["nep"] = [item for item in sdl["nep"] if item in ds.series.keys()]
    sdl["co2"] = sdl["nee"]+sdl["nep"]+sdl["gpp"]+sdl["fre"]
    # every CO2 series is summed daily and cumulatively, 2 decimal places
    for item in sdl["co2"]:
        series_dict["daily"][item] = {}
        series_dict["cumulative"][item] = {}
        series_dict["daily"][item]["operator"] = "sum"
        series_dict["daily"][item]["format"] = "0.00"
        series_dict["cumulative"][item]["operator"] = "sum"
        series_dict["cumulative"][item]["format"] = "0.00"
    # water series: evapotranspiration and precipitation, also summed
    sdl["ET"] = [item for item in ds.series.keys() if "ET" in item[0:2]]
    sdl["Precip"] = [item for item in ds.series.keys() if "Precip" in item[0:6]]
    sdl["h2o"] = sdl["ET"]+sdl["Precip"]
    for item in sdl["h2o"]:
        series_dict["daily"][item] = {"operator":"sum","format":"0.00"}
        series_dict["cumulative"][item] = {"operator":"sum","format":"0.00"}
    # meteorological/flux series present in the data are averaged daily,
    # each with its own output precision
    if "Ah" in ds.series.keys():
        series_dict["daily"]["Ah"] = {"operator":"average","format":"0.00"}
    if "Cc" in ds.series.keys():
        series_dict["daily"]["Cc"] = {"operator":"average","format":"0.0"}
    if "Fc" in ds.series.keys():
        series_dict["daily"]["Fc"] = {"operator":"average","format":"0.00"}
    if "Fe" in ds.series.keys():
        series_dict["daily"]["Fe"] = {"operator":"average","format":"0.0"}
    if "Fh" in ds.series.keys():
        series_dict["daily"]["Fh"] = {"operator":"average","format":"0.0"}
    if "Fg" in ds.series.keys():
        series_dict["daily"]["Fg"] = {"operator":"average","format":"0.0"}
    if "Fn" in ds.series.keys():
        series_dict["daily"]["Fn"] = {"operator":"average","format":"0.0"}
    if "Fsd" in ds.series.keys():
        series_dict["daily"]["Fsd"] = {"operator":"average","format":"0.0"}
    if "Fsu" in ds.series.keys():
        series_dict["daily"]["Fsu"] = {"operator":"average","format":"0.0"}
    if "Fld" in ds.series.keys():
        series_dict["daily"]["Fld"] = {"operator":"average","format":"0.0"}
    if "Flu" in ds.series.keys():
        series_dict["daily"]["Flu"] = {"operator":"average","format":"0.0"}
    if "ps" in ds.series.keys():
        series_dict["daily"]["ps"] = {"operator":"average","format":"0.00"}
    if "q" in ds.series.keys():
        series_dict["daily"]["q"] = {"operator":"average","format":"0.0000"}
    if "RH" in ds.series.keys():
        series_dict["daily"]["RH"] = {"operator":"average","format":"0"}
    if "Sws" in ds.series.keys():
        series_dict["daily"]["Sws"] = {"operator":"average","format":"0.000"}
    if "Ta" in ds.series.keys():
        series_dict["daily"]["Ta"] = {"operator":"average","format":"0.00"}
    if "Ts" in ds.series.keys():
        series_dict["daily"]["Ts"] = {"operator":"average","format":"0.00"}
    if "ustar" in ds.series.keys():
        series_dict["daily"]["ustar"] = {"operator":"average","format":"0.00"}
    if "Ws" in ds.series.keys():
        series_dict["daily"]["Ws"] = {"operator":"average","format":"0.00"}
    # NOTE(review): "annual" and "monthly" are aliases of the SAME dict
    # object as "daily" (not copies) — mutating one mutates all three.
    # Confirm the downstream routines rely on this sharing.
    series_dict["annual"] = series_dict["daily"]
    series_dict["monthly"] = series_dict["daily"]
    return series_dict
7355292aa0c3829ecfbecb1acc1af28d4152b2d7
693,059
def get_dotted(data, path):
    """Look up a value in a nested dict via a dot-separated key path.

    Returns None when any intermediate key is missing or not a dict.
    """
    head, *rest = path.split('.')
    if not rest:
        return data.get(head)
    if isinstance(data.get(head), dict):
        return get_dotted(data[head], '.'.join(rest))
    return None
4b33c9ea1a87c2de75c5f3b3e7b375d5ac6049ba
693,060
def dscp_to_tos(dscp):
    """Convert a DSCP value to its TOS byte.

    The DSCP occupies the upper six bits of the TOS field, so the
    conversion is a two-bit left shift (equivalently, multiply by 4).
    The previous implementation round-tripped through ``bin``/``int``
    to achieve the same multiplication.

    :param dscp: DSCP value (int).
    :return: TOS value (int).
    """
    return dscp << 2
02e644bbb04beb7f588e1cdf3f8661957325b588
693,061
def should_inject_custom(code_info):
    """Returns True if the code looks like suitable to inject our custom
    stuff into it — i.e. its command is exactly 'custom'.
    """
    is_custom_command = code_info.command == 'custom'
    return is_custom_command
518525112e2e5c7dfe5dcb85c9e2496b44822900
693,062
import re


def build_match_and_apply_functions(pattern, replace):
    """Build a (matcher, applier) pair of closures for one regex rule.

    The matcher returns the match object (truthy when the rule applies);
    the applier performs the substitution on a word.
    """
    compiled = re.compile(pattern)

    def matches_rule(word):
        return compiled.search(word)

    def apply_rule(word):
        return compiled.sub(replace, word)

    return matches_rule, apply_rule
8ca7d1626f519e19e086eb60260b8d968229b4cf
693,063
import re

# Keywords are embedded as %name%; compiled once at import time.
# Raw string fixes the invalid "\w" escape of the old pattern
# ([\w_] is also redundant — \w already includes the underscore).
_KEYWORD_RE = re.compile(r"%(\w+)%")


def repl(lookup, string):
    """Replaces keywords within a string.

    Args:
        lookup: dict in which to look up keywords.
        string: string with embedded keywords. Ex. %key%.

    Return:
        String containing the replacements. Keywords missing from the
        lookup are left alone (the %key% text is preserved).
    """
    # m.group(0) is the full "%key%" text, so misses pass through unchanged.
    return _KEYWORD_RE.sub(
        lambda m: lookup.get(m.group(1), m.group(0)), string)
6d74a90e05f83240b5e02f3f076124d8e9061c2d
693,064
def sort_files(files):
    """Returns a sorted version of the given list of File's (or other
    structures that define an 'id' data member).

    The files are sorted with their native sort order and entries whose
    ``id`` equals the previous entry's are dropped as duplicates.

    Parameters
    ----------
    files : list of :py:class:`bob.db.base.File`
        The list of files to be uniquified and sorted.

    Returns
    -------
    sorted : list of :py:class:`bob.db.base.File`
        The sorted list of files, with duplicates removed.
    """
    unique = []
    previous_id = object()  # sentinel that no real id can equal
    for f in sorted(files):
        if f.id != previous_id:
            unique.append(f)
            previous_id = f.id
    return unique
3ada6e925da246ff31ef63fdb863e9318ac17792
693,065
from typing import Union from pathlib import Path import re def _alter_spark_sql(sql: str, hadoop_local: Union[str, Path]) -> str: """Handle special paths in SQL code so that it can be used locally. :param sql: A SQL query. :param hadoop_local: The local path of Hadoop. :return: The altered SQL code which can be used locally. """ sql = re.sub(r"viewfs://[^/]+/", "/", sql) prefixes = ["/sys/", "/apps", "/user"] for prefix in prefixes: sql = sql.replace(prefix, f"{hadoop_local}{prefix}") return sql
83389ac58f0e976119274654f2078ed2e5c3ae47
693,066
from datetime import datetime


def from_timestamp(timestamp: str) -> datetime:
    """Parse a raw API timestamp (e.g. ``20200102T030405.000Z``) into a
    :class:`datetime.datetime` object."""
    api_format = "%Y%m%dT%H%M%S.000Z"
    return datetime.strptime(timestamp, api_format)
81d3c0a5297fa0053ae9a951147fac6f46907956
693,067
def format_action(a):
    """
    Convert Action object into string representation for pompdp file

    e.g scan machine (0, 0)
        0.0scan
    e.g. exploit service 1 on machine (1, 0)
        1.0exp1
    """
    address = "a{0}{1}".format(a.target[0], a.target[1])
    suffix = "scan" if a.is_scan() else "exp{0}".format(a.service)
    return address + suffix
b701538922db069ba550bfa6abeeb2fed02ce30a
693,068
import re


def get_win_valid_filename(filename: str, sub: str = '') -> str:
    """Return a Windows-legal file name.

    Characters illegal in Windows file names (``\\ / : * ? " < > |`` and
    line breaks) are replaced with *sub*.

    :param filename: candidate file name.
    :param sub: replacement text; discarded (treated as '') when it itself
        contains an illegal character anywhere.
    :return: the sanitized file name.
    """
    patn = r'[\\/:*?"<>|\r\n]+'
    # Bug fix: use search() rather than match() — match() only rejected a
    # replacement whose FIRST character was illegal, letting e.g. 'x\\'
    # slip through and re-pollute the result.
    if sub and re.search(patn, sub):
        sub = ''
    return re.sub(patn, sub, filename)
39b87113ed391e90d52a4e17d319fb6e7f2f62a2
693,069
def pos_next_to(pos_a, pos_b):
    """
    Test if two positions are next to each other.

    The positions have to line up either horizontally or vertically;
    diagonally adjacent positions are not counted.
    """
    ax, ay = pos_a
    bx, by = pos_b
    manhattan_distance = abs(ax - bx) + abs(ay - by)
    return manhattan_distance == 1
eae09addf8a119c5ce9f401af8703068ea98c8c9
693,070
def wemo_entity_suffix_fixture():
    """Fixture to select a specific entity for wemo_entity."""
    # Empty suffix means no specific entity is selected.
    suffix = ""
    return suffix
525db1d0c74ce5bc09991cd11c31275069622687
693,071
import random
import string


def gen_dummy_object(class_, doc):
    """Create a dummy object based on the definitions in the API Doc.

    Nested "vocab:" properties are generated recursively; all other
    properties receive a random 6-character uppercase/digit token.
    """
    def _random_token():
        alphabet = string.ascii_uppercase + string.digits
        return ''.join(random.choice(alphabet) for _ in range(6))

    dummy = {"@type": class_}
    if class_ not in doc.parsed_classes:
        return dummy
    for prop in doc.parsed_classes[class_]["class"].supportedProperty:
        if "vocab:" in prop.prop:
            nested_class = prop.prop.replace("vocab:", "")
            dummy[prop.title] = gen_dummy_object(nested_class, doc)
        else:
            dummy[prop.title] = _random_token()
    return dummy
86e981c7f62ccddda0463145c825d43dc1c3c476
693,072
def getStringsFromFile(list_file):
    """Return a list of lines from the file, ignoring blanks and comments.

    Lines are stripped of surrounding whitespace; empty lines and lines
    starting with '#' are skipped.
    """
    with open(list_file) as handle:
        stripped = (line.strip() for line in handle)
        return [line for line in stripped if line and not line.startswith("#")]
456604f0a431e67a6c9806de421d9032893fbcaf
693,073
def remove_pruning(module, name='weight'):
    """Embed mask to the pruned weight and remove the pruning method.

    Returns the same module for call chaining.
    """
    pruning_method = module.get_pruning_parameters('method', name=name)
    pruning_method.remove()
    return module
57ac4f6a5fe63e560323bd4b47864472203dd936
693,074
import torch


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1, padding=0):
    """Build a bias-free 3x3 ``Conv2d`` (note: padding defaults to 0)."""
    return torch.nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
9df1275c4b671e8f97c9831323fd32a3916a6652
693,075
def remove_moves_occupied(state, moves):
    """Remove, in place, every move whose board position is already occupied.

    A cell is free when state.board holds 'O' at that index.

    :param state: object exposing a ``board`` sequence.
    :param moves: list of candidate board indices (mutated in place).
    :return: the same ``moves`` list, filtered.
    """
    for idx, cell in enumerate(state.board):
        if cell != 'O' and idx in moves:
            moves.remove(idx)
    return moves
48ab5fadf06152c99d0c4d05eb6ef615abf55e28
693,076
def scale01(arr):
    """
    Linearly scale the values of an array in the range [0,1]
    :param arr: input ndarray
    :return: scaled ndarray
    """
    # NOTE(review): this divides by the max only, so the result spans
    # [arr.min()/arr.max(), 1] — it lands in [0, 1] only when all values
    # are non-negative, and divides by zero when arr.max() == 0. Confirm
    # callers guarantee non-negative, non-empty, non-zero input.
    return arr / arr.max()
335f6fdb96fc47facab63827322816382280bfbe
693,077
def get_value():
    """
    Function that just returns a number.
    :return: 4
    """
    constant = 4
    return constant
8dd3282bd48551abb1e38420d7eceffcb5a0f059
693,078
def find(haystack, needle):
    """Return the first index of needle in haystack, or -1 when absent.

    >>> find("ll", "hello")
    -1
    >>> find("", "")
    0
    >>> find("hello", "ll")
    2
    >>> find("aaaaabba", "bba")
    5
    >>> find("bbaaaaaa", "bba")
    0
    >>> find("aaaaa", "bba")
    -1
    """
    hay_len = len(haystack)
    needle_len = len(needle)
    if hay_len == needle_len == 0:
        return 0
    if hay_len < needle_len:
        return -1
    # Slide a window of needle's length over the haystack.
    for start in range(hay_len - needle_len + 1):
        if haystack[start:start + needle_len] == needle:
            return start
    return -1
a3ac03b1cd80368112f47f0333e4513641c0c820
693,079
import os def _get_lines_from_file(filepath): """ Reads dependency:tree command output file """ if os.path.isfile(filepath) is not True: return None rstripped_lines = [] with open(filepath, 'r') as f: lines = f.readlines() for line in lines: rstripped_lines.append(line.rstrip()) return rstripped_lines
b1e3acd13e6b7a6a9f0552e27af3e3216847d740
693,080
def _args_to_str_func(handler, v, scope, context, path): """Used to display extra information when recursion limit is reached. This is the message-creating function used with an _EvaluationHandler, so it takes the same arguments as that. """ return "Limit was reached with the following path - {0}".format(path)
db386853e1cf419d63ec1670f6e077c1d4f90094
693,081
import base64 def _encode_key_id_from_hashed(hashed: bytes) -> str: """Encode the 30 bytes hash of the public key data. First encode the hash with base32, then divide into 12 groups like so: ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP :param hashed: :return: """ s = base64.b32encode(hashed).decode().rstrip("=") block_size = 4 key_id = ":".join((s[i : i + block_size] for i in range(0, len(s), block_size))) remain = len(s) % block_size if remain: key_id += s[-remain:] return key_id
532a34c56311a3fc5a43f45e4b9df8d65b0bae48
693,082
import argparse


def get_args():
    """
    Get arguments from command line with argparse.

    Side effects: reads sys.argv; on error or --help, argparse prints a
    message and exits the process.

    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(
        prog='aligned_bam_to_cpg_scores.py',
        description="""Calculate CpG positions and scores from an aligned bam file. Outputs raw and coverage-filtered results in bed and bigwig format, including haplotype-specific results (when available).""")
    # Required inputs and the output label.
    parser.add_argument("-b", "--bam", required=True, metavar="input.bam",
                        help="The aligned BAM file.")
    parser.add_argument("-f", "--fasta", required=True, metavar="ref.fasta",
                        help="The reference fasta file.")
    parser.add_argument("-o", "--output_label", required=True, metavar="label",
                        help="Label for output files, which results in [label].bed/bw.")
    # Scoring configuration.
    parser.add_argument("-p", "--pileup_mode", required=False,
                        choices=["model", "count"], default="model",
                        help="Use a model-based approach to score modifications across sites (model) "
                             "or a simple count-based approach (count). [default = %(default)s]")
    parser.add_argument("-d", "--model_dir", required=False, default=None,
                        metavar="/path/to/model/dir",
                        help="Full path to the directory containing the model (*.pb files) to load. [default = None]")
    parser.add_argument("-m", "--modsites", required=False,
                        choices=["denovo", "reference"], default="denovo",
                        help="Only output CG sites with a modification probability > 0 "
                             "(denovo), or output all CG sites based on the "
                             "supplied reference fasta (reference). [default = %(default)s]")
    # Filtering thresholds.
    parser.add_argument("-c", "--min_coverage", required=False, default=4,
                        type=int, metavar="int",
                        help="Minimum coverage required for filtered outputs. [default: %(default)d]")
    parser.add_argument("-q", "--min_mapq", required=False, default=0,
                        type=int, metavar="int",
                        help="Ignore alignments with MAPQ < N. [default: %(default)d]")
    parser.add_argument("-a", "--hap_tag", required=False, default="HP",
                        metavar="TAG",
                        help="The SAM tag containing haplotype information. [default: %(default)s]")
    # Parallel-processing knobs.
    parser.add_argument("-s", "--chunksize", required=False, default=500000,
                        type=int, metavar="int",
                        help="Break reference regions into chunks "
                             "of this size for parallel processing. [default = %(default)d]")
    parser.add_argument("-t", "--threads", required=False, default=1,
                        type=int, metavar="int",
                        help="Number of threads for parallel processing. [default = %(default)d]")
    return parser.parse_args()
2be055082af4ad3ff6cd9b37d31bdb87650943ef
693,083
from datetime import datetime


def return_published_date(entry):
    """Return the published date of a blog entry.

    Note: drafts also carry a published element, so they are returned too.

    :param entry: an Atom entry element.
    :return: the published timestamp as a datetime.
    """
    published = entry.find("{http://www.w3.org/2005/Atom}published")
    return datetime.fromisoformat(published.text)
08ef7ab2d7c2d63cb20c5e6bc29504fd8050705b
693,084
def to_plotly_rgb(r, g, b):
    """Convert seaborn-style colour tuple to plotly RGB string.

    Args:
        r (float): between 0 to 1
        g (float): between 0 to 1
        b (float): between 0 to 1

    Returns:
        a string for plotly
    """
    channels = ", ".join(f"{channel * 255:.0f}" for channel in (r, g, b))
    return f"rgb({channels})"
a08723dda4be60609bc60498fb1879b876eea71f
693,085
def manage_keywords(d):
    """
    split and iterate over keywords
    :param d: dictionary that contains the keywords extracted from the
        configuration file
    :return: a dictionary with mutually combined keywords

    Bug fix: the loop inserts new entries into ``d`` while iterating; the
    keys are snapshotted with list() first, since iterating the live
    ``d.keys()`` view raises RuntimeError in Python 3 when the dict grows.
    """
    for key in list(d.keys()):
        if " " in key:
            kv = key.split(" ")
            for k in kv:
                # each part of a multi-word keyword gets half the weight
                d[k] = d[key] / 2.
            # the joined keyword keeps the full weight
            d["".join(kv)] = d[key]
    return d
f74309dc9d54b374dc058be190bb84b386962dce
693,086
import pickle


def load_annotation_file(filename):
    """Load a pickled annotation file.

    A context manager closes the handle deterministically (the previous
    bare ``open()`` call leaked it).

    NOTE: ``pickle.load`` on untrusted data can execute arbitrary code —
    only load annotation files from trusted sources.

    :param filename: path to the pickle file.
    :return: the unpickled annotation object.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
d418eb6ac5f2842d7cef505a48e553aeed366796
693,088
def echo():
    """test double for asserting on the lines that would get send to click.echo"""
    class _Recorder:
        def __init__(self):
            self.lines = []

        def __call__(self, line):
            self.lines.append(line)

    return _Recorder()
c514422a3cb7b7c9d034070c8739e4698223b79f
693,089
import numpy


def scatpratt(x, y, errx, erry, a, b):
    """
    Alternative way of computing the "raw" scatter about the best-fit
    linear relation in the presence of measurement errors, proposed by
    Pratt et al. 2009, A&A, 498, 361 (their Eqs. 3 and 4).

    Usage:

    .. function:: sd=scatpratt(x,y,errx,erry,a,b)

    :param x,y: X and Y data arrays
    :param errx, erry: standard deviations in X and Y
    :param a,b: slope and intercept of best-fit linear relation
    :rtype: float sd with the scatter about the best-fit.

    v1 Mar 20 2012
    """
    n = numpy.size(x)
    # Equation 4 from Pratt et al. 2009: per-point orthogonal variance.
    var_i = erry ** 2 + a ** 2 * errx ** 2
    # Normalized weights (Equation 3 denominator).
    weights = (1. / var_i) / (numpy.sum(1. / var_i) / n)
    # Equation 3 from Pratt et al. 2009.
    residuals = y - a * x - b
    raw_scatter_sq = numpy.sum(weights * residuals ** 2) / (n - 2.)
    return numpy.sqrt(raw_scatter_sq)
f603e303bb4f044445c47ad365fe63a3c700c9b6
693,090
import os
import re


def find_amiga_os_39_iso_file(os39_dir):
    """Find Amiga OS 3.9 iso file in *os39_dir*.

    Returns the full path of the first match, or None when the directory
    does not exist or contains no matching file.
    """
    if not os.path.exists(os39_dir):
        return None
    iso_pattern = re.compile(r'amigaos3\.9\.iso$')
    for entry in os.listdir(os39_dir):
        candidate = os.path.join(os39_dir, entry)
        if os.path.isfile(candidate) and iso_pattern.search(entry):
            return candidate
    return None
8b1ed1f73072c60efd6b6de4f907740ec725a64f
693,091
import torch


def linspace(start: torch.Tensor, stop: torch.Tensor, num_samples: int) -> torch.Tensor:
    """Generalization of linspace to arbitrary tensors.

    Args:
        start (torch.Tensor): Minimum 1D tensor. Same length as stop.
        stop (torch.Tensor): Maximum 1D tensor. Same length as start.
        num_samples (int): The number of samples to take from the linear range.

    Returns:
        torch.Tensor: (D, num_samples) tensor of linearly interpolated samples.
    """
    # Fractions in [0, 1] broadcast against the per-element span.
    fractions = torch.linspace(0, 1, num_samples).unsqueeze(0)
    span = (stop - start).unsqueeze(-1)
    return start.unsqueeze(-1) + fractions * span
44155176573f276937b10292cc11f45de1d0d277
693,092
def contains_all_value(page):
    """
    Validate if both title and text are set

    Returns
    --------
    bool
        True if all values are set
        False if not all values are set
    """
    required_tags = ('title', 'text')
    return all(page.getElementsByTagName(tag) for tag in required_tags)
d2098f9db6cf98cdaf29e0c57438814c8339b095
693,093
import math


def calculate_trigonometric_function(radian_x: float) -> float:
    """Approximate sin and cos at radian_x with Taylor polynomials and
    return their SUM.

    Step 1 picks the polynomial degree: the smallest degree whose Cauchy
    remainder bound for sin/cos at radian_x drops below the tolerance.
    Step 2 evaluates the truncated sin and cos series to that degree.
    """
    # Step 1: degree selection via the Cauchy remainder upper bound.
    tolerance = 5 * 1e-6
    degree = 0
    while radian_x ** degree / math.factorial(degree) >= tolerance:
        degree += 1
    # Step 2: truncated Taylor series for sin and cos at radian_x.
    sin_approx = sum(
        (-1) ** k * radian_x ** (2 * k + 1) / math.factorial(2 * k + 1)
        for k in range(degree))
    cos_approx = sum(
        (-1) ** k * radian_x ** (2 * k) / math.factorial(2 * k)
        for k in range(degree))
    return sin_approx + cos_approx
0fb5e85cf218613671a1246523dab7883a6d0d0e
693,094
import os


def read_version():
    """Read the library version.

    Executes ``code_advent/_version.py`` (located relative to this file)
    and returns the ``__version__`` value it defines.

    :return: the version string from ``_version.py``.
    """
    path = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), 'code_advent', '_version.py'
    )
    with open(path) as f:
        # exec() injects _version.py's names (incl. __version__) into this
        # function's local namespace, which locals() then exposes below.
        exec(f.read())
    return locals()['__version__']
064d176daf91f32280104815ddd4dab9666c3863
693,095
import torch


def sparsity_ch(tensor):
    """Channel-wise sparsity for 4D (filters, channels, h, w) weight tensors.

    Returns the fraction of input channels whose kernels are all zero
    across every filter; non-4D tensors yield 0.
    """
    if tensor.dim() != 4:
        return 0
    num_filters = tensor.size(0)
    num_channels = tensor.size(1)
    # One absolute-sum per (filter, channel) kernel.
    kernel_sums = tensor.abs().view(num_filters * num_channels, -1).sum(dim=1)
    # Group kernels by channel: rows are channels after the transpose.
    per_channel = kernel_sums.view(num_filters, num_channels).t().abs().sum(dim=1)
    nonzero_channels = len(torch.nonzero(per_channel))
    return 1 - nonzero_channels / num_channels
59094a3a2b6d80f5393b56f5eaaf70d3d261ab79
693,096
def getPersonURL(person):
    """
    Return the address the view for this Person is available at.
    """
    organizer = person.organizer
    return organizer.linkToPerson(person)
a7836e80ba92c16011742a49b5d8ce1cc91d8996
693,097
import re

# NOTE: RFC 3501 doesn't fully define the format of name attributes.
# Raw string fixes the invalid "\s" escapes of the old pattern, and
# compiling once at import time avoids a recompile per call/recursion.
_NAME_ATTRIB_RE = re.compile(r"^\s*(\\[a-zA-Z0-9_]+)\s*")


def parse_paren_list(row):
    """Parse the nested list of name attributes at the start of an IMAP
    LIST response.

    :param row: response text beginning with '('.
    :return: (result, rest) — ``result`` is a (possibly nested) list of
        attribute strings such as '\\HasNoChildren'; ``rest`` is the
        unparsed remainder of ``row``.
    """
    # eat starting paren
    assert(row[0] == '(')
    row = row[1:]
    result = []
    # eat name attributes until ending paren
    while row[0] != ')':
        # recurse into a nested parenthesized list
        if row[0] == '(':
            paren_list, row = parse_paren_list(row)
            result.append(paren_list)
        # consume name attribute
        else:
            match = _NAME_ATTRIB_RE.search(row)
            assert(match is not None)
            name_attrib = row[match.start():match.end()]
            row = row[match.end():]
            name_attrib = name_attrib.strip()
            result.append(name_attrib)
    # eat ending paren
    assert(')' == row[0])
    row = row[1:]
    # done!
    return result, row
c3f264a1bdc4f5e1df5c03cb6469ff6c920832da
693,098
import numpy


def absdeg(deg):
    """Change from signed degrees to 0-180 or 0-360 ranges

    deg: ndarray
        Movement data in pitch, roll, yaw (degrees)

    Returns
    -------
    deg_abs: ndarray
        Movement translated from -180:180/-90:90 degrees to 0:360/0:180 degrees

    Example
    -------
    deg = numpy.array([-170, -120, 0, 90])
    absdeg(deg)  # returns array([190, 240, 0, 90])
    """
    shifted = numpy.copy(deg)
    # Data spanning beyond +/-90 wraps on a full circle, otherwise a half.
    full_turn = 360 if numpy.max(numpy.abs(deg)) > 90.0 else 180
    negative = deg < 0
    shifted[negative] = full_turn + deg[negative]
    return shifted
b0c51509c427596db88ad0af465db098075ef88e
693,099
def calc_sse(mod, meas):
    """Sum of squared errors between modeled and measured arrays."""
    residuals = mod - meas
    return (residuals ** 2).sum()
2563eed56d6b31cbba778960bc4682c3dd53d47c
693,100
def minus(*, alpha=None, omega):
    """Define monadic symmetric numbers and dyadic subtraction.

    Monadic case (alpha omitted): returns the negation of omega.
    Dyadic case: returns alpha - omega.
    """
    left = 0 if alpha is None else alpha
    return left - omega
3e614ce62cfacded565a09865568cac8bfaa396f
693,101
def get_syslog_facility(facility):
    """
    get_syslog_facility() -- Get human-readable syslog facility name.

    Args (required):
        facility (int) - Facility number (0-23).

    Returns:
        Name of facility upon success. "UNKNOWN" on failure.

    Bug fix: negative ints previously wrapped around via Python's negative
    indexing (e.g. -1 returned "LOCAL7"); an explicit range check now
    returns "UNKNOWN" for anything outside 0-23 or non-int input.
    """
    facility_names = (
        "KERNEL", "USER", "MAIL", "DAEMON", "AUTH", "SYSLOG", "LPR",
        "NEWS", "UUCP", "TIME", "AUTH", "FTP", "NTP", "AUDIT", "ALERT",
        "CLOCK", "LOCAL0", "LOCAL1", "LOCAL2", "LOCAL3", "LOCAL4",
        "LOCAL5", "LOCAL6", "LOCAL7",
    )
    if isinstance(facility, int) and 0 <= facility < len(facility_names):
        return facility_names[facility]
    return "UNKNOWN"
1508eba6b68d7e8c499effba6b678e29ded9f9ae
693,102
import re def _strip_product_code(api_version): """Strips the product code from the api version string.""" match = re.match(r"^([A-Z]+-)?([0-9]+)((\.[0-9]+)*)", api_version) if match is None: raise ValueError("Invalid build number: " + api_version) return match.group(2) + match.group(3)
3d978e7d280b4f88dc3e8daa2b256f840e8c4404
693,103
import os
import json


def load_multiple_pdf_ignore_list():
    """
    When a publication/revision has more than one pdf, use this result to
    get the pdfs that are duplicates, or they can be ignored. The pdfs that
    aren't main articles, and neither are in this list, can be considered
    valuable supplemental data.

    The dict has been manually populated.
    """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    ignore_path = os.path.join(module_dir, "multiple_pdfs_ignore_list.json")
    with open(ignore_path, 'r') as fp:
        return json.load(fp=fp)
64a26ee9e950137ffa639e653a8e04393b32ed08
693,104
def eurosat_norm(b, index):
    """
    :param b: band to be normalized (2D array)
    :param index: index of the given band (int between 0 and 11)
    :return: normalized band
    """
    # Per-band statistics of the EuroSAT dataset.
    mean_EuroSAT = [0.44929576, 0.4386203, 0.45689246, 0.45665017,
                    0.47687784, 0.44870496, 0.44587377, 0.44572416,
                    0.4612574, 0.3974199, 0.47645673, 0.45139566]
    std_EuroSAT = [0.2883096, 0.29738334, 0.29341888, 0.3096154,
                   0.29744068, 0.28400135, 0.2871275, 0.28741345,
                   0.27953532, 0.22587752, 0.302901, 0.28648832]
    band_mean, band_std = mean_EuroSAT[index], std_EuroSAT[index]
    return (b - band_mean) / band_std
245b4fcea80007226fb6d8784da107a7e8ab72db
693,105
def _define_binary_experiment_parameters(model_search_cv): """Define th binary experiment parameters.""" scoring = model_search_cv.scoring if isinstance(scoring, list): scoring_cols = ['mean_test_%s' % scorer for scorer in scoring] else: scoring_cols = ['mean_test_score'] group_keys = ['Dataset', 'Oversampler', 'Classifier', 'params'] estimator_type = model_search_cv.estimator._estimator_type return scoring, scoring_cols, group_keys, estimator_type
1d583fad1b6882841c6ceb7275138794662e0372
693,106
def find_by_key(target: str, data: dict) -> str:
    """
    Return the value of the target key from a nested Python dictionary,
    or "" when the key is absent (note: a legitimately empty-string value
    is indistinguishable from "absent", as in the original contract).

    Bug fix: the previous version returned the result of the FIRST nested
    dict it encountered, abandoning the search even on a miss — keys in
    siblings following a nested dict were never found. The search now
    continues through remaining entries when a subtree comes up empty.
    """
    for key, value in data.items():
        if isinstance(value, dict):
            found = find_by_key(target, value)
            if found != "":
                return found
        elif key == target:
            return value
    return ""
ce37416b4e5e36ccbe2a89938a74ada0a951d55e
693,107
from typing import List
from typing import Any


def reverse_list(L: List[Any], N: int) -> List[Any]:
    """Cuts the list after the N-th element and reverses its order.

    Parameters
    ----------
    L : list
        List to be reversed.
    N : int
        Index at which the list will be cut if it is smaller than the
        list length.

    Returns
    -------
    list
        Reversed (possibly truncated) list.
    """
    truncated = L[:N] if N < len(L) else L
    return truncated[::-1]
591f9e9cc5966a6a30d9ca0c7240575cfb488d5c
693,108
def add(path: str, content: str, encoding: str = "utf-8") -> int:
    """Append *content* to the file at *path*, creating the file if it
    doesn't exist.

    :return: the number of characters written.
    """
    with open(path, "a", encoding=encoding) as handle:
        return handle.write(content)
c225d5ec2c224250514ac3fc4fe358e70627eea5
693,109
def binary_to_int(x_bin: str) -> int:
    """
    Inverse of `int_to_binary`.

    :Example:

    >>> binary_to_int('1010')
    10
    """
    base = 2
    return int(x_bin, base)
f07c7cbb009d6a13e684801c217edd5cdaf98265
693,110
def convert_list_for_sql(my_list):
    """
    Convert a python list to a SQL list. The function is primarily used
    when trying to format SQL queries by passing an argument.

    Arguments:
        my_list: list of elements to be used in a SQL query

    Example:
        1. convert_list_for_sql([1, 2, 3]) returns '1, 2, 3'
        2. convert_list_for_sql(['Simone', 'Dan']) returns ''Simone', 'Dan''
    """
    def _render(item):
        # Strings are single-quoted; everything else uses its str() form.
        if isinstance(item, str):
            return '\'{}\''.format(item)
        return str(item)

    return ", ".join(_render(item) for item in my_list)
a2c3d5f43b5a0ba6f52b7c0a086b5e68c74ba13f
693,112
def _runtime_maybe(ctx, variants): """Check predicates at runtime and return matched value or None.""" for value, predicate in variants: if callable(predicate): if predicate(ctx): return value elif predicate: return value
461480e21943d3c9cbd4b8afc45fc794fdb12850
693,113
def get_rows_greater_than_avg(df, column):
    """
    Return all rows (with all columns) where the value in *column* is
    strictly greater than that column's mean.

    Args:
        df: A pandas DataFrame object representing the data.
        column: Name of the column to compare against its mean.

    Returns:
        df: Pandas DataFrame object with the matching rows.
    """
    above_mean = df[column] > df[column].mean()
    return df[above_mean]
02e414411c2e54cea1258af92436318d3a27500e
693,114
def read_line_in_str(line, number_of_loci):
    """
    Read STR data from one tab-separated line, validating the column count
    and that every STR value parses as float; returns the 6 info columns
    and the parsed STR values separately, or None (after printing a
    message) when the line is malformed.
    """
    fields = line.strip().split("\t")
    # The line must hold 6 info columns plus one column per locus.
    if len(fields) != number_of_loci + 6:
        print("\tTermination!Can not read the file because it has wrong column number at some line.")
        return
    info_part = fields[:6]
    str_part = []
    for token in fields[6:]:
        try:
            str_part.append(float(token))
        except ValueError:
            print("\tTermination!There is a non-numeric value in the STR data.")
            return
    return info_part, str_part
0ddfb60e57c034318e676bb1faa30ce724c9275e
693,116
def read_description():
    """Read README.md and CHANGELOG.md, falling back to a fixed blurb."""
    try:
        parts = []
        for name in ("README.md", "CHANGELOG.md"):
            with open(name) as handle:
                # Each file is prefixed with a newline, as before.
                parts.append("\n" + handle.read())
        return "".join(parts)
    except Exception:
        # Any failure (typically a missing file) yields the default text.
        return "Some useful script for Orangepi/Raspberrypi boards"
877ef1be944827ca7d45a7b67e6edbe44d4c6dfb
693,117
def get_attrs(obj, config_attrs):
    """
    Collect the listed attributes of an object into a dictionary.

    :param obj: object containing the attributes.
    :param config_attrs: a list of all the configurable attributes within
        the object.
    :return attr_data_dict: dict mapping each name in *config_attrs* to its
        value on *obj* (values are included even when they are None).
    """
    return {name: getattr(obj, name) for name in config_attrs}
18348e05d799406961169dcb195531b25fe03946
693,119
def asm_label(address):
    """Return a local label name for asm at <address> (lowercase hex)."""
    return f".asm_{address:x}"
81a9f6a722b22bb0ccaaec9909e0e840baa7ab54
693,120
def get_danmaku(danmaku_queue):
    """Pop and return the next danmaku from a danmaku queue.

    :param danmaku_queue: queue object exposing a ``dequeue()`` method.
    """
    dequeue = danmaku_queue.dequeue
    return dequeue()
f730d57ce2649174801a2365b5ea16001744ce41
693,121
def recipRank(cls, ranking):
    """
    Return the reciprocal rank of class 'cls' in the sorted association
    list 'ranking' of (class, score) pairs (descending by score). Classes
    absent from the list score 0.0.

    @param cls: [the class in question.]
    @param ranking: [the ranked list of scores [(cls1,score), ..., (clsN,score).]]
    @return: [the reciprocal rank of 'cls'.]

    >>> ranking = [('a',1.5), ('b',0.45), ('c',0.22), ('d',0.1)]
    >>> '%0.2f' % recipRank('c',ranking)
    '0.33'
    >>> '%0.1f' % recipRank('a',ranking)
    '1.0'
    >>> '%0.1f' % recipRank('b',ranking)
    '0.5'
    """
    for rank, (candidate, _score) in enumerate(ranking, start=1):
        if candidate == cls:
            return 1. / rank
    return 0.0
1fe59a39b2076d7a3780f10601d07d3f815d6cae
693,122
from pathlib import Path


def is_python(path: Path) -> bool:
    """Returns True if |path| ends in .py."""
    suffix = path.suffix
    return suffix == '.py'
dbedd33fad8ff4c1da84aef24b0cb7f4ef24f0cd
693,123
def get_integrated_intensity(ave_azav, peak_bin, delta_bin=3):
    """
    Get the average integrated intensity.

    Sums the bin values from `peak_bin - delta_bin` through
    `peak_bin + delta_bin`, inclusive on both sides.

    Parameters
    ----------
    ave_azav: ndarray
        radially binned average azimuthal intensity from used curves
    peak_bin: int
        peak radial bin
    delta_bin: int
        number of bins on each side of peak to include in sum

    Returns
    -------
    integrated_intensity: float
        the calculated integrated intensity
    """
    # Clamp the lower edge: a peak near bin 0 would otherwise produce a
    # negative index and silently wrap around to the end of the array.
    low = max(0, peak_bin - delta_bin)
    # +1 makes the window symmetric; the original slice excluded
    # peak_bin + delta_bin, summing one bin fewer on the high side than
    # documented.
    high = peak_bin + delta_bin + 1
    return ave_azav[low:high].sum(axis=0)
2367ef173591d3b887406d4f18ac3b0a788b0baf
693,124
def remove_restricted(list):
    """
    Remove restricted (system) Amazon tags from a list of tag dicts.

    Tags whose 'Key' contains the substring 'aws' are dropped. Returns the
    filtered list, or -1 when the input cannot be processed.
    """
    kept = []
    try:
        for tag in list:
            if 'aws' in tag['Key']:
                continue
            kept.append(tag)
    except Exception:
        # Malformed input (non-iterable, missing 'Key', ...) -> error code.
        return -1
    return kept
7168747c3a3dacc54a6ee70e64569708068d0091
693,126
def check_mirc_exploit(proto) -> bool:
    """Verifies that the nickname portion of the protocol does not contain
    any binary data.

    Vars:
    :proto: The text before the second : hopefully a nickname.
    :returns: True if there is binary data and False if it is clean.
    """
    allowed_controls = (1, 3, 31, 2, 22, 15, 31, 42)
    for char in str(proto):
        code = ord(char)
        if code == 58:
            # ':' ends the nickname portion -> everything before it was clean
            return False
        if code in allowed_controls:
            # tolerated mIRC formatting control codes
            continue
        if code < 29 or code > 500:
            return True
    return False
065e646d4566190a9a3961be9cc4294976b8472e
693,127
def time_major(data):
    """
    Return the data in time-major form (most often used to turn a single
    batch into this form).

    *(Assumes the traces are already padded)*

    @param data is an array of sequences
    @return batch contains the batch in **time-major** form
        [max_time, batch_size] padded with the PAD symbol
    """
    # Exchanging the first two axes turns [batch, time, ...] into
    # [time, batch, ...]; swapaxes(1, 0) is identical to swapaxes(0, 1).
    return data.swapaxes(1, 0)
dcd47e3eef8fd16f74494b816a6418db3536d4af
693,128
def program_representation(functionInfos):
    """Aggregate per-function feature vectors into one program vector.

    Reads the 56 fixed feature keys from each element's ``features``
    mapping and returns their element-wise sums.
    """
    keys = ['ft01_BBInMethod', 'ft02_BBWithOneSuccessor', 'ft03_BBWithTwoSuccessors', 'ft04_BBWithMoreThanTwoSuccessors', 'ft05_BBWithOnePredecessor', 'ft06_BBWithTwoPredecessors', 'ft07_BBWithMoreThanTwoPredecessors', 'ft08_BBWithOnePredOneSuc', 'ft09_BBWithOnePredTwoSuc', 'ft10_BBWithTwoPredOneSuc', 'ft11_BBWithTwoPredTwoSuc', 'ft12_BBWithMoreTwoPredMoreTwoSuc', 'ft13_BBWithInstructionsLessThan15', 'ft14_BBWithInstructionsIn[15-500]', 'ft15_BBWithInstructionsGreaterThan500', 'ft16_EdgesInCFG', 'ft17_CriticalEdgesInCFG', 'ft18_AbnormalEdgesInCFG', 'ft19_DirectCalls', 'ft20_ConditionalBranch', 'ft21_AssignmentInstructions', 'ft22_ConditionalBranch', 'ft23_BinaryIntOperations', 'ft24_BinaryFloatPTROperations', 'ft25_Instructions', 'ft26_AverageInstruction', 'ft27_AveragePhiNodes', 'ft28_AverageArgsPhiNodes', 'ft29_BBWithoutPhiNodes', 'ft30_BBWithPHINodesIn[0-3]', 'ft31_BBWithMoreThan3PHINodes', 'ft32_BBWithArgsPHINodesGreaterThan5', 'ft33_BBWithArgsPHINodesGreaterIn[1-5]', 'ft34_SwitchInstructions', 'ft35_UnaryOperations', 'ft36_InstructionThatDoPTRArithmetic', 'ft37_IndirectRefs', 'ft38_AdressVarIsTaken', 'ft39_AddressFunctionIsTaken', 'ft40_IndirectCalls', 'ft41_AssignmentInstructionsWithLeftOperandIntegerConstant', 'ft42_BinaryOperationsWithOneOperandIntegerConstant', 'ft43_CallsWithPointersArgument', 'ft44_CallsWithArgsGreaterThan4', 'ft45_CallsThatReturnPTR', 'ft46_CallsThatReturnInt', 'ft47_ConstantZero', 'ft48_32-bitIntegerConstants', 'ft49_ConstantOne', 'ft50_64-bitIntegerConstants', 'ft51_ReferencesLocalVariables', 'ft52_DefUseVariables', 'ft53_LocalVariablesReferred', 'ft54_ExternVariablesReferred', 'ft55_LocalVariablesPointers', 'ft56_VariablesPointers']
    # One feature vector per function, in key order.
    per_function = [[info.features[key] for key in keys] for info in functionInfos]
    # Element-wise sum across functions.
    return [sum(column) for column in zip(*per_function)]
702cbc73d057f5c3554d8bbf037f067631d8bf01
693,129
import re


def parse_params(path):
    """Parse a path fragment into (key, value) pairs.

    Slashes separate alternating keys and values. For example
    '/a/3/b/5' -> [('a', '3'), ('b', '5')].
    """
    # Drop empty segments: the original kept the empty leading segment of
    # '/a/3/b/5' (the docstring's own example), which shifted every
    # key/value pairing by one position.
    parts = [part for part in path.split('/') if part]
    keys = parts[::2]
    values = parts[1::2]
    # Materialize the pairs: per the docstring callers expect a list, not
    # a one-shot zip iterator.
    return list(zip(keys, values))
c79bc783374f314c00c559fb61879e7671eb8f5a
693,130
def circ_supply(height: int, nano: bool = False) -> int:
    """
    Circulating supply at given height, in ERG (or nanoERG).
    """
    # Emission settings
    initial_rate = 75
    fixed_rate_blocks = 525600 - 1
    epoch_length = 64800
    step = 3

    # Progress within the decaying-emission phase at the given height
    blocks_past_fixed = max(0, height - fixed_rate_blocks)
    completed_epochs = blocks_past_fixed // epoch_length
    current_epoch = completed_epochs + min(1, completed_epochs)
    blocks_in_current_epoch = blocks_past_fixed % epoch_length
    current_rate = max(0, initial_rate - current_epoch * step)

    # Emission components
    fixed_period_cs = min(fixed_rate_blocks, height) * initial_rate
    completed_epochs_cs = sum(
        epoch_length * max(0, initial_rate - step * (epoch + 1))
        for epoch in range(completed_epochs)
    )
    current_epoch_cs = blocks_in_current_epoch * current_rate

    total = fixed_period_cs + completed_epochs_cs + current_epoch_cs
    return total * 10**9 if nano else total
3a0c1889ab5a0869ec4033d263489bbb08dd0864
693,131
import argparse


def parse_args():
    """Parse command-line input args for the benchmark tooling."""
    arg_parser = argparse.ArgumentParser()

    # for local excel analysis
    arg_parser.add_argument("--log_path", type=str, default="./log",
                            help="benchmark log path")
    arg_parser.add_argument("--device_name", type=str, default=None,
                            help="device name, e.g. gpu_t4, gpu_p4")

    # for benchmark platform
    arg_parser.add_argument("--post_url", type=str, default=None,
                            help="post requests url, None will not post to benchmark platform")
    arg_parser.add_argument("--output_json_file", type=str, default="./post_data.json",
                            help="post requests json, cannot be none")

    # basic arguments for framework
    arg_parser.add_argument("--frame_name", type=str, default="paddle",
                            help="framework name")
    arg_parser.add_argument("--api", type=str, default="cpp", help="test api")
    arg_parser.add_argument("--framework_version", type=str, default="0.0.0",
                            help="framework version")
    arg_parser.add_argument("--model_version", type=str, default="0.0.0",
                            help="model version")
    arg_parser.add_argument("--cuda_version", type=str, default="0.0.0",
                            help="cuda version")
    arg_parser.add_argument("--cudnn_version", type=str, default="0.0",
                            help="cudnn version")
    arg_parser.add_argument("--trt_version", type=str, default="0.0.0",
                            help="TensorRT version")
    return arg_parser.parse_args()
43333df986830dba63d252ebfc503304b80c43ae
693,132
import torch


def pad_tensor(tensor, seq_len):
    """Pad tensor with its last element along dimension 0 up to the next
    multiple of *seq_len*.

    Note: when the length is already a multiple of seq_len, a full
    seq_len of padding is appended (same quirk as the original).
    """
    pad_shape = list(tensor.size())
    pad_shape[0] = seq_len - tensor.size()[0] % seq_len
    padding = tensor[-1].clone().expand(pad_shape).to(tensor.device)
    return torch.cat((tensor, padding))
213593e4152dde391132b247a8f729fcbb284fec
693,133
def badtoken(t):
    """check if t is punctuation, space, or newline char"""
    whitespace_tokens = (' ', '\n')
    return t.is_punct or t.text in whitespace_tokens
92432504e6d0f0fc0720747c7bf2e97b1fc59c90
693,135
def _get_labeled_order(logger): """Get the order in which papers were labeled.""" label_order = [] n_initial = 0 n_queries = logger.n_queries() for query_i in range(n_queries): try: label_methods = logger.get("label_methods", query_i) except KeyError: continue label_idx = logger.get("label_idx", query_i) for i in range(len(label_idx)): if label_methods[i] == "initial": n_initial += 1 label_order.extend(label_idx) return label_order, n_initial
cac8124a486db63db66f2592d6675f47b35d5471
693,136
def segment_string_into_two(input_string, dictionary):
    """
    Simple solution.
    Assumes that a string can only be broken into 2 words; returns the two
    words joined by a space, or None if no split point works.
    """
    for split_at in range(1, len(input_string)):
        left = input_string[:split_at]
        right = input_string[split_at:]
        if left in dictionary and right in dictionary:
            return left + " " + right
fe7b06a3305351739de8b6c28536293579d334a8
693,138
def _GetLoadBalancingScheme(args, messages, is_psc): """Get load balancing scheme.""" if not args.load_balancing_scheme: # The default is EXTERNAL for non-PSC forwarding rules. return None if is_psc else messages.ForwardingRule.LoadBalancingSchemeValueValuesEnum.EXTERNAL if args.load_balancing_scheme == 'INTERNAL': return messages.ForwardingRule.LoadBalancingSchemeValueValuesEnum.INTERNAL elif args.load_balancing_scheme == 'EXTERNAL': return messages.ForwardingRule.LoadBalancingSchemeValueValuesEnum.EXTERNAL elif args.load_balancing_scheme == 'EXTERNAL_MANAGED': return messages.ForwardingRule.LoadBalancingSchemeValueValuesEnum.EXTERNAL_MANAGED elif args.load_balancing_scheme == 'INTERNAL_SELF_MANAGED': return (messages.ForwardingRule.LoadBalancingSchemeValueValuesEnum .INTERNAL_SELF_MANAGED) elif args.load_balancing_scheme == 'INTERNAL_MANAGED': return (messages.ForwardingRule.LoadBalancingSchemeValueValuesEnum .INTERNAL_MANAGED) return None
9704929ad28188e76bc066361af86c36bc3cede7
693,140
def _parse_search_results(json_result): """Search results are divided into 'statuses' and 'search_metadata'. The former contains the tweets themselves, and the latter contains the max_id to use to retrieve the next batch of tweets""" statuses = json_result.get('statuses') metadata = json_result.get('search_metadata') next_results = metadata.get('next_results') return statuses, next_results
63fb0a04297e65f0e2cdb723b0c03c8d46abfdaa
693,141
def contains_unusual_content(result: dict) -> bool:
    """
    returns True if the response indicates the PDF contains unusual content
    (Launch, Sound, Movie, ResetForm, ImportData and JavaScript actions) by
    checking if ISO 19005.1 clause 6.6.1 is among the failure reasons.
    :param result: The parsed JSON response from POSTing a PDF to verapdf
    :return: True if the PDF contains unusual content, otherwise False
    """
    # any() short-circuits on the first failed 6.6.1 assertion.
    return any(
        assertion["status"] == "FAILED"
        and assertion["ruleId"]["specification"] == "ISO_19005_1"
        and assertion["ruleId"]["clause"] == "6.6.1"
        for assertion in result["testAssertions"]
    )
98d5fcacaf0c69dbe3c17e037d6b78232bfea9da
693,142
def get_channels(
    public, stable, server, intranet, group, add_dependent_channels=False
):
    """Returns the relevant conda channels to consider if building project.

    The subset of channels returned depends on the visibility and stability
    of the package being built:

    * public and stable: the public stable channel
    * public and not stable: the public beta channel
    * not public and stable: both public and private stable channels
    * not public and not stable: both public and private beta channels

    Public channels have priority over private channels, if turned.

    Args:
        public: Boolean indicating if we're supposed to include only public
            channels
        stable: Boolean indicating if we're supposed to include stable
            channels
        server: The base address of the server containing our conda channels
        intranet: Boolean indicating if we should add "private"/"public"
            prefixes on the conda paths
        group: The group of packages (gitlab namespace) the package being
            compiled belongs to (e.g. "bob" or "beat")
        add_dependent_channels: If True, appends the conda-forge channel

    Returns:
        A (channels, upload_channel) tuple.

    Raises:
        RuntimeError: when private channels are requested together with
            intranet=False.
    """
    if (not public) and (not intranet):
        raise RuntimeError(
            "You cannot request for private channels and set"
            " intranet=False (server=%s) - these are conflicting options"
            % server
        )

    channels = []
    channels_dict = {}

    # do not use '/public' urls for public channels
    public_prefix = "/software/" + group
    if stable:
        channels.append(server + public_prefix + "/conda")
        channels_dict["public/stable"] = channels[-1]
    else:
        # allowed betas
        channels.append(server + public_prefix + "/conda/label/beta")
        channels_dict["public/beta"] = channels[-1]

    if not public:
        private_prefix = "/private"
        if stable:
            # allowed private channels
            channels.append(server + private_prefix + "/conda")
            channels_dict["private/stable"] = channels[-1]
        else:
            # allowed betas
            channels.append(server + private_prefix + "/conda/label/beta")
            channels_dict["private/beta"] = channels[-1]

    visibility = "public" if public else "private"
    stability = "stable" if stable else "beta"
    upload_channel = channels_dict["{}/{}".format(visibility, stability)]

    if add_dependent_channels:
        channels.append("conda-forge")

    return channels, upload_channel
b3378686aa6bf549c71393e43ac22966f8228f50
693,143
import numpy def _jet_wrapped(siz): """ Provides a jet-like colormap array for sub-aperture processing Parameters ---------- siz : int the size of the colormap Returns ------- numpy.ndarray the `siz x 3` colormap array """ siz = int(siz) red_siz = max(1, int(siz/4)) # create trapezoidal stack trapezoid = numpy.hstack( (numpy.arange(1, red_siz+1, dtype=numpy.float64)/float(red_siz), numpy.ones((red_siz, ), dtype=numpy.float64), numpy.arange(red_siz, 0, -1, dtype=numpy.float64)/float(red_siz))) out = numpy.zeros((siz, 3), dtype=numpy.float64) # create red, green, blue indices green_inds = int(0.5*(siz - trapezoid.size)) + numpy.arange(trapezoid.size) red_inds = ((green_inds + red_siz) % siz) blue_inds = ((green_inds - red_siz) % siz) # populate our array out[red_inds, 0] = trapezoid out[green_inds, 1] = trapezoid out[blue_inds, 2] = trapezoid return out
64b3f334fb990157a669199515da848cc20ff084
693,145
from typing import List


def divide_into_tweet(text: str) -> List[str]:
    """Takes a text string and divides it into a list of strings, each
    short enough to fit in a tweet (at most 280 characters, cutting near
    column 276 at punctuation or, failing that, at a space).

    Args:
        text (str): text to be divided into tweets

    Returns:
        List[str]: list of tweet-sized chunks
    """
    break_marks = [".", ",", ";", "--"]
    chunks = []
    while len(text) > 280:
        # Pick the right-most break mark within the first 276 characters;
        # ties on position fall back to comparing the mark string itself.
        candidates = [(text.rfind(mark, 0, 276), mark) for mark in break_marks]
        cut_at, cut_mark = max(candidates)
        if cut_at <= 0:
            # No usable punctuation: cut at the last space instead.
            cut_at = text.rfind(" ", 0, 276)
            cut_mark = " "
        cut_at += len(cut_mark)
        chunks.append(text[:cut_at].rstrip())
        text = text[cut_at:].lstrip()
    chunks.append(text)
    return chunks
72351d6a691eda1ac5244f7709f0a2ac9ab68881
693,146
def rescale_and_format(x: float, divider: float) -> str:
    """Textual representation of the data.

    Divides *x* by *divider*, rounds to the nearest thousand, formats with
    space-separated thousands groups and right-justifies to 9 characters.
    """
    rounded = round(x / divider, -3)
    return f"{rounded:,.0f}".replace(",", " ").rjust(9)
20d2777f578e74110c54895f5e6c38a8c60411f6
693,147
def validate_text(text):
    """ Returns True if text exists and is more than white spaces, False otherwise."""
    if not text:
        return False
    return not text.isspace()
838dbf793c918d76def644256d0d15f4ef1e62bb
693,148
import re


def normalize_whitespace(text):
    """
    Replace non-breaking spaces (and ``&nbsp;`` entities) with regular
    spaces, collapse runs of whitespace, and strip whitespace from the
    ends of the string.
    """
    # NOTE: the original also called ``.replace(r"\S+", " ")``; str.replace
    # takes a literal substring, not a regex, so that call only corrupted
    # text containing the literal characters "\S+" -- it has been removed.
    s = text.replace(u"\xa0", u" ").replace(u"&nbsp;", " ")
    return re.sub(r"\s+", " ", s).strip()
c9c06c959be9455b39e5579d1bec4e3d4948e0c8
693,149
def flip_date_format(date: str) -> str:
    """Goes from YYYY/MM/DD to DD/MM/YYYY and vice-versa (dashes are
    normalized to slashes first)."""
    first, middle, last = date.replace("-", "/").split("/")
    return "/".join((last, middle, first))
c63cbc0b89663b9969461474bf8e4cfc91303feb
693,150
import os


def basename(path):
    """
    Get basename.

    @param (str) path
    @return (str)
    """
    # os.path.basename(p) is defined as the tail of os.path.split(p).
    _, tail = os.path.split(path)
    return tail
69fff2ed0deaacb4e262addd464ee8aafad7d422
693,152
def load_doc(filename):
    """
    Load a UTF-8 text document into memory and return its full contents.

    :param filename: path of the file to read
    :return: the file contents as a string
    """
    # The context manager closes the handle even if read() raises; the
    # original manual open/read/close leaked the handle on error.
    with open(filename, mode='rt', encoding='utf-8') as file:
        return file.read()
08d0d72bbfab73708d9d9e3cd6344e667a27f4b7
693,153
import re


def read_urls(filename):
    """Returns a list of the puzzle URLs from the given log file,
    extracting the hostname from the filename itself, sorting
    alphabetically in increasing order, and screening out duplicates.
    """
    # hostname is the second underscore-separated piece of the filename
    server_name = "https://" + filename.split("_")[1]
    pattern = r"\S+puzzle+\S+"
    found = []
    with open(filename, "r") as puzzle_file:
        for line in puzzle_file:
            match = re.search(pattern, line)
            if match:
                found.append(server_name + match.group())
    # dict preserves insertion order while dropping duplicates;
    # sort by the last 8 characters of the URL, as before.
    unique = dict.fromkeys(found)
    return sorted(unique, key=lambda u: u[-8:])
541fe96c08e4a700d58d5400bf966599fb7dc55a
693,154
def clean_hotel_name(string):
    """Strip leading and trailing whitespace from a hotel name."""
    return string.strip()
191980c83cc4837400b59fdfec78b99d9cd95943
693,155
import pathlib


def name_from_path(path: str) -> str:
    """Generate a model name from the H5 path.

    Returns the file name without its final extension, e.g.
    'models/net.h5' -> 'net'. A path without an extension now returns the
    bare file name (the original's ``name[:-len(suffix)]`` sliced with
    ``[:-0]`` and returned '' in that case).
    """
    # Path.stem is exactly "name minus the last suffix".
    return pathlib.Path(path).stem
2cc77fb1d5694213ff5e28a70790e4e6540c9f56
693,156
def parse_color(key, string) -> list:
    """
    Parse an HTML color value of the form 'rgb(0, 0, 0)' into its channel
    components.

    Args:
        key: attribute name; must be exactly "color" (asserted below)
        string: associated value from the html, e.g. "rgb(12, 34, 56)"

    Returns:
        list[str]: the channel values as strings, e.g. ['12', '34', '56'].
        (The original annotation claimed ``str``, but ``split`` returns a
        list; only the annotation has been corrected, not the behavior.)
    """
    assert key == "color"
    # Strip the 'rgb(' wrapper and split on the ", " separators.
    string = string.replace("rgb(", "").replace(")", "").split(", ")
    return string
9205af775b1624b06064d2905ffb7d62b117676a
693,157
import os def _read_names(path): """ Read line-separated time series names from ascii file Parameters ---------- path : str Key file path Returns ------- list Time series names Notes ----- Keys are stored on ASCII file as one key per line. The file is terminated by END. """ with open(os.path.join(path, path), 'r') as f: names = [l.strip() for l in f if not l.startswith(("**", "'")) and not l.upper().strip() == "END"] return names
bdf6f1d29427be32683061ab9d4187da72621e5c
693,158
def get_title(cube, nexp):
    """Get the plot title and legend location for a cube.

    Single-experiment plots get a "model, experiment (rXiYpZ)" title;
    multi-experiment plots get no title. historicalMisc uses legend
    location 3, everything else location 2.
    """
    if nexp != 1:
        return None, 2
    attrs = cube.attributes
    mip = 'r%si1p%s' % (attrs['realization'], attrs['physics_version'])
    title = '%s, %s (%s)' % (attrs['model_id'], attrs['experiment_id'], mip)
    legloc = 3 if attrs['experiment_id'] == 'historicalMisc' else 2
    return title, legloc
b375077c8f8bb63d4f0e6c0aa890e68d8941c799
693,159
def check_reference_allele(reference_base, bpm_record_group):
    """
    Check whether the given reference base (on the plus strand) is queried
    by any record in a group of BPMRecords

    Args:
        reference_base (string): The reference base
        bpm_record_group (iter(BPMRecord)): Iterable of BPMRecords

    Returns:
        bool: True if record group contains reference allele
    """
    # Generator form short-circuits on the first matching record.
    return any(reference_base in record.plus_strand_alleles
               for record in bpm_record_group)
476c70e9e1ef3703b8b33b2c310808c6d713dbe2
693,160