content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def genFaces(Z):
    """Build the six quad faces of a hexahedron from its 8 corner points.

    Corners are expected in this order:
    Z[0] = corner[:]
    Z[1] = corner + yDelta
    Z[2] = corner + zDelta
    Z[3] = Z[2] + yDelta
    Z[4] = corner + xDelta
    Z[5] = Z[4] + yDelta
    Z[6] = Z[4] + zDelta
    Z[7] = Z[6] + yDelta
    """
    # Each tuple lists the corner indices of one quad side of the figure.
    quads = ((0, 1, 3, 2),
             (0, 1, 5, 4),
             (0, 2, 6, 4),
             (4, 5, 7, 6),
             (2, 6, 7, 3),
             (1, 5, 7, 3))
    return [[Z[a], Z[b], Z[c], Z[d]] for a, b, c, d in quads]
import torch
def collate_batch(batch):
    """Combine different samples in order to form a batch.

    Arguments:
        batch (list): list of dictionaries, all with the same keys
    Returns:
        dict where each sample has been concatenated by key
    """
    keys = batch[0].keys()
    # torch.cat expects a tuple/list of tensors sharing a key.
    return {key: torch.cat(tuple(sample[key] for sample in batch))
            for key in keys}
def CorrectUpdateMask(ref, args, request):
    """Returns the update request with the corrected mask.
    The API expects a request with an update mask of 'schema', whereas the inline
    schema argument generates an update mask of 'schema.columns'. So if --schema
    was specified, we have to correct the update mask.
    Args:
      ref: The entry resource reference (unused).
      args: The parsed args namespace.
      request: The update entry request.
    Returns:
      Request with corrected update mask.
    """
    # `ref` is required by the modify-request-hook signature but not used.
    del ref
    if args.IsSpecified('schema'):
        # Collapse the field-level mask down to the whole-schema mask.
        request.updateMask = request.updateMask.replace('schema.columns', 'schema')
    return request
def binary_search_iterative(list_, p, r, element):
    """Iterative binary search.

    Parameters:
        list_ (list): sorted list of integers
        p (int): first index of the search range
        r (int): last index of the search range
        element (int): value to look for
    Returns:
        int: index of `element` inside the list, or -1 when absent
    """
    # Bail out early when the value cannot lie inside the range at all.
    if element < list_[p] or element > list_[r]:
        return -1
    while p <= r:
        mid = (p + r) // 2  # middle of the current range
        pivot = list_[mid]
        if pivot == element:
            return mid
        if pivot > element:
            r = mid - 1
        else:
            p = mid + 1
    return -1
import os
def format_expected_path(ext, path, out_base):
    """Generate the location to use for the *expected* output file for a
    given test `path` and output extension key `ext`.

    The resulting path is located "next to" the test file, using its
    basename with a different extension---for example `./foo/bar.t`
    becomes `./foo/bar.ext`. If the test path is a directory, the file is
    placed *inside* this directory, and `out_base` is used for the filename
    instead of the test name (e.g., `./foo/bar.t/out_base.ext`).
    """
    if os.path.isdir(path):
        # Directory test: results live inside it, named after `out_base`.
        return os.path.join(path, '{}.{}'.format(out_base, ext))
    # File test: results go alongside the file, named after the test file
    # itself (`out_base` is ignored).
    parent = os.path.dirname(path)
    stem, _ = os.path.splitext(os.path.basename(path))
    return os.path.join(parent, '{}.{}'.format(stem, ext))
def s(switch_num: int) -> str:
    """Return the switch name (e.g. ``s3``) for a given switch number."""
    return 's' + str(switch_num)
def groupSubjects(listings):
    """Group business types and listings by business grouping mongo id.

    Args:
        listings: iterable of dicts, each carrying a
            ``parent_subject_id`` -> ``{'$oid': <str>}`` mapping.
    Returns:
        dict mapping each parent-subject oid to the list of its listings,
        preserving input order within each group.
    """
    grouped = {}
    for listing in listings:
        oid = listing['parent_subject_id']['$oid']
        # setdefault avoids the membership test + double dict lookup of the
        # original if/else version.
        grouped.setdefault(oid, []).append(listing)
    return grouped
def merge_sequences(list1, list2, **kwargs):
    """
    Return a new list of model objects merging the lists ``list1`` and
    ``list2`` keeping the original ``list1`` order and discarding
    duplicates.

    Duplicate detection is based on each item's ``to_tuple(**kwargs)`` key;
    ``None`` inputs are treated as empty lists.
    """
    merged = []
    seen = set()
    for item in (list1 or []) + (list2 or []):
        key = item.to_tuple(**kwargs)
        if key not in seen:  # idiom fix: `key not in` over `not key in`
            merged.append(item)
            seen.add(key)
    return merged
def binary_search(arr, key, low, high):
    """Recursively search `key` in the sorted slice arr[low..high].

    arr: sorted array that contains keys
    key: a key to be searched in array arr
    low: low index of the search range
    high: high index of the search range
    Returns the index of `key`, or -1 when it is not found.
    """
    if low > high:
        return -1  # key is not found
    middle = (low + high) // 2
    if arr[middle] == key:
        return middle
    if arr[middle] < key:
        # BUG FIX: when the middle element is smaller than the key, the key
        # can only be in the UPPER half. The original recursed into the
        # lower half here (and vice versa), inverting the search direction.
        return binary_search(arr, key, middle + 1, high)
    return binary_search(arr, key, low, middle - 1)
def dlv_strip(line):
    """Strips away the DLV title and stuff, if present.

    Header lines (starting with "Cost" or "DLV") become "", a
    "Best model: " prefix is removed, and the result is stripped.
    """
    # startswith with a tuple replaces the hard-coded slice comparisons.
    if line.startswith(("Cost", "DLV")):
        return ""
    prefix = "Best model: "
    if line.startswith(prefix):
        line = line[len(prefix):]
    return line.strip()
def is_anon_user(user):
    """
    Determine if an user is anonymous or not.

    A user is anonymous when its ``username`` carries the ``anon_user_``
    prefix.
    """
    # startswith is clearer and avoids the hard-coded slice length.
    return user.username.startswith('anon_user_')
import socket
import os
def _mksockerr(err):
"""
Construct a ``socket.error`` instance based on a specified socket
error number.
:param int err: A value from ``errno``.
:returns: The specified socket error.
:rtype: ``socket.error``
"""
return socket.error(err, os.strerror(err)) | 623b061347a7fc7955b8ec9742d2fe07926fdb3d | 693,289 |
def identity(x):
    """Identity function: return the argument unchanged."""
    return x
def _merge(left, right):
"""Merger helper."""
result = []
while left and right:
if left[0] <= right[0]:
result.append(left[0])
left = left[1:]
else:
result.append(right[0])
right = right[1:]
while left:
result.append(left[0])
left = left[1:]
while right:
result.append(right[0])
right = right[1:]
return result | c9e15559ff8d4644fadbaf3befd69e4392e132c7 | 693,292 |
def merge_dict(lhs, rhs, override=True):
    """Merge nested right dict into left nested dict recursively.
    :param lhs: dict to be merged into (mutated in place and returned).
    :type lhs: dict
    :param rhs: dict to merge from.
    :type rhs: dict
    :param override: the value in rhs overide the value in left if True.
    :type override: boolean
    """
    # Leaf case: at least one side is not a dict, so pick one whole value.
    if not isinstance(lhs, dict) or not isinstance(rhs, dict):
        if override:
            return rhs
        else:
            return lhs
    for key, value in rhs.items():
        if key not in lhs:
            # New key: inserted by reference, not deep-copied.
            lhs[key] = rhs[key]
        else:
            # Existing key: recurse so nested dicts merge key-by-key.
            lhs[key] = merge_dict(lhs[key], value, override)
    return lhs
def conductance(g, c):
    """Evaluates the conductance of the clustering solution c on a given graph g.

    g: graph object exposing ``vs``, ``es`` and ``neighbors()`` (igraph-style
       API -- confirm against callers).
    c: per-vertex cluster labels, indexable by vertex id.
    Returns (phi, intra_cluster_phi, inter_cluster_phi).
    """
    n = len(g.vs)
    m = len(g.es)
    # ASSc[col]: boundary count for cluster col; AS[col]: association count.
    ASSc = {}
    AS = {}
    for col in set(c):
        ASSc[col] = 0
        AS[col] = 0
    for i in range(n):
        current_clr = c[i]
        for j in g.neighbors(i):
            if c[i] == c[j]:
                # Internal edge seen from both endpoints: 1/2 each side.
                AS[current_clr] += 1/2
            else:
                ASSc[current_clr] += 1
                AS[current_clr] += 1
    # Per-cluster conductance phi_S; guard the degenerate denominator.
    phi_S = {}
    for col in set(c):
        if (min(AS[col], m-AS[col]+ASSc[col]) == 0):
            phi_S[col] = 0
        else:
            phi_S[col] = ASSc[col]/min(AS[col], m-AS[col]+ASSc[col])
    # Average of (1 - phi_S) over all clusters.
    phi = 0
    for col in set(c):
        phi -= phi_S[col]
    phi /= len(set(c))
    phi += 1
    intra_cluster_phi = min(phi_S.values())
    inter_cluster_phi = 1 - max(phi_S.values())
    return phi, intra_cluster_phi, inter_cluster_phi
def ajuste(w, x, d, y):
    """Apply one perceptron weight update with a fixed learning rate.

    w: current weight, x: input value, d: desired output, y: actual output.
    Returns the adjusted weight.
    """
    learning_rate = 0.01
    error = d - y
    return w + learning_rate * error * x
def 방_이름(방):
    """Return the module-level name bound to the given room object.

    Translated note from the original (Korean) docstring: the same security
    problem exists here -- can '방' (the room) be trusted as a safe value?
    What would be a better approach than looking it up in globals?
    """
    # Linear scan of module globals; returns the first name whose value
    # compares equal, or None implicitly when nothing matches.
    for key, value in globals().items():
        if value == 방:
            return key
from typing import Union
def create(num: int, pos: int) -> Union[int, None]:
    """Creates a particle at `pos` if possible and returns the new state.

    Parameters
    ----------
    num : int or Spinstate
        The number representing the binary state.
    pos : int
        The index of the state element.

    Returns
    -------
    new : int or None
        The newly created state, i.e. `num` with bit `pos` set.
        If the bit is already set the state cannot be created and
        ``None`` is returned.
    """
    bit = 1 << pos
    if bit & num:
        return None  # site already occupied
    return num | bit
def removeAtomFromMol(mol, idx):
    """
    Remove the atom with id `idx` from the pybel molecule object.

    mol: pybel molecule wrapper exposing an ``OBMol`` (OpenBabel) object;
         it is mutated in place and also returned.
    idx: atom id as understood by ``OBMol.GetAtomById``.
    """
    # mol should contain the atom coordinates
    atom = mol.OBMol.GetAtomById(idx)
    # delete the atom (mutates the underlying OBMol)
    mol.OBMol.DeleteAtom(atom)
    return mol
def find_previous_context(request):
    """
    Find the previous context and return it as a string.

    :param request: request from "Create order price" Dialogflow intent
    :return: name of the previous context
    """
    contexts = request.data["queryResult"]["outputContexts"]
    preferred = "creating_order_additional_notes"
    fallback = "creating_order_dates_destinations_plus"
    # "additional notes" wins whenever any output context carries it; the
    # original collected flags into a dict and compared `== True`.
    if any(preferred in context["name"] for context in contexts):
        return preferred
    return fallback
def get_name( individual ):
""" Return the name for the individual in the passed data section. """
name = individual['name'][0]['value']
# the standard unknown code is not good for svg output
if '?' in name and '[' in name and ']' in name:
name = 'unknown'
return name.replace( '/', '' ).replace('"','"').replace("'","’") | b802dcef89d91a46467de867996b1febe5811bfc | 693,302 |
import math
def factorial(x):
    """Return x factorial as an integer (thin wrapper over math.factorial)."""
    return math.factorial(x)
def process_tex(lines):
    """
    Remove unnecessary section titles from the LaTeX file.

    Any sectioning command that merely repeats the ``gyptis.`` module path
    is dropped; all other lines are kept in order.
    """
    # startswith accepts a tuple, replacing the original five-way or-chain.
    unwanted = (
        r"\section{gyptis.",
        r"\subsection{gyptis.",
        r"\subsubsection{gyptis.",
        r"\paragraph{gyptis.",
        r"\subparagraph{gyptis.",
    )
    return [line for line in lines if not line.startswith(unwanted)]
import collections
def frequent_sleeper(sleep_tracker):
    """Return ID that sleeps the most * the minute they sleep most often."""
    sleepiest_guard = None
    most_sleeps = 0
    # Find the guard with the strictly largest number of recorded sleeps.
    for guard_id, minutes in sleep_tracker.items():
        if len(minutes) > most_sleeps:
            most_sleeps = len(minutes)
            sleepiest_guard = guard_id
    minute_counts = collections.Counter(sleep_tracker[sleepiest_guard])
    (top_minute, _), = minute_counts.most_common(1)
    return sleepiest_guard * top_minute
def prime_check(n):
    """Checks if natural number n is prime.

    Args:
        n: integer value > 0.
    Returns:
        A boolean, True if n prime and False otherwise.
    Raises:
        AssertionError: for non-int or non-positive input.
    """
    assert type(n) is int, "Non int passed"
    assert n > 0, "No negative values allowed, or zero"
    if n == 1:
        return False
    divisor = 2
    # Trial division up to (and including) sqrt(n).
    while divisor * divisor < n + 1:
        if n != divisor and n % divisor == 0:
            return False
        divisor += 1
    return True
from bs4 import BeautifulSoup
def get_text(text: str) -> str:
    """
    Return the page text with all HTML tags stripped.

    :param text: raw HTML document
    :return: extracted plain text, or '' when nothing could be extracted
    """
    extracted = BeautifulSoup(text, "lxml").get_text()
    if not extracted:
        return ''
    return extracted
def _get_mean_fcpm0(data: list):
"""
Calculates the average initial guess at fuzzy c-partitioned matrix.
-----------------------------------------------------------------------------------
!!! For statistical evaluation !!!
-----------------------------------------------------------------------------------
Parameters:
-----------------------------------------------------------------------------------
data: List (ndarray, int)
Result from fcm_train().
Returns:
-----------------------------------------------------------------------------------
mean_fcpm0: 2d array, (S, N)
Average initial guess at fuzzy c-partitioned matrix.
"""
mean_fcpm0 = data[0][2] / len(data) # Starting with the fcpm0 matrix of the 1st image
for i in range(1, len(data)):
mean_fcpm0 += data[i][2] / len(data) # Adding the fcpm0 matrices from all other images
return mean_fcpm0 | fafce9fe109229ae8b530ad5e2c0eb47f8cb88b1 | 693,308 |
def extract_repair_sequence(seq):
    """
    Extract the sequence of node repairs
    """
    # NOTE(review): `seq.sort()` being indexable as a (values, indices)
    # pair suggests `seq` is a torch.Tensor (Tensor.sort returns a
    # namedtuple); a plain Python list would return None here -- confirm
    # against callers.
    sort_seq = seq.sort()
    # Keep only the non-zero sorted values and the node indices they
    # originated from.
    repair_seq_order = sort_seq[0][sort_seq[0] != 0]
    repair_seq_nodes = sort_seq[1][sort_seq[0] != 0]
    return repair_seq_order, repair_seq_nodes
import os
def proot_distribution_dir(cwd):
    """Return the proot distribution directory located under ``cwd``."""
    return os.path.join(cwd, "_proot")
def generate_chebyshev_betas(n=10):
    """
    Generate the first n beta coefficients for monic chebyshev polynomials
    Source for the recurrence relation:
    https://www3.nd.edu/~zxu2/acms40390F11/sec8-3.pdf, accessed 11/07/17
    :param n: Number of required coefficients, must be >2
    :return: List of the first n coefficients
    """
    from math import pi
    # BUG FIX: the first coefficient is pi; the original hard-coded
    # 3.14158433, a mistyped value of pi (3.14159265...).
    return [pi] + [0.5] + [0.25] * (n - 2)
import glob
def find_files(directory, pattern):
    """
    Return a list of files matching `pattern` inside `directory`.
    """
    # Glob pattern is built by plain concatenation, mirroring the callers'
    # expectations of a '/'-joined path.
    return glob.glob(directory + '/' + pattern)
def _checkFuture(future):
"""
Wrapper for futures that lets checking of their status using 'eventually'
"""
def _check():
if future.cancelled():
return None
if future.done():
return future.result()
raise Exception()
return _check | 77db96cfe1bb5219af928db5c02b8127b4896421 | 693,314 |
import json
def load_data(file_path):
    """Load newline-delimited JSON records keyed by each record's first field.

    Args:
        file_path: path to a file containing one JSON object per non-blank
            line.
    Returns:
        dict mapping the VALUE of each record's first key to the record
        itself (later records overwrite earlier ones on key collisions).
    """
    data = {}
    with open(file_path, 'r') as f:
        for line in f:  # iterate lazily instead of readlines()
            if line.strip():
                example = json.loads(line)
                # next(iter(...)) grabs the first key without building a list.
                first_key = next(iter(example))
                data[example[first_key]] = example
    return data
def validate_config(config):
    """Validate configuration.

    Returns a list of human-readable error strings; an empty list means the
    config is valid. Stage selection: both 's3_bucket' and 'stage' present
    means external stages; neither means table stages; exactly one is an
    error.
    """
    errors = []
    # Keys required when loading through an external (S3) stage.
    s3_required_config_keys = [
        'account',
        'dbname',
        'user',
        'password',
        'warehouse',
        's3_bucket',
        'stage',
        'file_format'
    ]
    # Keys required when loading through snowflake table stages.
    snowflake_required_config_keys = [
        'account',
        'dbname',
        'user',
        'password',
        'warehouse',
        'file_format'
    ]
    required_config_keys = []
    # Use external stages if both s3_bucket and stage defined
    if config.get('s3_bucket', None) and config.get('stage', None):
        required_config_keys = s3_required_config_keys
    # Use table stage if none s3_bucket and stage defined
    elif not config.get('s3_bucket', None) and not config.get('stage', None):
        required_config_keys = snowflake_required_config_keys
    else:
        errors.append("Only one of 's3_bucket' or 'stage' keys defined in config. "
                      "Use both of them if you want to use an external stage when loading data into snowflake "
                      "or don't use any of them if you want ot use table stages.")
    # Check if mandatory keys exist
    for k in required_config_keys:
        if not config.get(k, None):
            errors.append("Required key is missing from config: [{}]".format(k))
    # Check target schema config: exactly one of the two schema settings
    # must be present.
    config_default_target_schema = config.get('default_target_schema', None)
    config_schema_mapping = config.get('schema_mapping', None)
    if not config_default_target_schema and not config_schema_mapping:
        errors.append("Neither 'default_target_schema' (string) nor 'schema_mapping' (object) keys set in config.")
    return errors
def is_glibc_ref(s):
    """
    Return True if s looks like a reference to GLIBC as typically found in
    Elfs.
    """
    return s.find('@@GLIBC') != -1
def _int(v):
"""Convert to int for excel, but default to original value."""
try:
if v is None or v == '':
return ''
if type(v) is str:
# we handle strings like '2,345.00'
return int(float(v.replace(',', '')))
return int(v)
except ValueError:
return v | 65e6c9a609a41a7afde48cd4937bbed44119dfe5 | 693,318 |
def count_genres(row_df):
    """
    A trade off degree based in the user genre count.

    :param row_df: A user dataframe row (pandas Series) based in the genres
        distribution
    :return: A float that is the trade off degree: fraction of genres with
        a positive weight
    """
    # BUG FIX: Series.iteritems() was removed in pandas 2.0; .items() is
    # the drop-in replacement available in all supported pandas versions.
    count = sum(1 for _, number in row_df.items() if number > 0.0)
    return count / len(row_df)
from pathlib import Path
import json
def dbt_manifest(dbt_manifest_file: Path) -> dict:
    """
    Get the dbt manifest.

    Parameters
    ----------
    dbt_manifest_file : Path
        The dbt manifest file.

    Returns
    -------
    out : dict
        The manifest
    """
    # BUG FIX: the original ignored its argument and always loaded a
    # hard-coded path relative to this source file
    # (Path(__file__).parent / "dbt/data/manifest.json").
    with Path(dbt_manifest_file).open("r") as file:
        return json.load(file)
def _get_resource_type(attribute, root, type_, method):
"""Returns ``attribute`` defined in the resource type, or ``None``."""
if type_ and root.resource_types:
types = root.resource_types
r_type = [r for r in types if r.name == type_]
r_type = [r for r in r_type if r.method == method]
if r_type:
if hasattr(r_type[0], attribute):
if getattr(r_type[0], attribute) is not None:
return getattr(r_type[0], attribute)
return [] | 98c69dce73deea1cc7ceadb1cb9bf82900a3329e | 693,321 |
def normal_repr(x, *_):
    """
    Register this with a class to indicate that its own
    __repr__ method is already fine. This prevents it from
    being supressed when its output is a bit long.

    Extra positional arguments are accepted and ignored.
    """
    return repr(x)
def date_match(line, pattern):
    """
    If line matches pattern then return the date as an integer, else None.
    """
    match = pattern.match(line)
    if not match:
        return None
    digits = "".join(part for part in match.groups() if part)
    # Pad with "00" pairs until at least 12 digits (minute granularity).
    while len(digits) < 12:
        digits += "00"
    return int(digits)
import os
import json
import logging
def _get_model_dir(model_dir):
    """Adjusts model dir for multi-worker training.
    Checkpointing and Saving need to happen on each worker and they need to write
    to different paths as they would override each others. This utility function
    adjusts the base model dir passed as a flag using Vertex AI cluster topology
    (read from the TF_CONFIG environment variable).
    """
    def _is_chief(task_type, task_id):
        # Chief is either explicitly 'chief' with index 0, or the
        # single-worker case where no task type is set.
        return ((task_type == 'chief' and task_id == 0) or task_type is None)
    tf_config = os.getenv('TF_CONFIG')
    # NOTE(review): leftover debug print of the raw TF_CONFIG env value.
    print(tf_config)
    if tf_config:
        tf_config = json.loads(tf_config)
        if not _is_chief(tf_config['task']['type'], tf_config['task']['index']):
            # Non-chief workers get a per-worker subdirectory.
            model_dir = os.path.join(model_dir, 'worker-{}').format(tf_config['task']['index'])
    logging.info('Setting model_dir to: %s', model_dir)
    return model_dir
def hello_world():
    """Greeting endpoint for the served VW model."""
    return "Hello, World! I'm serving a VW model"
def atom(cls):
    """
    Build a fresh clone of ``cls(3, -5)``.

    Intended as a fixture producing an :class:`.Atom` instance.
    """
    instance = cls(3, -5)
    return instance.clone()
def recursive_update(original_dict: dict, new_dict: dict) -> dict:
    """Recursively merge ``new_dict`` into ``original_dict`` (in place)."""
    for key, value in new_dict.items():
        if not isinstance(value, dict):
            # Plain values simply overwrite.
            original_dict[key] = value
        else:
            # Nested dicts merge key-by-key; missing branches start empty.
            nested = original_dict.get(key, {})
            original_dict[key] = recursive_update(nested, value)
    return original_dict
import numpy
def logistic(x):
    """
    Computes :math:`\\frac{1}{1 + e^{-x}}` (the sigmoid function).
    """
    return 1.0 / (1.0 + numpy.exp(-x))
def all_subclasses(cls):
    """Recursively returns all the subclasses of the provided class."""
    direct = set(cls.__subclasses__())
    indirect = set()
    for subclass in direct:
        indirect |= all_subclasses(subclass)
    return direct | indirect
def _is_mxp_footer(line):
"""Returns whether a line is a valid MXP footer."""
return line.strip().startswith('- - - - - - - - - - - - - - - - - -') | 9f2e33b32197de7a83109093cf17c5e4c2a7e705 | 693,333 |
def hostname(fqdn):
    """Return hostname part of FQDN (text before the first dot)."""
    return fqdn.split('.', 1)[0]
import random
def de_jong_step_b_function(transposed_decimal_pop_input):
    """De Jong Step - B Test Function.

    Evaluates each individual as sum_i(i * x_i^4) plus one Gaussian noise
    term drawn per individual. NOTE: output is stochastic (random.gauss),
    so repeated calls with the same input differ.
    """
    y = []
    for individual in transposed_decimal_pop_input:
        de_jong_step_func_4 = 0
        term_1 = 0
        # One noise draw per individual, shared by all of its terms.
        term_2 = random.gauss(0, 1)
        for idx, xi in enumerate(individual):
            term_1 = term_1 + idx*(xi**4)
            de_jong_step_func_4 = term_1 + term_2
        y.append(de_jong_step_func_4)
    return y
import time
def retry_enumerate(iterable, start=0, max_time=3600):
    """
    Wrapper around enumerate that retries if memory is unavailable.

    Retries with exponential backoff (2**retries seconds) and re-raises
    once the next wait would reach `max_time`.
    NOTE(review): enumerate() itself is lazy and allocates almost nothing,
    so an OSError here is unlikely -- confirm the intended failure mode.
    """
    retries = 0
    seconds = 0
    while True:
        try:
            return enumerate(iterable, start=start)
        except OSError:
            seconds = 2 ** retries
            if seconds >= max_time:
                raise
            print('Low on memory. Retrying in {} sec.'.format(seconds))
            time.sleep(seconds)
            retries += 1
            continue
def parse_likwid_metrics(file_path, metrics, singlecore=False):
    """
    Reads a single Peano output file and parses likwid performance metrics.
    Args:
       file_path (str):
          Path to the Peano output file.
       metrics (str[]):
          A list of the metrics we want to read out.
       singlecore (bool):
          Specifies if the run was a singlecore run.
    Returns:
       A dict holding for each of the found metrics a nested dict that holds the following key-value pairs:
          * 'Sum'
          * 'Avg'
          * 'Min'
          * 'Max'
       Metrics not found keep the sentinel value -1.0.
    """
    columns = ["Sum", "Min", "Max", "Avg"]
    result = {metric: {column: -1.0 for column in columns} for metric in metrics}
    try:
        # FIX: use a context manager -- the original opened the file and
        # never closed the handle.
        with open(file_path) as file_handle:
            for line in file_handle:
                for metric in metrics:
                    if singlecore:
                        if metric in line:
                            # | Runtime (RDTSC) [s] | 6.5219 |
                            segments = line.split('|')
                            value = float(segments[2].strip())
                            # Singlecore runs report one value for all stats.
                            result[metric]["Sum"] = value
                            result[metric]["Min"] = value
                            result[metric]["Max"] = value
                            result[metric]["Avg"] = value
                    else:
                        if metric + " STAT" in line:
                            # | Runtime (RDTSC) [s] STAT | 27.4632 | 1.1443 | 1.1443 | 1.1443 |
                            segments = line.split('|')
                            result[metric]["Sum"] = float(segments[2].strip())
                            result[metric]["Min"] = float(segments[3].strip())
                            result[metric]["Max"] = float(segments[4].strip())
                            result[metric]["Avg"] = float(segments[5].strip())
    except Exception:
        print("Error: Could not process file '%s'!\n" % (file_path))
        raise
    return result
def convert_to_list(list_or_dict):
    """Wrap a dict in a single-element list, or return a list unchanged.

    (The original docstring said "Convert list to dict", which was the
    reverse of what the code does.)

    Raises:
        TypeError: when the input is neither a list nor a dict.
    """
    if isinstance(list_or_dict, dict):
        return [list_or_dict]
    elif isinstance(list_or_dict, list):
        return list_or_dict
    else:
        raise TypeError(f'Input should be a list or dict. Received {type(list_or_dict)}')
def bound_maker_erfrecterf(amplitude_bounds, translational_offset_bounds, stddev_bounds, vertical_offset_bounds, number_erfs):
    """
    Create tuple with lower and upper bounds to be used in the curve fit.

    Args:
        amplitude_bounds (tuple): bounds on the amplitudes of the gaussians
        translational_offset_bounds (tuple): bounds on the translational offsets of the gaussians
        stddev_bounds (tuple): bounds on the standard deviations of the gaussians
        vertical_offset_bounds (tuple): bounds on the vertical offset of the gaussians
        number_erfs (int): the number of erf-rect-erf features in the fit
    Returns:
        bounds (tuple): (lower, upper) parameter bounds for the erf-rect-erf fits
    """
    def one_side(idx):
        # Parameter layout: amplitudes, left offsets, left widths, right
        # offsets, right widths (number_erfs each), then one vertical offset.
        return ([amplitude_bounds[idx]] * number_erfs
                + [translational_offset_bounds[idx]] * number_erfs
                + [stddev_bounds[idx]] * number_erfs
                + [translational_offset_bounds[idx]] * number_erfs
                + [stddev_bounds[idx]] * number_erfs
                + [vertical_offset_bounds[idx]])
    return (one_side(0), one_side(1))
def kth_element(list_a: list, k: int):
    """Problem 3: Find the K'th Element of a List (1-based).

    Parameters
    ----------
    list_a : list
        The input list
    k : int
        The 1-based position of the element to fetch

    Returns
    -------
    element
        The k'th element of the input list

    Raises
    ------
    TypeError
        If the given argument is not of `list` type
    ValueError
        If the list has fewer than `k` elements, or `k` is less than 1
    """
    # Guard order matters: a too-short list is reported before a bad k.
    if not isinstance(list_a, list):
        raise TypeError('The argument given is not of `list` type.')
    if len(list_a) < k:
        raise ValueError(f'The input list contains less than [{k}] elements.')
    if k < 1:
        raise ValueError('The value of k cannot be less than 1.')
    return list_a[k - 1]
import statistics
def calculate_median_depths(document: dict) -> dict:
    """
    Calculate the median depth for all hits (sequences) in a Pathoscope result document.

    :param document: the pathoscope analysis document to calculate depths for
    :return: a dict of median depths keyed by hit (sequence) ids
    """
    return {hit["id"]: statistics.median(hit["align"])
            for hit in document["results"]}
def example(a, b: int, *others, d: float, e=10):
    """Here is my docstring"""
    # Returns the stringified mapping of this call's locals, i.e. all
    # parameters in declaration order; introducing any local variable
    # before this line would change the output.
    return str(locals())
def string_to_ord(string):
    """Convert string to corresponding list of int code points."""
    return list(map(ord, string))
def slope(x1, y1, x2, y2):
    """Return the slope of the line through two points (rise over run).

    :param x1: x coordinate of the first point
    :param y1: y coordinate of the first point
    :param x2: x coordinate of the second point
    :param y2: y coordinate of the second point
    :return: the angular coefficient, (y2 - y1) / (x2 - x1)
    """
    rise = y2 - y1
    run = x2 - x1
    return rise / run
def calc_time(url, time, total_time, total_count):
    """Aggregate per-URL request timing statistics.

    time: list of request durations for this url; total_time/total_count:
    totals over all urls, used for the percentage fields.
    """
    count = len(time)
    time_sum = sum(time)
    # Upper-median index, falling back to the first sample for tiny lists.
    median = sorted(time)[int((count + 1) / 2)] if count > 2 else time[0]
    return {
        "url": url,
        "count": count,
        "time_sum": time_sum,
        "time_avg": time_sum / count,
        "time_max": max(time),
        "time_med": median,
        "time_perc": time_sum / total_time * 100,
        "count_perc": count / total_count * 100,
    }
def get_checksum_dict_from_txt(txt_file_path):
    """Load a ggd recipe's checksum file into a dict.

    Parameters:
    ----------
    1) txt_file_path: (str) The file path to the recipe's checksums file
       (tab-separated: filename <TAB> md5sum per line)

    Return:
    +++++++
    1) (dict) Key = filename, value = md5sum for the file
    """
    checksums = {}
    with open(txt_file_path, "r") as handle:
        for raw_line in handle:
            fields = raw_line.strip().split("\t")
            # Skip empty or malformed lines (fewer than two columns).
            if len(fields) >= 2:
                checksums[fields[0]] = fields[1]
    return checksums
def problem_8_7(cents):
    """ Given an infinite number of quarters (25 cents), dimes (10 cents),
    nickels (5 cents) and pennies (1 cent), write code to calculate the number
    of ways of representing n cents.
    """
    smaller_vals = {
        25: 10,
        10: 5,
        5: 1
    }
    def num_combinations(change, val):
        """ Count combinations of coins of value <= `val` summing to `change`.

        Args:
            change: int, amount left to represent.
            val: int, one of 25, 10, 5 or 1.
        Returns:
            int, the number of combinations.
        """
        if val == 1:  # Only one way to return change using only pennies.
            return 1
        smaller_val = smaller_vals[val]
        # Use i coins of the current denomination, i in 0..change // val,
        # and make up the remainder with smaller denominations.
        # BUG FIX: the original used float division with range() (TypeError
        # on Python 3), double-counted the zero-coin case, and dropped the
        # maximal-coin case.
        ways = 0
        for i in range(change // val + 1):
            ways += num_combinations(change - i * val, smaller_val)
        return ways
    return num_combinations(cents, 25)
def possible_segments(N):
    """ Generate the combination of segments """
    # Lazily yields index triples (i, j, k) with i < j < k. The third index
    # is additionally capped at j + 3 unless fewer than 3 indices remain
    # after j, in which case it may run to N - 1.
    segments = ((i, j, k) for i in range(N) for j in range(i + 1, N) for k in range(j + 1, N if N - (j+1) < 3 else j + 4 ))
    return segments
def get_sh_input_config(cfg, data_source):
    """Get Sentinel Hub OGC configuration for given data source.

    :param cfg: Configuration dict holding a 'sh_inputs' list
    :type cfg: dict
    :param data_source: Sentinel Hub's data source
    :type data_source: DataSource
    :return: the first matching Sentinel Hub OGC configuration, or None
    """
    return next((item for item in cfg['sh_inputs']
                 if item['data_source'] == data_source.name), None)
def calc_jaccard_index(multiset_a, multiset_b):
    """Calculate jaccard's coefficient for two multisets mutliset_a
    and multiset_b.
    Jaccard index of two set is equal to:
    (no. of elements in intersection of two multisets)
    _____________________________________________
    (no. of elements in union of two multisets)
    Note: intersection and union of two multisets is similar to union-all and
    intersect-all operations in SQL.
    Args:
        multiset_a: list(int). First set.
        multiset_b: list(int). Second set.
    Returns:
        float. Jaccard index of two sets.
    """
    # Work on sorted copies so both passes below can walk the lists with a
    # single advancing index (merge-style, O(n log n) overall).
    multiset_a = sorted(multiset_a[:])
    multiset_b = sorted(multiset_b[:])
    small_set = (
        multiset_a[:] if len(multiset_a) < len(multiset_b) else multiset_b[:])
    union_set = (
        multiset_b[:] if len(multiset_a) < len(multiset_b) else multiset_a[:])
    # First pass: elements of the smaller multiset not matched in the
    # larger one are "extra" and extend the multiset union.
    index = 0
    extra_elements = []
    for elem in small_set:
        while index < len(union_set) and elem > union_set[index]:
            index += 1
        if index >= len(union_set) or elem < union_set[index]:
            extra_elements.append(elem)
        elif elem == union_set[index]:
            # Matched occurrence: consume it so duplicates pair up 1:1.
            index += 1
    union_set.extend(extra_elements)
    if union_set == []:
        return 0
    # Second pass: count occurrences present in BOTH multisets
    # (multiset intersection, again pairing duplicates 1:1).
    index = 0
    intersection_set = []
    for elem in multiset_a:
        while index < len(multiset_b) and elem > multiset_b[index]:
            index += 1
        if index < len(multiset_b) and elem == multiset_b[index]:
            index += 1
            intersection_set.append(elem)
    coeff = float(len(intersection_set)) / len(union_set)
    return coeff
def session_ps_14bit(max_h, max_w):
    """Trim size to 14-bit limitation

    Clamps (height, width) so their product fits in 14 bits, i.e.
    height * width <= 16383, while keeping at least a 24x80 screen.
    """
    # why 16383 instead of 16384 for 14-bit?
    max_h = max(max_h, 24)
    max_w = max(max_w, 80)
    max_h = min(max_h, 204)  # 16383 // 80
    max_w = min(max_w, 682)  # 16383 // 24
    # Two "sweet spot" sizes whose product is exactly 16383.
    if max_h >= 127 and max_w >= 129:
        return 127, 129  # 127*129=16383
    if max_h >= 129 and max_w >= 127:
        return 129, 127  # 129*127=16383
    if max_h * max_w <= 16383:
        return max_h, max_w
    # Otherwise shrink the height to respect the product limit.
    return 16383 // max_w, max_w
def is_valid(isbn):
    """
    Given a string the program will check if
    the provided string is a valid ISBN-10.
    :param isbn: candidate ISBN-10 string (separators are ignored)
    :return: True when the weighted checksum is divisible by 11
    """
    # ISBN is invalid in case input string is empty
    if not isbn or isbn == '':
        return False
    # Converting from strings to numbers (non-digit separators are skipped)
    digits = []
    for i in isbn:
        if i.isdigit():
            digits.append(int(i))
    # Check digit of an ISBN-10 may be 'X' (representing '10')
    if isbn[-1] == 'X':
        digits.append(10)
    # ISBN is invalid in case it has less than 10 digits
    if len(digits) < 10:
        return False
    # Multiply ISBN members by their 1-based position weight:
    for n in range(10, 0, -1):
        digits[n - 1] *= n
    # Calculate mod and return the answer
    # If the result is 0, then it is a valid ISBN-10, otherwise it is invalid:
    return sum(digits) % 11 == 0
from typing import Dict
def home() -> Dict:
    """Return the API's homepage metadata (name, version, author, repo)."""
    info = {
        "API": "Videogame quotes API",
        "version": "1.0.0",
        "author": "Batucho",
        "repository-url": "placeholder",
    }
    return info
def euler_totient(n):
    """Euler's totient (phi) function computed by trial division.

    Uses the product formula phi(n) = n * prod(1 - 1/p) over the prime
    factors p of n.  Time complexity: O(sqrt(n)).
    """
    phi = n
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            # divisor is a prime factor: apply the (1 - 1/p) term ...
            phi -= phi // divisor
            # ... and strip all of its occurrences from n.
            while n % divisor == 0:
                n //= divisor
        divisor += 1
    # Any leftover n > 1 is a single prime factor larger than sqrt(n).
    if n > 1:
        phi -= phi // n
    return phi
def compute_prob(pattern_count, num_patterns, epsilon=1e-7):
    """Smoothed probability of a pattern.

    ``epsilon`` keeps the result finite and nonzero when either count
    is zero (additive smoothing on both numerator and denominator).
    """
    smoothed_count = pattern_count + epsilon
    smoothed_total = (num_patterns + epsilon) * (1 + epsilon)
    return smoothed_count / smoothed_total
def T4_p(p):
    """Saturation temperature as a function of pressure, IAPWS-IF97.

    Release on the IAPWS Industrial Formulation 1997 for the Thermodynamic
    Properties of Water and Steam, September 1997, Section 8.2
    (The Saturation-Temperature Equation), Eq. 31, page 34.
    Input pressure in MPa, output temperature in K.
    """
    b = p ** 0.25
    # Quadratic coefficients of the backward equation in beta = p**(1/4).
    e_coef = b ** 2 - 17.073846940092 * b + 14.91510861353
    f_coef = 1167.0521452767 * b ** 2 + 12020.82470247 * b - 4823.2657361591
    g_coef = -724213.16703206 * b ** 2 - 3232555.0322333 * b + 405113.40542057
    d_coef = 2 * g_coef / (-f_coef - (f_coef ** 2 - 4 * e_coef * g_coef) ** 0.5)
    # Final quadratic solve for the saturation temperature.
    return (650.17534844798 + d_coef - (
        (650.17534844798 + d_coef) ** 2
        - 4 * (-0.23855557567849 + 650.17534844798 * d_coef)) ** 0.5) / 2
import re
def time_to_ps(tstr):
    """
    Convert a time string with unit to a float in picoseconds.

    Supported units: s, ms, us, ns, ps, fs.

    :param tstr: e.g. "1.5ns", "10ps", "2s"
    :return: the time expressed in picoseconds as a float
    :raises ValueError: if ``tstr`` cannot be parsed
    """
    prefactors = ['', 'm', 'u', 'n', 'p', 'f']
    # Raw string for the pattern: '\d' in a plain string literal is an
    # invalid escape and raises a SyntaxWarning on Python 3.12+.
    m = re.match(r'([\d.]+)([{}]?)s'.format(''.join(prefactors)), tstr)
    if m is None:
        raise ValueError('Could not parse time: {}'.format(tstr))
    val, prefactor = m.groups()
    # Each prefix step is three decades; bare 's' maps to 10**12 ps.
    decade = -3 * prefactors.index(prefactor) + 12
    return float(val) * 10**decade
def feasible(x, constr):
    """
    Check the inequality constraints ``g_i(x) <= 0`` at ``x``.

    :param x: candidate point
    :param constr: sequence of callables g_i, or None for "unconstrained"
    :return: True if x is feasible (or there are no constraints);
        otherwise the index of the first violated constraint.
        NOTE: index 0 is falsy, so callers must compare against True
        explicitly rather than truth-testing the result.
    """
    # Identity comparison with None instead of ``constr == None``.
    if constr is None:
        return True
    for i, g in enumerate(constr):
        if g(x) > 0:
            return i
    return True
def prop_exists(printer, ast):
    """Print an "exists" property by prefixing the inner property with E."""
    inner = printer.ast_to_string(ast["prop"])
    return 'E' + inner
import re
def find_hashtags(text):
    """
    Extract all hashtags (tokens starting with '#') from a string.

    A hashtag runs from the '#' up to the next whitespace, '#' or '@'.

    :param text: string to scan
    :return: list of matched hashtag strings (including the '#')
    """
    hashtag_pattern = re.compile(r'[#][^\s#@]+')
    return hashtag_pattern.findall(text)
def expand_optimization_args(group):
    """Expands the optimization related arguments with pytorch_translate
    specific arguments

    Args:
        group: an argparse argument group (e.g. from
            ``parser.add_argument_group``) to register the flags on.

    Returns:
        The same ``group``, with the extra arguments added.
    """
    # --- periodic validation within an epoch ---
    group.add_argument(
        "--subepoch-validate-interval",
        default=0,
        type=int,
        metavar="N",
        help="Calculates loss over the validation set every N batch updates. "
        "Note that validation is done at the end of every epoch regardless. "
        "A value of <= 0 disables this.",
    )
    # --- stopping criteria (time / validation loss / BLEU) ---
    group.add_argument(
        "--stop-time-hr",
        default=-1.0,
        type=float,
        metavar="N",
        help="Stops training after N hours have elapsed. Use decimal values "
        "for sub-hourly granularity. A value of < 0 disables this.",
    )
    group.add_argument(
        "--stop-no-best-validate-loss",
        default=-1,
        type=int,
        metavar="N",
        help="Stops training after N validations have been run without "
        "achieving a better loss than before. Note that this is affected by "
        "--validation-interval in how frequently we run validation in the "
        "first place. A value of < 0 disables this.",
    )
    group.add_argument(
        "--stop-no-best-bleu-eval",
        default=-1,
        type=int,
        metavar="N",
        help="Stops training after N evals have been run without "
        "achieving a better BLEU score than before. Note that this is affected "
        "by --generate-bleu-eval-interval in how frequently we run BLEU eval "
        "in the first place. A value of < 0 disables this.",
    )
    # --- learning-rate decay driven by BLEU plateau ---
    group.add_argument(
        "--shrink-lr-no-best-bleu-eval",
        default=5,
        type=int,
        metavar="N",
        help="Decay learning rate after N evals have been run without "
        "achieving a better BLEU score than before. This is to achieve "
        "decay lr within an epoch, independent of lr_scheduler. "
        "Note that this is affected by --generate-bleu-eval-interval in "
        "how frequently we run BLEU eval in the first place. "
        "A value of < 0 disables this.",
    )
    # --- weight pruning ---
    group.add_argument(
        "--pruning-percentile",
        type=int,
        default=0,
        help="Proportion of weights to prune. A value <=0 disables pruning."
        " By default, prunes weights uniformly and ignores bias terms.",
    )
    group.add_argument(
        "--parameters-to-prune",
        default="all",
        help="Names of layers to prune. Layers are pruned if the argument is "
        "a substring of the layer name. Options are 'all', 'embed', 'lstm'. ",
    )
    # --- sequence-level loss configuration ---
    group.add_argument(
        "--loss-beam",
        type=int,
        default=0,
        help="Beam size to use for 'sequence_nll' loss and 'sequence_risk' "
        "loss. If zero, use --beam.",
    )
    return group
import warnings
def cast_unicode(s, encoding='utf8', errors='strict'):
    """Coerce bytes or str input to str (unicode).

    Deprecated shim: emits a DeprecationWarning on every call.

    :param s: bytes or str value to coerce
    :param encoding: codec used when decoding bytes
    :param errors: decode error handling mode
    :raises TypeError: if ``s`` is neither bytes nor str
    """
    warnings.warn(
        "zmq.utils.strtypes is deprecated in pyzmq 23.",
        DeprecationWarning,
        stacklevel=2,
    )
    if isinstance(s, str):
        return s
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    raise TypeError("Expected unicode or bytes, got %r" % s)
import os
from pathlib import Path
def set_paths(local_path = 'C:/Data/CISI',extract_data=False,base_calculation=False,cisi_calculation=False):
    """Function to specify required pathways for inputs and outputs
    Args:
        *local_path* (str, optional): local path. Defaults to 'C:/Data/CISI'.
        *extract_data* (bool, optional): True if extraction part of model should be activated. Defaults to False.
        *base_calculation* (bool, optional): True if base calculations part of model should be activated. Defaults to False.
        *cisi_calculation* (bool, optional): True if CISI part of model should be activated. Defaults to False.
    Returns:
        *osm_data_path* (str): directory to osm data
        *fetched_infra_path* (str): directory to output location of the extracted infrastructure data
        *country_shapes_path* (str): directory to dataset with administrative boundaries (e.g. of countries)
        *grid_path* (str): directory to feather file of consistent spatial grids
        *infra_base_path* (str): directory to output location of the rasterized infrastructure data
        *method_max_path* (str): directory to output location of the CISI based on the max of each asset
        *method_mean_path* (str): directory to output location of the CISI based on the mean of the mean of a each asset

    NOTE(review): exactly one of extract_data / base_calculation /
    cisi_calculation is expected to be True; the flags are checked in that
    order and if none is set the function implicitly returns None —
    confirm callers always pass one flag.
    """
    # Set path to inputdata
    #osm_data_path = os.path.abspath(os.path.join(local_path,'Datasets','OpenStreetMap')) #path to map with pbf files from OSM
    # NOTE(review): hard-coded cluster path below overrides the
    # local_path-based variant above — confirm this is intended outside
    # the cluster environment.
    osm_data_path = os.path.abspath(os.path.join('/scistor','ivm','data_catalogue','open_street_map','country_osm')) #path to map with pbf files from OSM at cluster
    #grid_file = 'Holland_0.1degree.geofeather' #'global_grid_0_1.geofeather' #name of grid file
    #grid_file = 'global_grid_0_1.geofeather' #'global_grid_0_1.geofeather' #name of grid file
    #grid_file = 'North-America_025degree.geofeather' #'global_grid_0_1.geofeather' #name of grid file
    # Active grid resolution: global 0.25-degree grid.
    grid_file = 'global_grid_025.geofeather'
    #grid_file = 'global_grid_010degree.geofeather'
    grid_path = os.path.abspath(os.path.join(local_path,'Outputs','Grid_data',grid_file)) #grid data
    shapes_file = 'global_countries_advanced.geofeather'
    country_shapes_path = os.path.abspath(os.path.join(local_path,'Datasets','Administrative_boundaries', 'global_countries_buffer', shapes_file)) #shapefiles with buffer around country
    # Set path for outputs
    base_path = os.path.abspath(os.path.join(local_path, 'Outputs', 'Exposure', 'CISI_global')) #this path will contain folders in which
    # path to save outputs - automatically made, not necessary to change output pathways
    fetched_infra_path = os.path.abspath(os.path.join(base_path,'Fetched_infrastructure')) #path to map with fetched infra-gpkg's
    #fetched_infra_path = os.path.abspath(os.path.join('C:/Users/snn490/Documents','Fetched_infrastructure')) #path to map with fetched infra-gpkg's TEMPORARY
    infra_base_path = os.path.abspath(os.path.join(base_path, 'Infrastructure_base_025')) #save interim calculations
    method_max_path = os.path.abspath(os.path.join(base_path, 'index_025', 'method_max')) #save figures
    method_mean_path = os.path.abspath(os.path.join(base_path, 'index_025', 'method_mean')) #save figures
    #output_documentation_path = os.path.abspath(os.path.join(base_path, 'index', test_number)) #save documentation
    #output_histogram_path = os.path.abspath(os.path.join(base_path, 'index', test_number)) #save documentation
    #Create folders for outputs (GPKGs and pngs)
    #Path(output_histogram_path).mkdir(parents=True, exist_ok=True)
    # Side effect: creates the fetched-infrastructure directory on disk.
    Path(fetched_infra_path).mkdir(parents=True, exist_ok=True)
    # Each mode returns a different subset of paths (see docstring).
    if extract_data:
        return [osm_data_path,fetched_infra_path,country_shapes_path]
    if base_calculation:
        return [grid_path,fetched_infra_path,infra_base_path,country_shapes_path]
    if cisi_calculation:
        return [method_max_path,method_mean_path,infra_base_path]
def get_code(filename):
    """Return all lines of a source file.

    arg : (string) : file name
    return : (list) : list of all instruction lines in the code file,
        each including its trailing newline
    """
    # readlines() already produces the list the original built by hand.
    with open(filename, 'r') as f:
        return f.readlines()
import requests
def get_response_status(derived_from_url, timeout=None):
    """Get a response status for a derivedFrom URL.

    :param derived_from_url: URL to probe with an HTTP GET
    :param timeout: optional seconds to wait, passed to ``requests.get``;
        the default (None) preserves the original wait-forever behaviour
    :return: True if the request returned status 200, False otherwise.
        The original returned None for non-200 success codes and let
        connection errors propagate; both now yield False.
    """
    try:
        r = requests.get(derived_from_url, timeout=timeout)
        r.raise_for_status()
    except requests.exceptions.RequestException:
        # Covers HTTPError as well as connection/timeout failures.
        return False
    return r.status_code == 200
def find_organization_or_user_for_default_charges(user):
    """
    Determine which namespace a user's charges default to.

    Precedence: an explicitly configured default namespace, then the
    first non-"public" organization the user belongs to, and finally
    the user's own username.

    :param user: user model with ``username``, ``default_namespace_charged``
        and ``organizations`` attributes
    :return: namespace name (str) to charge by default
    """
    configured = user.default_namespace_charged
    if configured is not None and configured != "":
        return configured
    for org in user.organizations:
        if org.organization_name != "public":
            return org.organization_name
    return user.username
import torch
def bhw_to_onehot_by_scatter(bhw_tensor: torch.Tensor, num_classes: int):
    """One-hot encode a batch of integer label maps via ``scatter_``.

    Args:
        bhw_tensor: integer labels of shape (b, h, w)
        num_classes: number of classes; must exceed the largest label
    Returns:
        float tensor of shape (b, h, w, num_classes)
    """
    assert bhw_tensor.ndim == 3, bhw_tensor.shape
    assert num_classes > bhw_tensor.max(), torch.unique(bhw_tensor)
    batch, height, width = bhw_tensor.shape
    # Flatten every pixel into one row, one-hot each row, then restore
    # the spatial layout with the class axis last.
    flat_labels = bhw_tensor.reshape(-1, 1)
    flat_onehot = torch.zeros(size=(batch * height * width, num_classes))
    flat_onehot.scatter_(dim=1, index=flat_labels, value=1)
    return flat_onehot.reshape(batch, height, width, num_classes)
def extract_ROI(image_path, image, ROI):
    """Extract the region of interest out of an image.

    :param image_path: path to the image file (used only in error messages)
    :type image_path: str
    :param image: the image matrix, shape (height, width, channels)
    :type image: numpy.ndarray
    :param ROI: (x, y, w, h) with x/w along the width axis and y/h along
        the height axis, or None to return the whole image
    :returns: numpy.ndarray -- the region of interest
    :raises TypeError: if ROI does not have exactly 4 entries
    :raises ValueError: if ROI falls outside the image bounds
    """
    if ROI is None:
        return image
    if len(ROI) != 4:
        raise TypeError("ROI needs to be of length 4")
    x, y, w, h = ROI
    height, width, _ = image.shape
    if x < 0 or y < 0 or x + w > width or y + h > height:
        raise ValueError("Invalid dimensions for ROI for image: %s"
                         % image_path)
    # Rows are indexed by y/h and columns by x/w.  The original sliced
    # image[x:x+w, y:y+h], swapping the axes relative to the bounds
    # check above.
    return image[y:y + h, x:x + w]
from typing import Mapping
def dict_merge(dct, merge_dct, add_keys=True):
    """Recursively merge ``merge_dct`` into a copy of ``dct``.

    Like :meth:`dict.update`, but descends into nested dicts of arbitrary
    depth instead of replacing them wholesale.  Both input dicts are left
    untouched; a new dict is returned.

    Args:
        dct (dict): dict onto which the merge is executed
        merge_dct (dict): dict merged into dct
        add_keys (bool): when False, keys present only in ``merge_dct``
            are ignored rather than added

    Returns:
        dict: the merged result
    """
    result = dct.copy()
    if not add_keys:
        # Restrict the merge to keys both dicts share.
        shared = set(dct).intersection(set(merge_dct))
        merge_dct = {key: merge_dct[key] for key in shared}
    for key, value in merge_dct.items():
        existing = result.get(key)
        if key in result and isinstance(existing, dict) and isinstance(value, Mapping):
            # Both sides hold mappings: merge them recursively.
            result[key] = dict_merge(existing, value, add_keys=add_keys)
        else:
            result[key] = value
    return result
def create_tags(tags: list) -> list:
    """Build the tag list for a nightcore upload.

    Keeps as many of the original tags as fit within YouTube's
    400-character total tag limit, reserving room for the appended
    "nightcore" tag.
    """
    suffix = 'nightcore'
    # Characters of the original tags we can afford to keep.
    budget = 400 - len(suffix)
    kept = []
    running_total = 0
    for tag in tags:
        running_total += len(tag)
        # Stop at the first tag that would blow the budget.
        if running_total >= budget:
            break
        kept.append(tag)
    kept.append(suffix)
    return kept
def ravel_alive(is_alive, *args):
    """
    Merge the is-alive ticks of all sessions into one dimension.

    Selects, from every array in ``args``, the entries at positions
    where ``is_alive`` is nonzero, returning the flattened selections.
    """
    keep = is_alive.nonzero()
    return [arr[keep] for arr in args]
def parse_conf_intervals(ci_fh):
    """
    Parse the StAR conf_intervals.txt file, each row a pair of methods
    with AUC difference (this time WITH sign, so we know which is better
    and which worse) and confidence interval.

    Parameters:
        ci_fh - open filehandle to read conf_intervals.txt from

    Return value:
        dict { (method1,method2) : (auc_difference, cilower, ciupper) }
        mapping pair of methods to difference in AUC (method1 - method2),
        and lower and upper confidence interval values as floats
    """
    ci_dict = {}
    next(ci_fh, None)  # skip the header row
    for line in ci_fh:
        sline = line.split('\t')
        (method1, method2) = sline[0].split('/')
        method1 = method1.strip('"')
        method2 = method2.strip('"')
        deltaAUC = float(sline[1])
        cipair = sline[2]  # e.g. ( -0.0642863 , -0.0410837 )
        # Convert the interval bounds to floats; the original returned
        # raw strings here, contradicting its own docstring.
        cilower = float(cipair.split(' ')[1])
        ciupper = float(cipair.split(' ')[3])
        ci_dict[(method1, method2)] = (deltaAUC, cilower, ciupper)
    return ci_dict
def delta_tau_i(kappa_i, p_1, p_2, g):
    """
    Contribution to optical depth from layer i.

    Malik et al. (2017), Equation 19: the pressure drop across the layer
    divided by surface gravity, scaled by the layer opacity.
    """
    pressure_drop = p_1 - p_2
    return pressure_drop / g * kappa_i
from collections.abc import Mapping


def walkdict(dict, match):
    """
    Find a key in a dict or nested dict and return its value.

    :param dict: dict or nested dict (parameter name kept for backward
        compatibility, although it shadows the builtin)
    :param match: key to search for
    :return: value associated with the first matching key, or None if
        the key is not present anywhere
    """
    for key, value in dict.items():
        if key == match:
            return value
        # Recurse into any mapping.  The original only descended into
        # typing.OrderedDict, silently skipping plain nested dicts
        # despite its docstring promising nested-dict support.
        if isinstance(value, Mapping):
            found = walkdict(value, match)
            if found is not None:
                return found
    return None
import subprocess
def get_default_configuration(board):
    """Run ``make default-configuration`` for the given board and return
    its captured stdout (bytes).
    """
    command = ["make", "-s", "BOARD=" + board, "default-configuration"]
    return subprocess.check_output(command,
                                   cwd="examples/default-configuration")
def instance_to_queryset_string(instance):
    """Return a Django-queryset-style representation of an instance,
    e.g. ``<ModelName: str(instance)>``."""
    # Take the class name (last dotted component of the type's repr).
    type_repr = str(type(instance)).strip("<>'")
    class_name = type_repr.split('.')[-1]
    return "<{}: {}>".format(class_name, str(instance))
def followers_count(user):
    """
    Return the user's follower count.

    :param user: An User instance (may be None or anonymous, in which
        case 0 is returned)
    """
    if not user:
        return 0
    if user.is_anonymous():
        return 0
    return user.followers()
def level_message(sequence, level):
    """Build the Simon level banner containing the sequence to repeat."""
    # Space-separated sequence, rendered after the ANSI color/underline codes.
    rendered_sequence = " ".join(str(step) for step in sequence)
    return "\033[1;95mSequence {}:\n\n\033[4m{}".format(level, rendered_sequence)
import sys
def sync_performer(f):
    """
    A decorator for performers that return a value synchronously.

    The returned function accepts (dispatcher, intent, box); the wrapped
    function is called with (dispatcher, intent) and its result is fed to
    ``box.succeed``; any Exception it raises is fed to ``box.fail`` as an
    ``sys.exc_info()`` triple.
    """
    def inner(dispatcher, intent, box):
        try:
            result = f(dispatcher, intent)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt propagate instead of being swallowed
            # into the box.
            box.fail(sys.exc_info())
        else:
            # Outside the try: a failure inside box.succeed should not
            # also mark the box as failed.
            box.succeed(result)
    return inner
def dict2txt_S(dict_D, add_num_elems=False):
    """Return a string representation of dict_D.

    Each item is rendered on its own line as ``'key'='value'``; when
    ``add_num_elems`` is True the output starts with ``len=N``.
    """
    if add_num_elems:
        header = "len=%s\n" % (len(dict_D))
    else:
        header = ""
    # .items() replaces the Python-2-only .iteritems(), which raised
    # AttributeError on Python 3; join avoids quadratic += concatenation.
    lines = ["\n'%s'='%s'" % (key, val) for key, val in dict_D.items()]
    return header + "".join(lines)
from datetime import datetime
import random
def random_filename():
    """Generate a random file name.

    Returns a 16-digit string: the current timestamp (YYYYMMDDHHMMSS,
    14 digits) followed by a zero-padded two-digit random suffix.  The
    original padded only values <= 10 and drew from 0-100, so "010" and
    "100" produced names of inconsistent length.
    """
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    suffix = "%02d" % random.randint(0, 99)
    return timestamp + suffix
import struct
def serialize_close(code, reason):
    """
    Serialize the data for a WebSocket close frame.

    This is the reverse of :func:`parse_close`: a big-endian 16-bit
    status code followed by the UTF-8 encoded reason.
    """
    status = struct.pack('!H', code)
    return status + reason.encode('utf-8')
def convert_taxa(rough_taxa, formatting_keys='%1.2f', hundredx=False):
    """Formats lists of numbers for table generation.

    INPUTS:
        rough_taxa -- a list of lists, each a descriptor string followed
            by the corresponding numeric values
        formatting_keys -- a string (applied to every value) or a list of
            strings (one per value) describing how each value should be
            formatted using printf-style formats (e.g. %1.2f, %2d, %i).
            A value of 'SKIP' removes that value from the output list.
        hundredx -- a bool (applied to every value) or a list of bools;
            when True the value is multiplied by 100 before formatting.
    OUTPUTS:
        formatted_taxa -- a list of lists of strings for the final table.
            Unlike the original, ``rough_taxa`` is left unmodified (the
            original popped each taxon name off the caller's lists).
    RAISES:
        TypeError/ValueError for malformed or inconsistent arguments.
    """
    # -- argument validation (messages preserved from the original) --
    if not isinstance(rough_taxa, list):
        raise TypeError('rough_taxa must have be a list of at least one '
                        'lists.\nrough_taxa is a %s.' % rough_taxa.__class__)
    elif len(rough_taxa) == 0:
        raise ValueError('rough taxa must have be a list of at least one '
                         'lists.\nrough_taxa does not have any elements.')
    elif not isinstance(rough_taxa[0], list):
        raise TypeError('rough taxa must have be a list of at least one '
                        'lists.\nThe first element in rough taxa is a %s.'
                        % rough_taxa[0].__class__)
    num_ent = len(rough_taxa[0])
    for entry in rough_taxa:
        if not isinstance(entry, list):
            raise TypeError('rough_taxa must be a list of lists')
        if not len(entry) == num_ent:
            raise ValueError('list size is inconsistant')
    num_rough = num_ent - 1
    if isinstance(formatting_keys, list):
        num_keys = len(formatting_keys)
    else:
        num_keys = 1
    if isinstance(hundredx, list):
        num_hund = len(hundredx)
    else:
        num_hund = 1
    if not isinstance(formatting_keys, (list, str)):
        raise TypeError('formatting_keys must be a list or string.')
    if not num_rough == num_keys and isinstance(formatting_keys, list):
        raise ValueError('The number of elements in rough_taxa (%i) and the '
                         'number of elements in formatting_keys (%i) must be '
                         'equal.' % (num_rough, num_keys))
    elif not isinstance(hundredx, (list, bool)):
        raise TypeError('hundredx must be a list or bool.')
    elif not num_rough == num_hund and isinstance(hundredx, list):
        raise ValueError('The number of elements in rough_taxa(%i) and the '
                         'number of elements in hundredx(%i) must be equal.'
                         % (num_rough, num_hund))
    # Broadcast scalar formatting_keys / hundredx to one entry per value.
    if isinstance(formatting_keys, str):
        formatting_keys = [formatting_keys] * num_rough
    if isinstance(hundredx, bool):
        hundredx = [hundredx] * num_rough
    # -- formatting; no longer mutates the caller's nested lists --
    formatted_taxa = []
    for element in rough_taxa:
        new_element = [element[0]]
        for idx, item in enumerate(element[1:]):
            if formatting_keys[idx] == 'SKIP':
                continue
            if hundredx[idx]:
                item = item * 100
            new_element.append(formatting_keys[idx] % item)
        formatted_taxa.append(new_element)
    return formatted_taxa
def get_flip_mutation_function():
    """
    Return a mutation function that negates a boolean gene value;
    see :ref:`mutation-functions`.

    :Valid For:
        ``"bool"`` and ``"[bool]"`` gene types

    :returns: a function mapping a value to its boolean negation
    """
    def _flip(value):
        return not value
    return _flip
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.