content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def torsion(a, b, c):
    """
    Find the torsion angle between planes ab and bc.
    Arguments:
    *a,b,c*
    Vector instances.
    Returns:
    The torsion angle in radians
    """
    # Normals of the two planes; relies on the project's Vector type
    # providing cross() and arg() (angle between two vectors) methods.
    n1 = a.cross(b)
    n2 = b.cross(c)
    return n1.arg(n2) | 898019acb8724c61ab56b887c71cee8d0134eae9 | 694,291 |
import unicodedata
def normalize_email_address(email):
    """
    Normalize an email address: apply NFKC unicode normalization, then strip
    surrounding whitespace and lowercase the result.
    :param email: The email address that needs to be normalized.
    :type email: str
    :return: The normalized email address.
    :rtype: str
    """
    nfkc_form = unicodedata.normalize("NFKC", email)
    return nfkc_form.strip().lower()
def _get_dev_port_var(backend, instance=None):
"""Return the environment variable for a backend port.
Backend ports are stored at GET_PORT_<backend> for backends
and GET_PORT_<backend>.<instance> for individual instances.
Args:
backend: The name of the backend.
instance: The backend instance (optional).
Returns:
string: The environment variable where the backend port is stored.
"""
port_var = 'BACKEND_PORT.%s' % str(backend).lower()
if instance is not None:
port_var = '%s.%d' % (port_var, instance)
return port_var | 1eea3611bf16edd477df0becd04436672283e176 | 694,293 |
import math
def pnorm(z):
    """
    Returns the area under a standard normal distribution
    from -inf to z standard deviations.
    @type z: C{float}
    @param z: number of standard deviations
    @return: the area under a normal distribution
        from -inf to z standard deviations
    @rtype: C{float}
    """
    if z < 0:
        # The Abramowitz & Stegun 26.2.17 polynomial approximation below is
        # only valid for z >= 0; use the symmetry Phi(-z) = 1 - Phi(z).
        return 1.0 - pnorm(-z)
    t = 1/(1 + 0.2316419 * z)
    # Horner evaluation of b1*t + b2*t^2 + ... + b5*t^5, scaled by the
    # normal pdf; max absolute error ~7.5e-8.
    pd = 1 - 0.3989423 * \
        math.exp(-z * z/2) * \
        ((((1.330274429 * t - 1.821255978) * t
           + 1.781477937) * t - 0.356563782) * t + 0.319381530) * t
    # /* see Gradsteyn & Rhyzik, 26.2.17 p932 */
    return pd
from typing import Dict
from typing import List
def config_to_match_string(config: Dict, config_space: Dict, keys: List[str]) -> str:
    """
    Maps configuration to a match string, which can be used to compare configs
    for (approximate) equality. Only keys in `keys` are used, in that ordering.
    :param config: Configuration to be encoded in match string
    :param config_space: Configuration space
    :param keys: Keys of parameters to be encoded
    :return: Match string
    """
    return ",".join(
        f"{key}:{config_space[key].match_string(config[key])}" for key in keys
    )
def _CollectDependencies(apt_cache, pkg, cache, dependencies):
    """Collect dependencies that need to be built.

    Recursively walks `pkg`'s dependency tree and appends to `dependencies`
    every library package that is, or transitively depends on, one of the
    core C/C++ runtime libraries.

    Args:
        apt_cache: apt cache object, indexable by package name.
        pkg: the apt package currently being examined.
        cache: dict memoizing pkg.name -> bool (already-computed results).
        dependencies: output list of package names to build (mutated in place).

    Returns:
        bool: True if `pkg` is (or depends on) a C/C++ runtime library.
    """
    C_OR_CXX_DEPS = [
        'libc++1',
        'libc6',
        'libc++abi1',
        'libgcc1',
        'libstdc++6',
    ]
    BLACKLISTED_PACKAGES = [
        'multiarch-support',
    ]
    if pkg.name in BLACKLISTED_PACKAGES:
        return False
    # Only packages in the library sections are candidates.
    if pkg.section != 'libs' and pkg.section != 'universe/libs':
        return False
    if pkg.name in C_OR_CXX_DEPS:
        return True
    is_c_or_cxx = False
    for dependency in pkg.candidate.dependencies:
        # NOTE(review): only the first alternative of each or-group is
        # examined — confirm that is intentional.
        dependency = dependency[0]
        if dependency.name in cache:
            is_c_or_cxx |= cache[dependency.name]
        else:
            is_c_or_cxx |= _CollectDependencies(apt_cache, apt_cache[dependency.name],
                                                cache, dependencies)
    if is_c_or_cxx:
        dependencies.append(pkg.name)
    cache[pkg.name] = is_c_or_cxx
    return is_c_or_cxx | 4cb2ecd9284bb028d8f3ffc8acd554cae3b6a4b0 | 694,296 |
def length(value):
    """return len(value)"""
    result = len(value)
    return result
def _format_key(key: str) -> str:
"""Internal function for formatting keys in Tensorboard format."""
return key.title().replace('_', '') | 498b31240754164b0259ecc0a6ca3c46728db332 | 694,298 |
import numpy as np
def h5_to_string(char_array):
    """Python3 distinguishes between char strings and string objects.
    This causes incompatibilities with HDF5.

    :param char_array: a str, bytes or numpy bytes scalar
    :return: the decoded/unchanged text as a ``str``
    :raises TypeError: if the input is neither str nor bytes-like
    """
    # isinstance() also accepts subclasses (np.bytes_ subclasses bytes),
    # unlike the original exact type() comparison.
    if isinstance(char_array, (bytes, np.bytes_)):
        return char_array.decode()
    if isinstance(char_array, str):
        return char_array
    raise TypeError("Char_array must be a string or byte array!\n"
                    +"Your type is: {}.\n".format(type(char_array)))
import fnmatch
def is_matching(filename, patterns=None):
    """Check if a filename matches the list of positive and negative patterns.
    Positive patterns are strings like ``"1.txt"``, ``"[23].txt"``, or
    ``"*.txt"``.
    Negative patterns are strings like ``"!1.txt"``, ``"![23].txt"``, or
    ``"!*.txt"``.
    Each pattern is checked in turn, so later patterns override earlier ones:
    ``["!*.txt", "1.txt"]`` will still match ``"1.txt"``.
    >>> is_matching("1.txt", patterns=["1.txt"])
    True
    >>> is_matching("1.txt", patterns=["[12].txt"])
    True
    >>> is_matching("1.txt", patterns=["*.txt"])
    True
    >>> is_matching("1.txt", patterns=["1.txt", "!*.txt"])
    False
    >>> is_matching("1.txt", patterns=["!*.txt", "[12].txt"])
    True
    """
    active_patterns = ['*'] if patterns is None else patterns
    matched = False
    for pattern in active_patterns:
        if pattern.startswith('!'):
            if fnmatch.fnmatch(filename, pattern[1:]):
                matched = False
        elif fnmatch.fnmatch(filename, pattern):
            matched = True
    return matched
def max_duplicate_counter(duplicate_statics, object_list, category_dict):
    """Return how many times an image must be duplicated.

    If several rare instances appear in a single image, duplicate with the
    largest number found in the statistics results.

    duplicate_statics: {category_name: duplicate times}
    object_list: List[int] of category ids present in the image
    category_dict: {category_name: category_id}
    Returns:
        int
    """
    # Invert category_dict so we can look names up by id.
    id_to_name = {cat_id: name for name, cat_id in category_dict.items()}
    times = 1
    for category_id in object_list:
        name = id_to_name[category_id]
        if name in duplicate_statics:
            times = max(times, duplicate_statics[name])
    return times
def get_allen_relation(duration1, duration2):
    """Generates an Allen interval algebra relation between two discrete durations of time
    :param duration1: First duration of time (start_frame, end_frame)
    :type duration1: tuple
    :param duration2: Second duration of time (start_frame, end_frame)
    :type duration2: tuple
    :return: one of the 13 Allen relation codes: 'm' (meets), 'mi' (met-by),
        '=' (equal), '<' (before), '>' (after), 'o'/'oi' (overlaps /
        overlapped-by), 'd'/'di' (during / contains), 's'/'si' (starts /
        started-by), 'f'/'fi' (finishes / finished-by)
    :rtype: str
    """
    is1, ie1 = duration1
    is2, ie2 = duration2
    # Frames are discrete, so "meets" is adjacency: the next interval starts
    # on the frame immediately after the previous one ends.
    if is2-1 == ie1:
        return 'm'
    elif is1-1 == ie2:
        return 'mi'
    elif is1 == is2 and ie1 == ie2:
        return '='
    # Order matters below: earlier branches take precedence for border cases.
    elif is2 > ie1:
        return '<'
    elif is1 > ie2:
        return '>'
    elif ie1 >= is2 and ie1 < ie2 and is1 < is2:
        return 'o'
    elif ie2 >= is1 and ie2 < ie1 and is2 < is1:
        return 'oi'
    elif is1 > is2 and ie1 < ie2:
        return 'd'
    elif is1 < is2 and ie1 > ie2:
        return 'di'
    elif is1 == is2 and ie1 < ie2:
        return 's'
    elif is1 == is2 and ie1 > ie2:
        return 'si'
    elif ie1 == ie2 and is2 < is1:
        return 'f'
    elif ie1 == ie2 and is2 > is1:
        return 'fi' | 20dd58181bdfd8485ca6cf90d9b22f47bac697c9 | 694,302 |
def to_bits(val):
    """ converts a netmask to bits """
    return ''.join(bin(int(octet))[2:].zfill(8) for octet in val.split('.'))
def gradDivVectorFieldListGolden2(fieldList = "const FieldList<%(Dimension)s, typename %(Dimension)s::Vector>&",
                                  position = "const FieldList<%(Dimension)s, typename %(Dimension)s::Vector>&",
                                  weight = "const FieldList<%(Dimension)s, typename %(Dimension)s::Scalar>&",
                                  mass = "const FieldList<%(Dimension)s, typename %(Dimension)s::Scalar>&",
                                  rho = "const FieldList<%(Dimension)s, typename %(Dimension)s::Scalar>&",
                                  Hfield = "const FieldList<%(Dimension)s, typename %(Dimension)s::SymTensor>&",
                                  kernel = "const TableKernel<%(Dimension)s>&"):
    """Calculate the gradient of the divergence of a Vector FieldList.
    More complex method which relies on the "golden rule", in that we move the mass
    density inside the operator.

    NOTE: this is a binding-generator stub (PYB11-style): the default
    argument values and the return value are C++ type-signature strings
    consumed by the code generator, not runtime objects."""
    return "FieldList<%(Dimension)s, typename %(Dimension)s::Vector>" | 3a9b2858be4ea501e06673900f56d32b80b70902 | 694,305 |
def rssError(yArr,yHatArr):
    """
    Compute the residual sum of squares (RSS) between two arrays.

    Parameters:
        yArr - predicted values
        yHatArr - actual values
    Returns:
        the sum of squared element-wise differences
    Website:
        http://www.cuijiahua.com/
    Modify:
        2017-12-03
    """
    residuals = yArr - yHatArr
    return (residuals ** 2).sum()
def simplify(value):
    """Return an int if value is an integer-valued float, or value otherwise.
    >>> simplify(8.0)
    8
    >>> simplify(2.3)
    2.3
    >>> simplify('+')
    '+'
    """
    # float.is_integer() is safe for inf/nan, where int(value) would raise
    # OverflowError/ValueError before the comparison could run.
    if isinstance(value, float) and value.is_integer():
        return int(value)
    return value
def decode_pdf_string(bytestring):
    """
    PDF Strings can sometimes be UTF-16. Detect the UTF-16 BOM and convert
    if necessary; otherwise assume ASCII.
    """
    has_utf16_bom = bytestring.startswith((b'\xfe\xff', b'\xff\xfe'))
    return bytestring.decode("utf-16" if has_utf16_bom else "ascii")
def extend_event(events, time, max_time):
    """Extends events in event list by time.
    The start time of each event is moved time seconds back and the end
    time is moved time seconds later, clamped to [0, max_time].
    Args:
        events: list of events. Each event is a (start, end) pair
        time: time to extend each event in seconds
        max_time: maximum end time allowed of an event.
    Return
        extended_events: list of events, each extended (as [start, end] lists).
    """
    padded = events.copy()
    for idx, ev in enumerate(events):
        padded[idx] = [max(0, ev[0] - time), min(max_time, ev[1] + time)]
    return padded
import argparse
def get_args():
    """Build the command-line parser for this script and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(
        description='Find directory name(s)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        '-w', '--width',
        help='width space of each line',
        type=int,
        metavar='int',
        default=50)
    arg_parser.add_argument(
        'directory',
        help='directory name(s)',
        type=str,
        metavar='DIR',
        nargs='+')
    return arg_parser.parse_args()
import uuid
def generate_random_string(string_length=10):
    """Generate a random string of specified length string_length.
    Args:
        string_length (int): Size of string to generate.
    Returns:
        str: Random string of specified length (maximum of 32 characters)
    """
    # uuid4().hex is the dash-free lowercase hex form; uppercase it to match
    # the historical output format.
    token = uuid.uuid4().hex.upper()
    return token[:string_length]
import os
def find_large_files(directory, limit):
    """Find files in directory that exceed the size limit.
    :param str directory: path of directory where to find large files
    :param float limit: size limit (in MBs) that determines whether a
        file is a large file
    :return: list of large files
    :rtype: list
    """
    threshold_bytes = limit * 1024 ** 2
    return [
        os.path.join(directory, name)
        for name in sorted(os.listdir(directory))
        if os.path.getsize(os.path.join(directory, name)) > threshold_bytes
    ]
def target_mapper(feature, targets, nodes):
    """Map a `nodes` column onto the `targets` frame by id.

    Rows of `targets` whose id has no mapping (i.e. does not belong to a
    'target'-type node) are dropped.

    :param feature: name of the column in `nodes` to map over
    :param targets: DataFrame with an 'id' column (mutated: gains `feature`)
    :param nodes: DataFrame with 'id', 'type' and the `feature` column
    :return: `targets` restricted to rows with a mapped value
    """
    target_mapper_dict = {
        target_id: feature_value
        for target_id, feature_value, node_type in zip(
            nodes["id"].values, nodes[feature].values, nodes["type"].values
        )
        if node_type == "target"
    }  # dictionary {target1: f_value_of_target1, target2: f_value_of_target2, ... }
    targets[feature] = (
        targets["id"].map(target_mapper_dict).fillna("remove_me")
    )  # map to targets, note which are not connected (with 'remove_me')
    # BUG FIX: the original filtered on the hard-coded column `f_value`
    # instead of the freshly mapped `feature` column.
    return targets[targets[feature] != "remove_me"]
import socket
def get_hostname():
    """Return this machine's fully-qualified domain name, used as the
    client id for the connection."""
    return socket.getfqdn()
import os
import subprocess
def use_npm_ci(path):
    """Return true if npm ci should be used in lieu of npm install.

    True only when the project has a lockfile (package-lock.json or
    npm-shrinkwrap.json) AND the installed npm supports the `ci`
    subcommand (probed via the exit status of `npm ci -h`).
    """
    # https://docs.npmjs.com/cli/ci#description
    # Discard the probe's output; only the exit status matters.
    with open(os.devnull, 'w') as fnull:
        if ((os.path.isfile(os.path.join(path,
                                         'package-lock.json')) or
             os.path.isfile(os.path.join(path,
                                         'npm-shrinkwrap.json'))) and
            subprocess.call(
                ['npm', 'ci', '-h'],
                stdout=fnull,
                stderr=subprocess.STDOUT
            ) == 0):
            return True
    return False | ef5c2194cf0838ce6a1d461274dfdfdba0038ca4 | 694,315 |
import unicodedata
def normalize(label):
    """Normalize a string to Unicode Normal Form C (composed)."""
    return unicodedata.normalize('NFC', label)
def true_solar_time(hour_of_day: float, local_time_constant: float, time_ekvation: float) -> float:
    """True solar time in hours - DK: Sand soltid"""
    # Both corrections are given in minutes; convert to hours.
    correction_hours = (local_time_constant - time_ekvation) / 60
    return hour_of_day + correction_hours
def deep_scrub(value, parent):
    """Scrubs all primitives in document data recursively. Useful for scrubbing
    any and all secret data that may have been substituted into the document
    data section before logging it out safely following an error.

    Mutates `parent` in place and returns None.

    NOTE: for a primitive inside a list, list.index() replaces only the
    FIRST occurrence equal to `value`; for a dict, every entry whose value
    compares equal is replaced.
    """
    primitive = (int, float, complex, str, bytes, bool)
    def is_primitive(value):
        return isinstance(value, primitive)
    if is_primitive(value):
        if isinstance(parent, list):
            parent[parent.index(value)] = 'Scrubbed'
        elif isinstance(parent, dict):
            for k, v in parent.items():
                if v == value:
                    parent[k] = 'Scrubbed'
    elif isinstance(value, list):
        # Recurse with the list itself as the new parent.
        for v in value:
            deep_scrub(v, value)
    elif isinstance(value, dict):
        for v in value.values():
            deep_scrub(v, value) | 9eabd3a2ce240b0120fd64100ba4de4cda8af686 | 694,318 |
from typing import Union
from typing import Dict
from typing import Any
from typing import List
import copy
def _get(prop: str) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
    """Returns a deep copy of the module-level global named ``'_' + prop``
    (normally either an object definition or an object definition list)."""
    definition = globals()['_' + prop]
    return copy.deepcopy(definition)
def all_of(*validators):
    """
    Combine several attrs-style validators into one.
    Args:
        validators: callables taking (inst, attr, value)
    Returns:
        a validator that invokes each given validator in order
    """
    def _all_of(inst, attr, value):
        for check in validators:
            check(inst, attr, value)
    return _all_of
def build_command(
    upstream_git_url,
    upstream_git_commit,
    clusterinstance,
):
    """upstream_git_url is the Git URL where the service lives (e.g.
    git@git.yelpcorp.com:services/foo)
    instancename is where you want to deploy. E.g. cluster1.canary indicates
    a Mesos cluster (cluster1) and an instance within that cluster (canary)
    """
    return (
        f'git push -f {upstream_git_url} '
        f'{upstream_git_commit}:refs/heads/paasta-{clusterinstance}'
    )
def correctly_classified_negatives(negatives, threshold):
    """Evaluates correctly classified negatives in a set, based on a threshold.

    A negative score is correctly classified when it falls strictly below
    ``threshold``; the decision is computed element-wise.

    Parameters
    ==========
    negatives : numpy.ndarray (1D, float)
        The scores generated by comparing objects of different classes
    threshold : float
        The threshold, for which scores should be considered to be
        correctly classified
    Returns
    =======
    classified : numpy.ndarray (1D, bool)
        The decision for each of the ``negatives``
    """
    decisions = negatives < threshold
    return decisions
def constructCommonRating(tup1, tup2):
    """
    Args:
        tup1 and tup2 are of the form (user, [(movie, rating)])
    Returns:
        ((user1, user2), [(rating1, rating2)]) where the rating pairs come
        from movies rated by both users (merge of the sorted lists).
    """
    left_user, right_user = tup1[0], tup2[0]
    left = sorted(tup1[1])
    right = sorted(tup2[1])
    common = []
    i = j = 0
    while i < len(left) and j < len(right):
        movie_l, movie_r = left[i][0], right[j][0]
        if movie_l < movie_r:
            i += 1
        elif movie_l > movie_r:
            j += 1
        else:
            common.append((left[i][1], right[j][1]))
            i += 1
            j += 1
    return ((left_user, right_user), common)
def cell_count_from_extent(extent):
    """Returns the number of cells in a grid with the given extent"""
    total = 1
    for size in extent:  # list, tuple or 1D numpy array
        total *= size
    return total
def observation(s, breaks, matrix, memo):
    """
    Returns an observation using memoization, based on the parameter s.

    Below the first break the value is simply s * matrix[0]; otherwise the
    segment containing s is located and the precomputed memo offsets for
    that segment are applied.
    """
    num_breaks = len(breaks)
    if num_breaks == 0 or s <= breaks[0]:
        return s * matrix[0]
    for idx in range(1, num_breaks + 1):
        if idx == num_breaks or s <= breaks[idx]:
            offset = (idx - 1) * 2
            return memo[offset] + s * matrix[idx] - memo[offset + 1]
def resolution_human_readable(output: dict) -> dict:
    """
    Creates the human readable dictionary from the output of the resolution
    of the request.
    Args:
        output: The resolution output that was created for the called request
    Returns:
        A dictionary containing all the valid fields in the resolution output
        (SubmittedBy is flattened to its 'name' field)
    """
    readable = {}
    for field in output:
        if field == 'SubmittedBy':
            readable[field] = output.get('SubmittedBy', {}).get('name', '')
        else:
            readable[field] = output.get(field, '')
    return readable
def grad_for(primal):
    """Generates a decorator to decorate `primal`'s customized gradient function.
    See :func:`~taichi.lang.grad_replaced` for examples.
    Args:
        primal (Callable): The primal function, must be decorated by :func:`~taichi.ad.grad_replaced`.
    Returns:
        Callable: The decorator used to decorate customized gradient function."""
    def decorator(func):
        # BUG FIX: validate `primal` once at decoration time. The original
        # ran these checks inside `decorated`, *after* `primal.grad` had
        # already been assigned, so every call to the gradient function
        # raised "Primal function must be a **python** function".
        if not hasattr(primal, 'grad'):
            raise RuntimeError(
                f'Primal function `{primal.__name__}` must be decorated by ti.ad.grad_replaced'
            )
        if primal.grad is not None:
            raise RuntimeError(
                'Primal function must be a **python** function instead of a taichi kernel. Please wrap the taichi kernel in a @ti.ad.grad_replaced decorated python function instead.'
            )
        def decorated(*args, **kwargs):
            func(*args, **kwargs)
        primal.grad = decorated
        return decorated
    return decorator
def most_frequent_best_dist(
    df, train_end_time, metric, parameter, dist_from_best_case, n=1
):
    """Pick the model that is most frequently within `dist_from_best_case` from the
    best-performing model group across test sets so far
    Arguments:
        dist_from_best_case (float) -- distance from the best performing model
        metric (string) -- model evaluation metric, such as 'precision@'
        parameter (string) -- model evaluation metric parameter,
            such as '300_abs'
        train_end_time (Timestamp) -- current train end time
            (NOTE(review): currently unused by this implementation)
        df (pandas.DataFrame) -- dataframe containing the columns:
                model_group_id,
                model_id,
                train_end_time,
                metric,
                parameter,
                raw_value,
                dist_from_best_case
        n (int) -- numbers of model group id
    Returns: (list[int]) the n model group ids most often within the distance
    """
    # Restrict to the requested metric/parameter combination.
    met_df = df.loc[(df["metric"] == metric) & (df["parameter"] == parameter)]
    # NOTE(review): the right-hand side uses `df` (the full frame) rather than
    # `met_df`; pandas aligns on index so only the filtered rows land in the
    # new column, but assigning into a .loc slice triggers
    # SettingWithCopyWarning — confirm intended.
    met_df["within_dist"] = (df["dist_from_best_case"] <= dist_from_best_case).astype(
        "int"
    )
    if n == 1:
        # sample(frac=1) to shuffle rows so we don't accidentally introduce bias in breaking ties
        return [
            met_df.groupby(["model_group_id"])["within_dist"]
            .mean()
            .sample(frac=1)
            .idxmax()
        ]
    else:
        return (
            met_df.groupby(["model_group_id"])["within_dist"]
            .mean()
            .nlargest(n)
            .index.tolist()
        ) | 0b489dfcc4515c00f114bc1ac1bfa423038a53b2 | 694,330 |
def getCoverage(contigHeader):
    """
    Gets the coverage given a contigHeader.

    The coverage is the second underscore-separated field of the header's
    last space-separated token.
    """
    last_token = contigHeader.split(' ')[-1]
    return float(last_token.split('_')[1])
import requests
import json
def get_ironsrc_data_url(token_, appkey, date_iso_string='2020-02-20'):
    """Fetch report-download URLs from the ironSource User Ad Revenue API.

    https://developers.ironsrc.com/ironsource-mobile/air/user-ad-revenue-v2/#step-1

    :param token_: bearer token (surrounding double quotes are stripped)
    :param appkey: ironSource application key
    :param date_iso_string: report date (YYYY-MM-DD); previously hard-coded
        to 2020-02-20, kept as the default for backward compatibility
    :return: the 'urls' list from the JSON response (None if missing)
    """
    headers = {'Authorization': 'Bearer ' + token_.replace('"', '')}
    url_request = "https://platform.ironsrc.com/partners/userAdRevenue/v3?appKey={key}&date={date_iso_string}&reportType=1".format(key=appkey, date_iso_string=date_iso_string)
    resp = requests.get(url_request, headers=headers)
    urls = json.loads(resp.text).get('urls')
    return urls
import os
def get_current_pane(server):
    """Return Pane if one found in env

    Looks up the pane whose 'pane_id' matches the TMUX_PANE environment
    variable among `server._list_panes()`. Returns None (implicitly) when
    the variable is unset or no pane matches.
    """
    if os.getenv('TMUX_PANE') is not None:
        try:
            return [
                p
                for p in server._list_panes()
                if p.get('pane_id') == os.getenv('TMUX_PANE')
            ][0]
        except IndexError:
            # No matching pane: fall through and return None.
            pass | dd027c07cbdcbc2f44660f515a915c795ea5f7a4 | 694,334 |
def make_identity_dict(rng):
    """ make_identity_dict(rng) -> dict
    Return a dictionary where elements of the rng sequence are
    mapped to themselves.
    """
    # Single pass so one-shot iterators are handled correctly.
    identity = {}
    for element in rng:
        identity[element] = element
    return identity
def convert_audio_frequency_to_duty_cycle(freqs):
    """
    Converts audio frequencies to duty cycle (out of 1000)
    """
    shifted = freqs - 1000
    return shifted / 15
def SlashEscapePackageName(pkg_ref, unused_args, request):
    """Escapes slashes in package name for ListVersionsRequest."""
    escaped_id = pkg_ref.packagesId.replace("/", "%2F")
    request.parent = "{}/packages/{}".format(
        pkg_ref.Parent().RelativeName(), escaped_id)
    return request
import hashlib
def get_file_md5(path):
    """Calculate the md5 sum from file content.
    @param path: (string) file path
    @return: (string) md5 hex digest
    """
    digest = hashlib.md5()
    chunk_size = 128 * digest.block_size
    with open(path, 'rb') as handle:
        # iter() with a sentinel reads until EOF in fixed-size chunks.
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
def interval_partitioning(events):
    """
    Sweep-line algorithm: sort all arrival/departure endpoints by time and
    track the running count of open intervals. The maximum count ever seen
    is the number of resources required.

    Ties sort departures (False) before arrivals (True), so back-to-back
    intervals do not overlap.
    """
    endpoints = []
    for ev in events:
        endpoints.append((ev[0], True))
        endpoints.append((ev[1], False))
    endpoints.sort()
    best = current = 0
    for _, is_arrival in endpoints:
        current += 1 if is_arrival else -1
        best = max(best, current)
    return best
def extract_tf_name(filepos='./Cards/proc_card.dat'):
    """Scan the proc card for the requested change of variable.

    Returns the first whitespace-separated token of the line following the
    '# Begin transfer_function' marker, or None if the marker is absent.
    """
    marker_seen = False
    for line in open(filepos):
        if marker_seen:
            # First token only; drops trailing whitespace/newline.
            return line.split()[0]
        marker_seen = line.startswith('# Begin transfer_function')
def _calc_hgt(f):
    """Calculates the geopotential height in m from the variables hgtsfc and
    delz. Note: To use this function the delz value needs to go from surface
    to top of atmosphere in vertical. Because we are adding the height of
    each grid box these are really grid top values
    Parameters
    ----------
    f : xarray.Dataset
        RRFS-CMAQ model data
    Returns
    -------
    xr.DataArray
        Geoptential height with attributes.
    """
    sfc = f.surfalt_m.load()
    dz = f.dz_m.load() * -1.0
    # These are negative in RRFS-CMAQ, but you resorted and are adding from the surface,
    # so make them positive.
    dz[:, 0, :, :] = dz[:, 0, :, :] + sfc  # Add the surface altitude to the first model level only
    # Rolling sum over the full z extent acts as a cumulative sum from the
    # surface upward (min_periods=1 keeps the partial sums near the surface).
    z = dz.rolling(z=len(f.z), min_periods=1).sum()
    z.name = "alt_msl_m_full"
    z.attrs["long_name"] = "Altitude MSL Full Layer in Meters"
    z.attrs["units"] = "m"
    return z | fbec437d512cd169f24e55e407a863bb3e788a31 | 694,341 |
def is_constant_fill(bytes, fill_byte):
    """Check a range of bytes for a constant fill"""
    for value in bytes:
        if value != fill_byte:
            return False
    return True
def _update_cluster(partial, oldC_newC, grid):
"""Update the information about the cluster."""
df1, settings = partial
clusters = settings['clusters']
primary_key = settings['idCol']
clusterCol = settings['predCol']
lat_col = settings['lat_col']
lon_col = settings['lon_col']
init_lat, init_lon, end_lat, end_lon = grid
if len(df1) > 0:
f = lambda point: all([round(point[lat_col], 5) >= init_lat,
round(point[lon_col], 5) >= init_lon,
round(point[lat_col], 5) < (end_lat+0.00001),
round(point[lon_col], 5) < (end_lon+0.00001)])
tmp = df1.apply(f, axis=1)
df1 = df1.loc[tmp]
df1.drop_duplicates([primary_key], inplace=False, ignore_index=True)
for key in oldC_newC:
if key in clusters:
df1.loc[df1[clusterCol] == key, clusterCol] = oldC_newC[key]
df1.loc[df1[clusterCol].str.contains("-9", na=False), clusterCol] = -1
df1 = df1.drop([primary_key], axis=1)
return df1 | 060781b62659f1411077d4b83b47522450983c32 | 694,343 |
import subprocess
def isize(irods_path):
    """Returns the size in bytes of the most recent version of the file

    Parses the 4th column of the last line of `ils -l` output.

    NOTE(review): shell=True with an interpolated path is prone to shell
    injection; only safe for trusted irods_path values.
    """
    raw_output = subprocess.check_output(
        "ils -l {} | tail -n1 |awk '{{print $4}}'".format(irods_path), shell=True
    )
    try:
        return int(raw_output.decode("ascii").strip())
    except ValueError:
        # The column was not numeric: ils printed an error or nothing.
        raise IOError("File not found or an iRODS error occured.") | 6d9165f5d4ad7d22f3260c73b9dc523c4a7aba34 | 694,345 |
import requests
import logging
def pdns_get_masters(**kwargs):
    """
    Get masters from a domain

    Required kwargs: remote_api (PowerDNS API base URL), remote_api_key,
    domain (zone id). Returns the zone's 'masters' list, or [] when the
    API response contains an 'error' key.
    """
    url = "{}/api/v1/servers/localhost/zones/{}".format(kwargs['remote_api'], kwargs['domain'])
    headers = {'X-API-Key' : kwargs['remote_api_key']}
    # NOTE(review): sending a body with GET is unusual; the zone is already
    # identified via the URL, so `payload` is likely redundant — confirm.
    payload = {'server_id':'localhost', 'zone_id' : kwargs['domain']}
    req = requests.get(url, headers=headers, data=payload)
    data = req.json()
    if 'error' in data.keys():
        logging.error(data['error'])
        return []
    return data['masters'] | a83ee57d6ddde2571249f3fddc149c1fc0281b92 | 694,346 |
import socket
def is_connected():
    """
    Check that gov.uk is accessible.
    Used to skip tests if no internet connection is available.

    :return: True when a TCP connection to www.gov.uk:80 succeeds within
        2 seconds, False otherwise.
    """
    try:
        host = socket.gethostbyname("www.gov.uk")
        # Context manager closes the probe socket (the original leaked it);
        # OSError covers all socket/DNS failures without swallowing
        # KeyboardInterrupt the way the original bare `except` did.
        with socket.create_connection((host, 80), 2):
            return True
    except OSError:
        pass
    return False
def read(f):
    """Read contents of specified file, stripped of surrounding whitespace.

    Uses a context manager so the file handle is closed deterministically
    (the original left it open until garbage collection).
    """
    with open(f) as handle:
        return handle.read().strip()
import numpy
import random
def get_random_classification(num_points, num_attributes, num_classes):
    """Return a random classification dataset.
    Attributes are continuous, in the range [-1, 1].

    Returns (inputs, targets): inputs is (num_points, num_attributes) in
    [-1, 1); targets is (num_points, num_classes) one-hot.
    """
    # Scale uniform [0, 1) samples into [-1, 1).
    inputs = (numpy.random.random((num_points, num_attributes)) * 2.0) - 1.0
    # One-hot targets: a single randomly chosen class per point.
    targets = numpy.zeros((num_points, num_classes))
    for row in range(num_points):
        targets[row, random.randint(0, num_classes - 1)] = 1.0
    return inputs, targets
def json_to_tone(json_object):
    """
    Receives json and returns the tone of the sentence.

    Reads the first entry of document_tone.tones and returns its tone_id.
    """
    first_tone = json_object['document_tone']['tones'][0]
    return first_tone['tone_id']
def doc_value_wrapper(doc_cls, value_cls):
    """
    Wrap both the doc and the value
    Code copied from couchdbkit.schema.base.QueryMixin.__view

    Returns a row-mapping callable for couch view results: given a row dict
    with 'id', 'value' and 'doc' keys, wraps the value in `value_cls` and
    the doc in `doc_cls`, returning the pair (doc_inst, value_inst).
    """
    #from corehq.apps.users.models import CouchUser
    def wrapper(row):
        data = row.get('value')
        docid = row.get('id')
        doc = row.get('doc')
        # Re-attach couch metadata so the value wraps like a document.
        data['_id'] = docid
        if 'rev' in data:
            data['_rev'] = data.pop('rev')
        value_cls._allow_dynamic_properties = True
        doc_cls._allow_dynamic_properties = True
        value_inst = value_cls.wrap(data)
        doc_inst = doc_cls.wrap(doc)
        return doc_inst, value_inst
    return wrapper | 605b62ab7e452f6a4e6ac68d9d30d19a900ee4e8 | 694,351 |
def bins(G):
    """Bin genotypes by hamming distance from wildtype.
    Parameters
    ----------
    G : GenotypePhenotypeGraph object.
        A GenotypePhenotypeGraph object.

    Returns
    -------
    dict
        Mapping {number of '1's in the node's binary genotype: [node ids]}.
    """
    temp_bins = {}
    # One bin per possible mutation count (0..genotype length); the sizing
    # expression indexes G.nodes("binary") — presumably the first node's
    # binary string; TODO(review): confirm against the graph library's API.
    for i in range(0, len(G.nodes("binary")[0]) + 1):
        temp_bins[i] = []
    for node in range(len(list(G.nodes()))):
        # NOTE(review): G.node[...] is the networkx 1.x attribute API
        # (2.x spells it G.nodes[...]).
        node_attr = G.node[node]
        # Calculate the level of each node
        level = node_attr["binary"].count("1")
        temp_bins[level].append(node)
    return temp_bins | 6638d56ce71dd376a3d7e4b71d225f219a1d7052 | 694,352 |
import torch
def gradient_wrt_input(model, target_weights, initial_guess, n_iter=100, mask=None, lr=1e-1, verbose=True, device=None,
                       dtype=None):
    """Find input tensor such that the model produces an allocation close to the target one.
    Parameters
    ----------
    model : torch.Module
        Network that predicts weight allocation given feature tensor.
    target_weights : torch.Tensor
        Vector of targeted asset weights of shape `(n_assets,)`.
    initial_guess : torch.Tensor
        Initial feature tensor serving as the starting point for the optimization. The shape is
        `(n_channels, lookback, n_assets)` - the sample dimension is not included.
    n_iter : int
        Number of iterations of the gradients descent (or other) algorithm.
    mask : None or torch.Tensor
        If specified, then boolean ``torch.Tensor`` of the same shape as `initial_guess` than
        one can elementwise choose what parts of the inputs to optimize (True) and which
        keep the same as the initial guess (False).
    lr : float
        Learning rate for the optimizer.
    verbose : bool
        If True, then verbosity activated.
    dtype : None or torch.dtype
        Dtype to be used. If specified, casts all used tensors.
    device : None or torch.device
        Device to be used. If specified, casts all used tensors.
    Returns
    -------
    result : torch.Tensor
        Feature tensor of the same shape as `initial_guess` that is mapped by the network (hopefully)
        close to `target_weights`.
    hist : list
        List of losses per iteration.
    """
    device = device or torch.device('cpu')
    dtype = dtype or torch.float32
    x = initial_guess.clone().to(device=device, dtype=dtype)
    # We optimize the input x itself; the model's parameters are not
    # registered with the optimizer and therefore stay fixed.
    x.requires_grad = True
    if mask is None:
        mask = torch.ones_like(x)
    elif torch.is_tensor(mask):
        if mask.shape != x.shape:
            raise ValueError('Inconsistent shape of the mask.')
    else:
        raise TypeError('Incorrect type of the mask, either None or torch.Tensor.')
    # casting
    mask = mask.to(dtype=torch.bool, device=device)
    model.to(device=device, dtype=dtype)
    target_weights = target_weights.to(device=device, dtype=dtype)
    optimizer = torch.optim.Adam([x], lr=lr)
    model.train()
    hist = []
    for i in range(n_iter):
        if i % 50 == 0 and verbose:
            msg = '{}-th iteration, loss: {:.4f}'.format(i, hist[-1]) if i != 0 else 'Starting optimization'
            print(msg)
        # Masked-out (False) entries enter the model as zeros because of the
        # elementwise x * mask product; mean squared error against the target.
        loss_per_asset = (model((x * mask)[None, ...])[0] - target_weights) ** 2
        loss = loss_per_asset.mean()
        hist.append(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    if verbose:
        print('Optimization done, final loss: {:.4f}'.format(hist[-1]))
    return x, hist | c645ae9389a9d44adcf006d6a772a35f4c42507d | 694,353 |
import numpy as np
def _seed_npy_before_worker_init(worker_id, seed, worker_init_fn=None):
"""
Wrapper Function to wrap the existing worker_init_fn and seed numpy before
calling the actual ``worker_init_fn``
Parameters
----------
worker_id : int
the number of the worker
seed : int32
the base seed in a range of [0, 2**32 - (1 + ``num_workers``)].
The range ensures, that the whole seed, which consists of the base
seed and the ``worker_id``, can still be represented as a unit32,
as it needs to be for numpy seeding
worker_init_fn : callable, optional
will be called with the ``worker_id`` after seeding numpy if it is not
``None``
"""
try:
np.random.seed(seed + worker_id)
except ImportError:
pass
if worker_init_fn is not None:
return worker_init_fn(worker_id) | 778b5885fda917ae932fe20e6caab70708de8b33 | 694,354 |
import bz2
import gzip
def _open(infile, mode='rb'):
"""
Openning of input, regardless of compression
"""
if infile.endswith('.bz2'):
return bz2.open(infile, mode)
elif infile.endswith('.gz'):
return gzip.open(infile, mode)
else:
return open(infile) | d501ef222d747bb6ab652d4d7a754c3d8afa4541 | 694,355 |
def findExternLink( headers, localsMinusOne ):
    """Check that the jump's end point is not one of the interface-method
    headers; returns True when it lies outside them."""
    for header in headers:
        if header == localsMinusOne:
            return False
    return True
def wrap_distance(idx_lt, similar_distance):
    """Compute a pairwise distance; shape suits parallel map invocations.

    :param ((int, int), (obj, obj)) idx_lt: an (index-pair, item-pair) tuple.
    :param func similar_distance: distance function over two items.
    :return ((int, int), int): the index pair and the computed distance.
    """
    index_pair, items = idx_lt
    distance = similar_distance(items[0], items[1])
    return index_pair, distance
import math
def estimate_parameters(N, n):
    """Estimate the population proportion and its standard error from a sample.

    N: number of trials, n: number of successes observed.
    Returns (proportion, standard error of the proportion).
    """
    proportion = n / N
    std_error = math.sqrt(proportion * (1 - proportion) / N)
    return proportion, std_error
import os
import yaml
def _get_schema(modules):
    """Load the config schema for the provided modules.

    NOTE: this schema is intended to have 1-1 relationship with the keys in
    the default config and is used as a means to verify valid overrides
    provided by the user.

    :param modules: stack modules config schema to lookup.
    :returns: modules default schema dictionary.
    """
    schema_path = os.path.join(os.path.dirname(__file__),
                               'defaults/%s.yaml.schema' % (modules))
    # Use a context manager so the handle is closed deterministically
    # (the previous version leaked the open file object).
    with open(schema_path) as schema_file:
        return yaml.safe_load(schema_file)
def to_oscars(router_port):
    """Translate a router/port mapping into an OSCARS address URN.

    es.net:2013::<node>:<port>#<VLAN>

    Args:
        router_port (routerport): router and port, e.g. producing
            urn:ogf:network:domain=es.net:node=bois-cr1:port=xe-1/2/0:link=*

    Returns:
        string: NML address
    """
    fragments = [
        "urn:ogf:network:domain=es.net:node=",
        router_port['router'],
        ":",
        router_port['port'],
        ":link=*",
    ]
    return "".join(fragments)
def update_baseline_with_removed_secrets(results, baseline, filelist):
    """
    Prune (or relocate) baseline entries whose secrets no longer appear in
    the latest scan results; mutates ``baseline`` in place.

    NOTE: filelist is not a comprehensive list of all files in the repo
    (because we can't be sure whether --all-files is passed in as a
    parameter to pre-commit).

    :type results: SecretsCollection
    :type baseline: SecretsCollection
    :type filelist: list(str)
    :param filelist: filenames that are scanned.

    :rtype: bool
    :returns: True if baseline was updated
    """
    updated = False
    for filename in filelist:
        if filename not in baseline.data:
            # Nothing to modify, because not even there in the first place.
            continue
        if filename not in results.data:
            # All secrets relating to that file was removed.
            # We know this because:
            #   1. It's a file that was scanned (in filelist)
            #   2. It was in the baseline
            #   3. It has no results now.
            del baseline.data[filename]
            updated = True
            continue
        # We clone the baseline, so that we can modify the baseline,
        # without messing up the iteration.
        for baseline_secret in baseline.data[filename].copy():
            # Secrets are matched by (filename, hash, type); lineno is
            # deliberately excluded so a moved secret is still recognized.
            new_secret_found = results.get_secret(
                filename,
                baseline_secret.secret_hash,
                baseline_secret.type,
            )
            if not new_secret_found:
                # No longer in results, so can remove from baseline
                old_secret_to_delete = baseline.get_secret(
                    filename,
                    baseline_secret.secret_hash,
                    baseline_secret.type,
                )
                del baseline.data[filename][old_secret_to_delete]
                updated = True
            elif new_secret_found.lineno != baseline_secret.lineno:
                # Secret moved around, should update baseline with new location
                old_secret_to_update = baseline.get_secret(
                    filename,
                    baseline_secret.secret_hash,
                    baseline_secret.type,
                )
                old_secret_to_update.lineno = new_secret_found.lineno
                updated = True
    return updated
def StripPrefix(s: str, prefix: str):
    """Return *s* with *prefix* removed from its start, when present."""
    return s[len(prefix):] if s.startswith(prefix) else s
def same_node(node1, node2):
    """
    Report whether two objects refer to the same cluster node, i.e. share
    the same UUID.

    :param node1: ``Node`` or ``NodeState`` instance.
    :param node2: ``Node`` or ``NodeState`` instance.
    :return: Whether the two instances have the same UUID.
    """
    uuid_a = node1.uuid
    uuid_b = node2.uuid
    return uuid_a == uuid_b
def get_version_from_file(python_version_file="./VERSION"):
    """
    Purpose:
        Read the package version string from a version file.
    Args:
        python_version_file (String): Path to a file whose first line holds
            the version string (defaults to ``./VERSION``).
    Return:
        version (String): The version string, or ``"unknown"`` when the
            file's first line is empty.

    Bug fix: the original unconditionally overwrote its ``"unknown"``
    default, so an empty file yielded ``""`` instead of ``"unknown"``.
    """
    with open(python_version_file) as version_file:
        # strip() already removes the trailing newline.
        version = version_file.readline().strip()
    return version or "unknown"
def Question1(s, t):
    """Given two strings s and t, determine whether some anagram of t is a
    substring of s.  For example: if s = "udacity" and t = "ad", then the
    function returns True.

    Replaces the original O(n*m log m) sort-per-window scan with an O(n)
    sliding-window character-count comparison; observable behavior
    (including False for empty inputs) is unchanged.
    """
    from collections import Counter
    if not s or not t or len(t) > len(s):
        return False
    window_len = len(t)
    target = Counter(t)
    window = Counter(s[:window_len])
    if window == target:
        return True
    for i in range(window_len, len(s)):
        # Slide the window one character to the right.
        window[s[i]] += 1
        left = s[i - window_len]
        window[left] -= 1
        if window[left] == 0:
            del window[left]  # keep counters comparable by ==
        if window == target:
            return True
    return False
def hflip(tensor):
    """Flip the tensor horizontally by reversing axis 2 (the width axis
    for CHW image tensors)."""
    return tensor.flip(2)
import torch
def cbrt(x):
    """Cube root, sign-preserving.  Equivalent to torch.pow(x, 1/3) but
    numerically stable: the magnitude is computed in log space."""
    magnitude = torch.exp(torch.log(torch.abs(x)) / 3.0)
    return torch.sign(x) * magnitude
def TrimDataset(dataset, seq_length, eval_mode=False, sentence_pair_data=False):
    """Avoid using excessively long training examples.

    In eval mode the dataset is returned untouched; otherwise examples
    whose transition sequence(s) exceed ``seq_length`` are dropped.
    """
    if eval_mode:
        return dataset
    if sentence_pair_data:
        def fits(example):
            return (len(example["premise_transitions"]) <= seq_length
                    and len(example["hypothesis_transitions"]) <= seq_length)
    else:
        def fits(example):
            return len(example["transitions"]) <= seq_length
    return [example for example in dataset if fits(example)]
def merge_lists(a, b):
    """
    Interleave two lists element by element, appending any leftovers -- e.g.,
    [1, 2, 3, 4, 5, 6] & ['a', 'b', 'c']
    => [1, 'a', 2, 'b', 3, 'c', 4, 5, 6]

    :param a: List a
    :param b: List b
    :return: Merged lists
    """
    interleaved = []
    for left, right in zip(a, b):
        interleaved.append(left)
        interleaved.append(right)
    n = min(len(a), len(b))
    return interleaved + a[n:] + b[n:]
import io
import math
def get_virtual_tx_size(tx):
    """Return the virtual transaction size as defined in BIP141.

    The weight is three times the base (witness-stripped) serialized size
    plus the full serialized size; the virtual size is the weight divided
    by four, rounded up.
    """
    def streamed_size(include_witness_data):
        """Byte length of ``tx`` serialized with or without witness data."""
        sink = io.BytesIO()
        tx.stream(sink, include_witness_data=include_witness_data)
        return len(sink.getvalue())

    base_size = streamed_size(False)
    total_size = streamed_size(True)
    weight = base_size * 3 + total_size
    return int(math.ceil(weight / 4.0))
import re
def correctlyQuoteArg(arg):
    """Wrap *arg* in double quotes when it contains any whitespace."""
    if re.search(r'\s', arg):
        return '"{}"'.format(arg)
    return arg
import functools
def foldl(func, acc, xs):
    """
    Erlang-style lists:foldl/3 helper.

    Folds ``xs`` into ``acc`` with ``func``, consuming elements from the
    last to the first (the original reduced over ``reversed(xs)``).
    """
    value = acc
    for item in reversed(xs):
        value = func(value, item)
    return value
def pad_octets(octet_array, octet_missing_num):
    """Pad appropriate number of 0 octets if IPv6 is abbreviated.

    :param octet_array: octet strings split from an IPv6 address; blank
        entries ('') mark the position of a '::' abbreviation.
    :param octet_missing_num: how many extra '0' octets to insert at a
        blank interior position to restore the full-length address.
    :returns: the (mutated in place) octet_array.
    """
    padded_octect = '0'
    length = len(octet_array)
    # If the first or last octect is blank, zero them.
    # NOTE(review): zeroing the ends *before* the main loop means a leading
    # or trailing blank becomes a single '0' and is never expanded by
    # octet_missing_num -- confirm this asymmetry is intended.
    if octet_array[0] == '':
        octet_array[0] = padded_octect
    if octet_array[length - 1] == '':
        octet_array[length - 1] = padded_octect
    # Check the rest of the array for blank octets and pad as needed.
    # NOTE(review): the list is grown while iterating indices based on its
    # original length, so this is only well-defined when at most one blank
    # remains at this point -- verify against callers.
    for i in range(length):
        if octet_array[i] == '':
            octet_array[i] = padded_octect
            for j in range(octet_missing_num):
                octet_array.insert(i, padded_octect)
    return octet_array
def binary_search(array, item):
    """
    Given a sorted array, perform a binary search to locate the item.

    Parameters: array (sorted list), item (value to find)
    Returns: index of the item, or -1 if the item is not found

    Bug fix: the midpoint previously used true division (``/``), which
    produces a float and raises TypeError when used as a list index in
    Python 3; it now uses floor division.
    """
    first = 0
    last = len(array) - 1
    while first <= last:
        mid = (first + last) // 2
        if array[mid] == item:
            return mid
        elif array[mid] < item:
            # Item can only be in the right half.
            first = mid + 1
        else:
            # Item can only be in the left half.
            last = mid - 1
    return -1
def relatively_prime(a, b):
    """
    Return True when the given numbers are relatively prime (coprime),
    i.e. their greatest common divisor is 1.

    Bug fix: the original trial-division loop had an empty range for
    non-positive inputs and so wrongly reported e.g. (0, 4) as relatively
    prime; ``math.gcd`` handles those cases correctly.
    """
    from math import gcd
    return gcd(a, b) == 1
def match_month(i):
    """Convert a 1-based month number (int or numeric string) to its
    English three-letter abbreviation."""
    abbreviations = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                     'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    return abbreviations[int(i) - 1]
def verify_users(g, user):
    """Verify a |user| or all names in the collection |user| as being valid
    Github users.

    :param g: Github client exposing ``get_user(name)``.
    :param user: a single username or an iterable of usernames.
    :returns: 1 when every user exists.
    :raises SystemExit: when a lookup fails for any name.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt /
    SystemExit) is narrowed to ``except Exception``, and a stray quote is
    removed from the error message.
    """
    if isinstance(user, str):
        user = [user]
    for u in user:
        try:
            g.get_user(u)
        except Exception:
            raise SystemExit('User "%s" does not exist' % (u,))
    return 1
def check_proto(proto):
    """Normalize a protocol value.

    A proto value of -1 (in any representation) means all protocols and is
    mapped to the string 'all'; anything else passes through unchanged.
    """
    return 'all' if str(proto) == '-1' else proto
def _make_filedict(file_list):
"""
Make a dict out of the file_list.
This is for better readability in the templates.
"""
file_dict = []
for item in file_list:
temp_list = {}
temp_list['filename'] = item[0]
temp_list['filesize_long'] = item[1]
temp_list['filesize_str'] = item[2]
temp_list['date'] = item[3]
temp_list['path_thumb'] = item[4]
temp_list['link'] = item[5]
temp_list['select_link'] = item[6]
temp_list['file_extension'] = item[7]
temp_list['file_type'] = item[8]
temp_list['image_dimensions'] = item[9]
temp_list['thumb_dimensions'] = item[10]
temp_list['filename_lower'] = item[11]
temp_list['flag_makethumb'] = item[12]
temp_list['flag_deletedir'] = item[13]
temp_list['flag_imageversion'] = item[14]
file_dict.append(temp_list)
return file_dict | 6b9f5c49bdff9b8799b5dbe33b48bd0ad1f7433d | 694,384 |
import yaml
def load_yaml(filepath):
    """
    Safely read a yaml file.

    :param filepath: a filepath to a yaml file
    :type filepath: str
    :returns: Python objects holding the contents of the yaml file
    """
    with open(filepath, "r") as yaml_file:
        contents = yaml_file.read()
    return yaml.safe_load(contents)
def prepare_sentence(str_words, word_to_id, lower=False):
    """
    Prepare a sentence for evaluation: map each token to its id, falling
    back to the '<UNK>' id for out-of-vocabulary tokens.  Tokens are
    lowercased first when ``lower`` is True.
    """
    ids = []
    for word in str_words:
        key = word.lower() if lower else word
        if key not in word_to_id:
            key = '<UNK>'
        ids.append(word_to_id[key])
    return {
        'str_words': str_words,
        'words': ids,
    }
def count_items(column_list):
    """
    Count how many times each distinct item occurs in a list.

    Arguments:
        column_list: list of (hashable) data items.

    Returns:
        A tuple of (distinct items in first-seen order, their counts),
        matching the original manual-dict implementation.
    """
    from collections import Counter
    # Counter preserves first-encounter insertion order (dict subclass).
    counts = Counter(column_list)
    return list(counts.keys()), list(counts.values())
def arg_command_group(parser, group_name, group_argument_list):
    """Add a group of optional arguments to the parser.

    Params:
        parser: argparse.ArgumentParser where the argument group will be added.
        group_name: String with the name of the argument group.
        group_argument_list: List of dict objects where each dict specifies
            an argument ('name', 'help', 'default' keys).

    Returns:
        group: The argument group object created for the parser.

    Raises:
        ValueError: if the group_argument_list is empty
    """
    if not group_argument_list:
        raise ValueError('Invalid group_argument_list')
    group = parser.add_argument_group(group_name)
    for spec in group_argument_list:
        flag = '--{}'.format(spec['name'])
        default = spec['default']
        help_text = spec['help']
        # Attention: always interpret boolean flag in a positive sense.
        # A boolean default marks the option as a plain switch; presence of
        # the flag negates the corresponding boolean action.
        if isinstance(default, bool):
            group.add_argument(flag, action='store_true', help=help_text)
        else:
            group.add_argument(flag, default=default, help=help_text)
    return group
def get_input_path_parameters(path):
    """Get the input parameters from the path url.

    Segments wrapped in braces (e.g. ``{user_id}`` in ``/users/{user_id}``)
    are treated as parameters; the braces are stripped in the result.
    """
    # startswith/endswith on a non-empty segment matches the original
    # first/last-character checks; a lone '{' or '}' is excluded either way.
    return [segment[1:-1]
            for segment in path.split('/')
            if segment.startswith('{') and segment.endswith('}')]
def create_service(*, name):
    """Create a Service resource for the Schema Registry.

    Parameters
    ----------
    name : `str`
        Name of the StrimziKafkaUser, which is also used as the name of the
        deployment.

    Returns
    -------
    service : `dict`
        The Service resource.
    """
    metadata = {
        'name': name,
        'labels': {'name': name},
    }
    spec = {
        'ports': [{'name': 'schema-registry', 'port': 8081}],
        'selector': {'app': name},
    }
    return {
        'apiVersion': 'v1',
        'kind': 'Service',
        'metadata': metadata,
        'spec': spec,
    }
def compare_partial_dicts(result, expected):
    """
    Make sure all the keys in expected are matched by keys in result, and
    that the values stored in those keys match. Result can contain more
    items than expected - those are ignored.
    Used in the test_lvs, test_pvs and test_vgs tests.

    Bug fix: a key present in ``expected`` but absent from ``result``
    previously raised KeyError; it now counts as a mismatch.
    """
    mismatches = 0
    for k, expected_value in expected.items():
        if k not in result:
            print("Failed for key {k}, missing from result".format(k=k))
            mismatches += 1
        elif result[k] != expected_value:
            print("Failed for key {k}, {r} != {e}".format(k=k, r=result[k], e=expected_value))
            mismatches += 1
    return mismatches == 0
def select(*columns):
    """Build a transformer that keeps only the given columns of an input
    dataframe.

    Args:
        *columns: names of the columns to keep.

    Returns:
        A callable ``f(df, **kwargs)`` whose output contains only the
        selected columns.
    """
    selected = list(columns)

    def _select(df, **kwargs):
        return df[selected]

    return _select
def digits(s):
    """Return only the digit characters of *s*, joined into a string."""
    return "".join(c for c in s if c.isdigit())
import operator
def majorityCnt(classList):
    """
    Return the most common class label in ``classList`` (majority vote).

    Used when all attributes are exhausted but the class labels are still
    mixed: the leaf node takes the label that occurs most often.

    Parameters
    ----------
    classList : list
        Class labels of the (sub)dataset.

    Returns
    -------
    The label with the highest frequency; ties are broken in favor of the
    label encountered first, matching the original stable-sort behavior.
    """
    from collections import Counter
    # Counter.most_common orders equal counts by first encounter, the same
    # tie-break as the original stable sorted(..., reverse=True).
    return Counter(classList).most_common(1)[0][0]
def detrend_none(x, axis=None):
    """
    Return x unchanged: the "no detrending" strategy.

    Parameters
    ----------
    x : any object
        An object containing the data; returned as-is.
    axis : int
        Ignored; present only so the signature is compatible with
        detrend_mean.

    See Also
    --------
    detrend_mean : Another detrend algorithm.
    detrend_linear : Another detrend algorithm.
    detrend : A wrapper around all the detrend algorithms.
    """
    return x
def get_top_k_from_counts(n, counts):
    """
    Normalize the frequencies in ``counts`` by their total and return the
    top ``n`` (key, normalized frequency) pairs in descending order.

    NOTE(review): a 'total' key is excluded from the output, but its value
    still contributes to the normalizing sum -- confirm that is intended.

    :param n: The number of keys to return
    :param counts: A map of counts mapping from a key to its frequency.
    :return: The top-n (key, normalized frequency) pairs.
    """
    total = sum(counts.values())
    normalized = [(key, freq / total)
                  for key, freq in counts.items()
                  if key != 'total']
    normalized.sort(key=lambda pair: pair[1], reverse=True)
    return normalized[:n]
def derive_form_imported(config, data):
    """
    Derive the form-imported field: for every record, each form that is
    present (truthy) gets its imported field set to 'Y'.  Mutates and
    returns ``data``.
    """
    form_fields = [(form['form_name'], form['form_imported'])
                   for form in config['forms']]
    for record in data:
        for form_name, imported_field in form_fields:
            form = record.get(form_name)
            if form:
                form[imported_field] = 'Y'
    return data
def mod(input, n, d=0.0):
    """
    Modulo with offset: the smallest value >= d that is congruent to the
    input modulo n.  Works elementwise on tensors and on plain numbers.

    Args:
        input (`torch.FloatTensor`): input tensor.
        n (float): modulus.
        d (float, optional): offset. Defaults to 0.0.

    Returns:
        (`torch.FloatTensor`): output tensor.
    """
    shifted = input - d
    return shifted % n + d
import numpy
def compress_pause_to_time(signal, sampling_rate, time_step = 0.01, frame_window = 0.025):
    """compress pause index to time

    Args:
        signal (numpy.array(bool)): A list of pause sequence. True indicating pause.
        sampling_rate (int): sampling frequency in Hz.
        time_step (float, optional): The time interval (in seconds) between two pauses. Default to 0.01.
        frame_window (float, optional): The length of speech (in seconds) used to estimate pause. Default to 0.025.

    Returns:
        numpy.array(bool): compressed pause.
    """
    # Hop size in samples between successive output frames.
    T = int(sampling_rate * time_step)
    # Frame length in samples; the next line adds 1 when the product is not
    # an integer, i.e. together these compute ceil(sampling_rate * frame_window).
    Fr = int(sampling_rate * frame_window)
    Fr +=int(sampling_rate * frame_window > Fr)
    # Number of full frames that fit in the signal.
    length = (len(signal) - Fr)//T + 1
    pause = numpy.full( length, False )
    for i in range(length):
        # Frame i counts as pause when more than half of the T samples in
        # its hop window are True.  NOTE(review): only the hop slice
        # [i*T, (i+1)*T) is inspected, not the full Fr-sample frame --
        # confirm this is intended.
        if len(numpy.where(signal[i*T:(i+1)*T])[0]) > T/2:
            pause[i] = True
    return pause
def param_filter(curr_dict, key_set, remove=False):
    """
    Filter a param dictionary by a set of keys.

    Args:
        curr_dict (dict): param dictionary
        key_set (set): set of keys you want
        remove (bool): when True, drop the keys in ``key_set`` instead of
            keeping them

    Returns:
        dict: the filtered param dictionary
    """
    if remove:
        wanted = [k for k in curr_dict.keys() if k not in key_set]
    else:
        wanted = [k for k in key_set if k in curr_dict.keys()]
    return {k: curr_dict[k] for k in wanted}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.